hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7ff821bf5de6837b2ea1910416038596cfe1376 | 11,409 | py | Python | sdk/python/pulumi_azure_native/databoxedge/v20200901preview/user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200901preview/user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200901preview/user.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
device_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
encrypted_password: Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input['AsymmetricEncryptedSecretArgs'] encrypted_password: The password details.
:param pulumi.Input[str] name: The user name.
:param pulumi.Input[Union[str, 'UserType']] user_type: Type of the user.
"""
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if encrypted_password is not None:
pulumi.set(__self__, "encrypted_password", encrypted_password)
if name is not None:
pulumi.set(__self__, "name", name)
if user_type is not None:
pulumi.set(__self__, "user_type", user_type)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
The device name.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="encryptedPassword")
def encrypted_password(self) -> Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']]:
"""
The password details.
"""
return pulumi.get(self, "encrypted_password")
@encrypted_password.setter
def encrypted_password(self, value: Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']]):
pulumi.set(self, "encrypted_password", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The user name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="userType")
def user_type(self) -> Optional[pulumi.Input[Union[str, 'UserType']]]:
"""
Type of the user.
"""
return pulumi.get(self, "user_type")
@user_type.setter
def user_type(self, value: Optional[pulumi.Input[Union[str, 'UserType']]]):
pulumi.set(self, "user_type", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
encrypted_password: Optional[pulumi.Input[pulumi.InputType['AsymmetricEncryptedSecretArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None,
__props__=None):
"""
Represents a user who has access to one or more shares on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[pulumi.InputType['AsymmetricEncryptedSecretArgs']] encrypted_password: The password details.
:param pulumi.Input[str] name: The user name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Union[str, 'UserType']] user_type: Type of the user.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a user who has access to one or more shares on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
encrypted_password: Optional[pulumi.Input[pulumi.InputType['AsymmetricEncryptedSecretArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
__props__.__dict__["encrypted_password"] = encrypted_password
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["user_type"] = user_type
__props__.__dict__["share_access_rights"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:User"), pulumi.Alias(type_="azure-native:databoxedge:User"), pulumi.Alias(type_="azure-nextgen:databoxedge:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:User"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:User"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:User"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:User"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:User")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(User, __self__).__init__(
'azure-native:databoxedge/v20200901preview:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'User':
"""
Get an existing User resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = UserArgs.__new__(UserArgs)
__props__.__dict__["encrypted_password"] = None
__props__.__dict__["name"] = None
__props__.__dict__["share_access_rights"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_type"] = None
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="encryptedPassword")
def encrypted_password(self) -> pulumi.Output[Optional['outputs.AsymmetricEncryptedSecretResponse']]:
"""
The password details.
"""
return pulumi.get(self, "encrypted_password")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="shareAccessRights")
def share_access_rights(self) -> pulumi.Output[Sequence['outputs.ShareAccessRightResponse']]:
"""
List of shares that the user has rights on. This field should not be specified during user creation.
"""
return pulumi.get(self, "share_access_rights")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
User in DataBoxEdge Resource
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userType")
def user_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of the user.
"""
return pulumi.get(self, "user_type")
| 44.392996 | 1,148 | 0.653519 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
device_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
encrypted_password: Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None):
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if encrypted_password is not None:
pulumi.set(__self__, "encrypted_password", encrypted_password)
if name is not None:
pulumi.set(__self__, "name", name)
if user_type is not None:
pulumi.set(__self__, "user_type", user_type)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="encryptedPassword")
def encrypted_password(self) -> Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']]:
return pulumi.get(self, "encrypted_password")
@encrypted_password.setter
def encrypted_password(self, value: Optional[pulumi.Input['AsymmetricEncryptedSecretArgs']]):
pulumi.set(self, "encrypted_password", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="userType")
def user_type(self) -> Optional[pulumi.Input[Union[str, 'UserType']]]:
return pulumi.get(self, "user_type")
@user_type.setter
def user_type(self, value: Optional[pulumi.Input[Union[str, 'UserType']]]):
pulumi.set(self, "user_type", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
encrypted_password: Optional[pulumi.Input[pulumi.InputType['AsymmetricEncryptedSecretArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
device_name: Optional[pulumi.Input[str]] = None,
encrypted_password: Optional[pulumi.Input[pulumi.InputType['AsymmetricEncryptedSecretArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
user_type: Optional[pulumi.Input[Union[str, 'UserType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
__props__.__dict__["encrypted_password"] = encrypted_password
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["user_type"] = user_type
__props__.__dict__["share_access_rights"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:User"), pulumi.Alias(type_="azure-native:databoxedge:User"), pulumi.Alias(type_="azure-nextgen:databoxedge:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:User"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:User"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:User"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:User"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:User"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:User"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:User")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(User, __self__).__init__(
'azure-native:databoxedge/v20200901preview:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'User':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = UserArgs.__new__(UserArgs)
__props__.__dict__["encrypted_password"] = None
__props__.__dict__["name"] = None
__props__.__dict__["share_access_rights"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_type"] = None
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="encryptedPassword")
def encrypted_password(self) -> pulumi.Output[Optional['outputs.AsymmetricEncryptedSecretResponse']]:
return pulumi.get(self, "encrypted_password")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="shareAccessRights")
def share_access_rights(self) -> pulumi.Output[Sequence['outputs.ShareAccessRightResponse']]:
return pulumi.get(self, "share_access_rights")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userType")
def user_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "user_type")
| true | true |
f7ff821d70ce82e373bca3604bbc305030fb02b8 | 1,048 | py | Python | mdls/config/mistune_conf.py | ompugao/mdls | 7ba9fbf75fd13c6e3f85fc248cb32e966dea2399 | [
"MIT"
] | null | null | null | mdls/config/mistune_conf.py | ompugao/mdls | 7ba9fbf75fd13c6e3f85fc248cb32e966dea2399 | [
"MIT"
] | null | null | null | mdls/config/mistune_conf.py | ompugao/mdls | 7ba9fbf75fd13c6e3f85fc248cb32e966dea2399 | [
"MIT"
] | null | null | null | # Copyright 2017 Palantir Technologies, Inc.
import logging
import os
from mdls._utils import find_parents
from .source import ConfigSource
log = logging.getLogger(__name__)
CONFIG_KEY = 'mistune'
PROJECT_CONFIGS = []
OPTIONS = [
('mathjax', 'plugins.mistune.mathjax.enabled', bool),
#('wikilink', 'plugins.pycodestyle.exclude', bool),
]
class MistuneConfig(ConfigSource):
"""Parse Mistune configurations."""
def user_config(self):
config_file = self._user_config_file()
config = self.read_config_from_files([config_file])
return self.parse_config(config, CONFIG_KEY, OPTIONS)
def _user_config_file(self):
if self.is_windows:
return os.path.expanduser('~\\.markdown_mistune')
return os.path.join(self.xdg_home, '.markdown_mistune')
def project_config(self, document_path):
files = find_parents(self.root_path, document_path, PROJECT_CONFIGS)
config = self.read_config_from_files(files)
return self.parse_config(config, CONFIG_KEY, OPTIONS)
| 29.942857 | 76 | 0.714695 |
import logging
import os
from mdls._utils import find_parents
from .source import ConfigSource
log = logging.getLogger(__name__)
CONFIG_KEY = 'mistune'
PROJECT_CONFIGS = []
OPTIONS = [
('mathjax', 'plugins.mistune.mathjax.enabled', bool),
]
class MistuneConfig(ConfigSource):
def user_config(self):
config_file = self._user_config_file()
config = self.read_config_from_files([config_file])
return self.parse_config(config, CONFIG_KEY, OPTIONS)
def _user_config_file(self):
if self.is_windows:
return os.path.expanduser('~\\.markdown_mistune')
return os.path.join(self.xdg_home, '.markdown_mistune')
def project_config(self, document_path):
files = find_parents(self.root_path, document_path, PROJECT_CONFIGS)
config = self.read_config_from_files(files)
return self.parse_config(config, CONFIG_KEY, OPTIONS)
| true | true |
f7ff832c8ffa3f620328d179f677359a6dd3c98c | 560 | py | Python | daemons/notification/app.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | [
"MIT"
] | 11 | 2018-10-26T20:47:55.000Z | 2022-02-02T10:32:42.000Z | daemons/notification/app.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | [
"MIT"
] | 379 | 2018-06-04T22:44:33.000Z | 2020-06-03T00:20:08.000Z | daemons/notification/app.py | ambrosejcarr/matrix-service | f61252d79941fa962240e27062682c9676f07e95 | [
"MIT"
] | 4 | 2018-11-22T01:00:27.000Z | 2020-09-01T16:42:05.000Z | import json
from matrix.lambdas.daemons.notification import NotificationHandler
def notification_handler(event, context):
notification = json.loads(event["Records"][0]["body"])
assert ('bundle_uuid' in notification and 'bundle_version' in notification and 'event_type' in notification)
bundle_uuid = notification["bundle_uuid"]
bundle_version = notification["bundle_version"]
event_type = notification["event_type"]
notification_handler = NotificationHandler(bundle_uuid, bundle_version, event_type)
notification_handler.run()
| 35 | 112 | 0.778571 | import json
from matrix.lambdas.daemons.notification import NotificationHandler
def notification_handler(event, context):
notification = json.loads(event["Records"][0]["body"])
assert ('bundle_uuid' in notification and 'bundle_version' in notification and 'event_type' in notification)
bundle_uuid = notification["bundle_uuid"]
bundle_version = notification["bundle_version"]
event_type = notification["event_type"]
notification_handler = NotificationHandler(bundle_uuid, bundle_version, event_type)
notification_handler.run()
| true | true |
f7ff838702ae36e42626aa978ddbadaf027b4e37 | 1,404 | py | Python | vhoops/modules/home/ui/routes.py | yigitbasalma/vhoops | e55e841631aeaeace371218229e9323a9b4fa06d | [
"Unlicense",
"MIT"
] | 4 | 2021-02-26T11:14:15.000Z | 2021-12-26T13:11:17.000Z | vhoops/modules/home/ui/routes.py | yigitbasalma/vhoops | e55e841631aeaeace371218229e9323a9b4fa06d | [
"Unlicense",
"MIT"
] | 2 | 2021-02-26T10:16:03.000Z | 2021-02-26T10:16:19.000Z | vhoops/modules/home/ui/routes.py | yigitbasalma/vhoops | e55e841631aeaeace371218229e9323a9b4fa06d | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
from flask_login import login_required, current_user
from vhoops.modules.home.forms.user_profile import General, ChangePassword
from vhoops.modules.alerts.api.controllers import get_alerts_func
from vhoops.modules.teams.api.controllers import get_all_teams_func
from vhoops.modules.users.api.controllers import get_all_users_func
home_router = Blueprint("home_router", __name__)
@home_router.route("/home", methods=["GET"])
@login_required
def user_home_page():
return render_template(
"home/home-page.html",
alerts=get_alerts_func(filters="status:open"),
teams=get_all_teams_func(),
users=get_all_users_func()
)
@home_router.route("/profile", methods=["GET"])
@login_required
def user_profile_page():
# General section form
form_general = General()
form_general.first_name.render_kw["value"] = current_user.first_name
form_general.last_name.render_kw["value"] = current_user.last_name
form_general.email.render_kw["value"] = current_user.email
form_general.phone.render_kw["value"] = current_user.phone_number
# Change password section form
change_password_form = ChangePassword()
return render_template(
"home/user-profile.html",
form_general=form_general,
change_password_form=change_password_form
)
| 31.2 | 74 | 0.752137 |
from flask import Blueprint, render_template
from flask_login import login_required, current_user
from vhoops.modules.home.forms.user_profile import General, ChangePassword
from vhoops.modules.alerts.api.controllers import get_alerts_func
from vhoops.modules.teams.api.controllers import get_all_teams_func
from vhoops.modules.users.api.controllers import get_all_users_func
home_router = Blueprint("home_router", __name__)
@home_router.route("/home", methods=["GET"])
@login_required
def user_home_page():
return render_template(
"home/home-page.html",
alerts=get_alerts_func(filters="status:open"),
teams=get_all_teams_func(),
users=get_all_users_func()
)
@home_router.route("/profile", methods=["GET"])
@login_required
def user_profile_page():
form_general = General()
form_general.first_name.render_kw["value"] = current_user.first_name
form_general.last_name.render_kw["value"] = current_user.last_name
form_general.email.render_kw["value"] = current_user.email
form_general.phone.render_kw["value"] = current_user.phone_number
change_password_form = ChangePassword()
return render_template(
"home/user-profile.html",
form_general=form_general,
change_password_form=change_password_form
)
| true | true |
f7ff83b8a9ef044ce063db9886bad6944b26782d | 448 | py | Python | theBroker/venv/Scripts/pip3.7-script.py | emirgo/WeatherStation | f0f8c3464470991fc962d83cea20f3bcfd6a04b6 | [
"MIT"
] | null | null | null | theBroker/venv/Scripts/pip3.7-script.py | emirgo/WeatherStation | f0f8c3464470991fc962d83cea20f3bcfd6a04b6 | [
"MIT"
] | null | null | null | theBroker/venv/Scripts/pip3.7-script.py | emirgo/WeatherStation | f0f8c3464470991fc962d83cea20f3bcfd6a04b6 | [
"MIT"
] | null | null | null | #!"D:\1 UNIVERSITY\ProejctSoftwareEngineering\WeatherStation\theBroker\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| 34.461538 | 95 | 0.685268 |
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| true | true |
f7ff8447dc20bb3f3a0edf84a6a61a23962008fa | 6,419 | py | Python | examples/domains/wikitext103_wae.py | awslabs/w-lda | 15eb320faac0570e858c689df6f2c61bcad3010e | [
"Apache-2.0"
] | 40 | 2019-12-06T20:55:47.000Z | 2022-03-25T14:07:34.000Z | examples/domains/wikitext103_wae.py | awslabs/w-lda | 15eb320faac0570e858c689df6f2c61bcad3010e | [
"Apache-2.0"
] | 7 | 2019-12-09T12:44:53.000Z | 2021-09-30T02:20:15.000Z | examples/domains/wikitext103_wae.py | awslabs/w-lda | 15eb320faac0570e858c689df6f2c61bcad3010e | [
"Apache-2.0"
] | 9 | 2019-12-08T08:20:03.000Z | 2021-12-28T14:17:32.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import shutil
import numpy as np
from core import Data
from utils import reverse_dict
import scipy.sparse as sparse
import nltk
class Wikitext103(Data):
def __init__(self, batch_size, data_path='', ctx=None, saveto='', **kwargs):
self.saveto = saveto
super(Wikitext103, self).__init__(batch_size, data_path, ctx)
def load(self, path='./data/wikitext-103', features='BoW', match_avitm=True):
if path[:2] == '~/':
path = os.path.join(os.path.expanduser(path[:2]), path[2:])
### Specify the file locations
train_path = path + '/wikitext-103_tra.csr.npz'
test_path = path + '/wikitext-103_test.csr.npz'
vocab_path = path + '/vocab.txt'
### Load train
train_csr = sparse.load_npz(train_path)
train = np.array(train_csr.todense()).astype('float32')
### Load test
test_csr = sparse.load_npz(test_path)
test = np.array(test_csr.todense()).astype('float32')
### load vocab
ENCODING = "ISO-8859-1"
# ENCODING = "utf-8"
with open(vocab_path, encoding=ENCODING) as f:
vocab_list = [line.strip('\n') for line in f]
# construct maps
vocab2dim = dict(zip(vocab_list, range(len(vocab_list))))
dim2vocab = reverse_dict(vocab2dim)
return [train, None, test, None, None, None], [None, None, None], [vocab2dim, dim2vocab, None, None]
if __name__ == '__main__':
def check_create_dir(dir):
if os.path.exists(dir): # cleanup existing data folder
shutil.rmtree(dir)
os.mkdir(dir)
# create directory for data
dataset = 'wikitext-103'
current_dir = os.getcwd()
data_dir = os.path.join(current_dir, "data")
if not os.path.exists(data_dir):
print('Creating directory:', data_dir)
os.mkdir(data_dir)
data_dir = os.path.join(current_dir, "data", dataset)
check_create_dir(data_dir)
os.chdir(data_dir)
print('Current directory: ', os.getcwd())
# download data
os.system("curl -O https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip")
os.system("unzip wikitext-103-v1.zip")
# parse into documents
def is_document_start(line):
if len(line) < 4:
return False
if line[0] is '=' and line[-1] is '=':
if line[2] is not '=':
return True
else:
return False
else:
return False
def token_list_per_doc(input_dir, token_file):
lines_list = []
line_prev = ''
prev_line_start_doc = False
with open(os.path.join(input_dir, token_file), 'r', encoding='utf-8') as f:
for l in f:
line = l.strip()
if prev_line_start_doc and line:
# the previous line should not have been start of a document!
lines_list.pop()
lines_list[-1] = lines_list[-1] + ' ' + line_prev
if line:
if is_document_start(line) and not line_prev:
lines_list.append(line)
prev_line_start_doc = True
else:
lines_list[-1] = lines_list[-1] + ' ' + line
prev_line_start_doc = False
else:
prev_line_start_doc = False
line_prev = line
print("{} documents parsed!".format(len(lines_list)))
return lines_list
input_dir = os.path.join(data_dir, dataset)
train_file = 'wiki.train.tokens'
val_file = 'wiki.valid.tokens'
test_file = 'wiki.test.tokens'
train_doc_list = token_list_per_doc(input_dir, train_file)
val_doc_list = token_list_per_doc(input_dir, val_file)
test_doc_list = token_list_per_doc(input_dir, test_file)
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import re
token_pattern = re.compile(r"(?u)\b\w\w+\b")
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t) for t in doc.split() if len(t) >= 2 and re.match("[a-z].*", t)
and re.match(token_pattern, t)]
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
print('Lemmatizing and counting, this may take a few minutes...')
start_time = time.time()
vectorizer = CountVectorizer(input='content', analyzer='word', stop_words='english',
tokenizer=LemmaTokenizer(), max_df=0.8, min_df=3, max_features=20000)
train_vectors = vectorizer.fit_transform(train_doc_list)
val_vectors = vectorizer.transform(val_doc_list)
test_vectors = vectorizer.transform(test_doc_list)
vocab_list = vectorizer.get_feature_names()
vocab_size = len(vocab_list)
print('vocab size:', vocab_size)
print('Done. Time elapsed: {:.2f}s'.format(time.time() - start_time))
import scipy.sparse as sparse
def shuffle_and_dtype(vectors):
idx = np.arange(vectors.shape[0])
np.random.shuffle(idx)
vectors = vectors[idx]
vectors = sparse.csr_matrix(vectors, dtype=np.float32)
print(type(vectors), vectors.dtype)
return vectors
train_vectors = shuffle_and_dtype(train_vectors)
val_vectors = shuffle_and_dtype(val_vectors)
test_vectors = shuffle_and_dtype(test_vectors)
with open('vocab.txt', 'w', encoding='utf-8') as f:
for item in vocab_list:
f.write(item+'\n')
sparse.save_npz('wikitext-103_tra.csr.npz', train_vectors)
sparse.save_npz('wikitext-103_val.csr.npz', val_vectors)
sparse.save_npz('wikitext-103_test.csr.npz', test_vectors)
| 34.88587 | 108 | 0.627045 |
import os
import shutil
import numpy as np
from core import Data
from utils import reverse_dict
import scipy.sparse as sparse
import nltk
class Wikitext103(Data):
def __init__(self, batch_size, data_path='', ctx=None, saveto='', **kwargs):
self.saveto = saveto
super(Wikitext103, self).__init__(batch_size, data_path, ctx)
def load(self, path='./data/wikitext-103', features='BoW', match_avitm=True):
if path[:2] == '~/':
path = os.path.join(os.path.expanduser(path[:2]), path[2:])
test_path = path + '/wikitext-103_test.csr.npz'
vocab_path = path + '/vocab.txt'
se.load_npz(train_path)
train = np.array(train_csr.todense()).astype('float32')
rse.load_npz(test_path)
test = np.array(test_csr.todense()).astype('float32')
8859-1"
with open(vocab_path, encoding=ENCODING) as f:
vocab_list = [line.strip('\n') for line in f]
vocab2dim = dict(zip(vocab_list, range(len(vocab_list))))
dim2vocab = reverse_dict(vocab2dim)
return [train, None, test, None, None, None], [None, None, None], [vocab2dim, dim2vocab, None, None]
if __name__ == '__main__':
def check_create_dir(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
os.mkdir(dir)
dataset = 'wikitext-103'
current_dir = os.getcwd()
data_dir = os.path.join(current_dir, "data")
if not os.path.exists(data_dir):
print('Creating directory:', data_dir)
os.mkdir(data_dir)
data_dir = os.path.join(current_dir, "data", dataset)
check_create_dir(data_dir)
os.chdir(data_dir)
print('Current directory: ', os.getcwd())
os.system("curl -O https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip")
os.system("unzip wikitext-103-v1.zip")
def is_document_start(line):
if len(line) < 4:
return False
if line[0] is '=' and line[-1] is '=':
if line[2] is not '=':
return True
else:
return False
else:
return False
def token_list_per_doc(input_dir, token_file):
lines_list = []
line_prev = ''
prev_line_start_doc = False
with open(os.path.join(input_dir, token_file), 'r', encoding='utf-8') as f:
for l in f:
line = l.strip()
if prev_line_start_doc and line:
lines_list.pop()
lines_list[-1] = lines_list[-1] + ' ' + line_prev
if line:
if is_document_start(line) and not line_prev:
lines_list.append(line)
prev_line_start_doc = True
else:
lines_list[-1] = lines_list[-1] + ' ' + line
prev_line_start_doc = False
else:
prev_line_start_doc = False
line_prev = line
print("{} documents parsed!".format(len(lines_list)))
return lines_list
input_dir = os.path.join(data_dir, dataset)
train_file = 'wiki.train.tokens'
val_file = 'wiki.valid.tokens'
test_file = 'wiki.test.tokens'
train_doc_list = token_list_per_doc(input_dir, train_file)
val_doc_list = token_list_per_doc(input_dir, val_file)
test_doc_list = token_list_per_doc(input_dir, test_file)
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import re
token_pattern = re.compile(r"(?u)\b\w\w+\b")
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t) for t in doc.split() if len(t) >= 2 and re.match("[a-z].*", t)
and re.match(token_pattern, t)]
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
print('Lemmatizing and counting, this may take a few minutes...')
start_time = time.time()
vectorizer = CountVectorizer(input='content', analyzer='word', stop_words='english',
tokenizer=LemmaTokenizer(), max_df=0.8, min_df=3, max_features=20000)
train_vectors = vectorizer.fit_transform(train_doc_list)
val_vectors = vectorizer.transform(val_doc_list)
test_vectors = vectorizer.transform(test_doc_list)
vocab_list = vectorizer.get_feature_names()
vocab_size = len(vocab_list)
print('vocab size:', vocab_size)
print('Done. Time elapsed: {:.2f}s'.format(time.time() - start_time))
import scipy.sparse as sparse
def shuffle_and_dtype(vectors):
idx = np.arange(vectors.shape[0])
np.random.shuffle(idx)
vectors = vectors[idx]
vectors = sparse.csr_matrix(vectors, dtype=np.float32)
print(type(vectors), vectors.dtype)
return vectors
train_vectors = shuffle_and_dtype(train_vectors)
val_vectors = shuffle_and_dtype(val_vectors)
test_vectors = shuffle_and_dtype(test_vectors)
with open('vocab.txt', 'w', encoding='utf-8') as f:
for item in vocab_list:
f.write(item+'\n')
sparse.save_npz('wikitext-103_tra.csr.npz', train_vectors)
sparse.save_npz('wikitext-103_val.csr.npz', val_vectors)
sparse.save_npz('wikitext-103_test.csr.npz', test_vectors)
| true | true |
f7ff8468c11e20a862b15c32e9ef8d87b9c6c1cd | 15,595 | py | Python | unit_tests/bigquery/test_query.py | Ofekmeister/google-cloud-python | 07dd51bc447beca67b8da1c66f1dfb944ef70418 | [
"Apache-2.0"
] | null | null | null | unit_tests/bigquery/test_query.py | Ofekmeister/google-cloud-python | 07dd51bc447beca67b8da1c66f1dfb944ef70418 | [
"Apache-2.0"
] | null | null | null | unit_tests/bigquery/test_query.py | Ofekmeister/google-cloud-python | 07dd51bc447beca67b8da1c66f1dfb944ef70418 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestQueryResults(unittest.TestCase):
PROJECT = 'project'
JOB_NAME = 'job_name'
JOB_NAME = 'test-synchronous-query'
JOB_TYPE = 'query'
QUERY = 'select count(*) from persons'
TOKEN = 'TOKEN'
def _getTargetClass(self):
from google.cloud.bigquery.query import QueryResults
return QueryResults
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _makeResource(self, complete=False):
resource = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'jobComplete': complete,
'errors': [],
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
if complete:
resource['totalRows'] = '1000'
resource['rows'] = [
{'f': [
{'v': 'Phred Phlyntstone'},
{'v': 32},
]},
{'f': [
{'v': 'Bharney Rhubble'},
{'v': 33},
]},
{'f': [
{'v': 'Wylma Phlyntstone'},
{'v': 29},
]},
{'f': [
{'v': 'Bhettye Rhubble'},
{'v': 27},
]},
]
resource['pageToken'] = self.TOKEN
resource['totalBytesProcessed'] = 100000
resource['cacheHit'] = False
return resource
def _verifySchema(self, query, resource):
from google.cloud.bigquery.schema import SchemaField
if 'schema' in resource:
fields = resource['schema']['fields']
self.assertEqual(len(query.schema), len(fields))
for found, expected in zip(query.schema, fields):
self.assertTrue(isinstance(found, SchemaField))
self.assertEqual(found.name, expected['name'])
self.assertEqual(found.field_type, expected['type'])
self.assertEqual(found.mode, expected['mode'])
self.assertEqual(found.description,
expected.get('description'))
self.assertEqual(found.fields, expected.get('fields'))
else:
self.assertTrue(query.schema is None)
def _verifyRows(self, query, resource):
expected = resource.get('rows')
if expected is None:
self.assertEqual(query.rows, [])
else:
found = query.rows
self.assertEqual(len(found), len(expected))
for f_row, e_row in zip(found, expected):
self.assertEqual(f_row,
tuple([cell['v'] for cell in e_row['f']]))
def _verifyResourceProperties(self, query, resource):
self.assertEqual(query.cache_hit, resource.get('cacheHit'))
self.assertEqual(query.complete, resource.get('jobComplete'))
self.assertEqual(query.errors, resource.get('errors'))
self.assertEqual(query.page_token, resource.get('pageToken'))
self.assertEqual(query.total_rows, resource.get('totalRows'))
self.assertEqual(query.total_bytes_processed,
resource.get('totalBytesProcessed'))
if 'jobReference' in resource:
self.assertEqual(query.name, resource['jobReference']['jobId'])
else:
self.assertTrue(query.name is None)
self._verifySchema(query, resource)
self._verifyRows(query, resource)
def test_ctor(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertEqual(query.query, self.QUERY)
self.assertTrue(query._client is client)
self.assertTrue(query.cache_hit is None)
self.assertTrue(query.complete is None)
self.assertTrue(query.errors is None)
self.assertTrue(query.name is None)
self.assertTrue(query.page_token is None)
self.assertEqual(query.rows, [])
self.assertTrue(query.schema is None)
self.assertTrue(query.total_rows is None)
self.assertTrue(query.total_bytes_processed is None)
self.assertTrue(query.default_dataset is None)
self.assertTrue(query.max_results is None)
self.assertTrue(query.preserve_nulls is None)
self.assertTrue(query.use_query_cache is None)
self.assertTrue(query.use_legacy_sql is None)
def test_job_wo_jobid(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertTrue(query.job is None)
def test_job_w_jobid(self):
from google.cloud.bigquery.job import QueryJob
SERVER_GENERATED = 'SERVER_GENERATED'
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
query._properties['jobReference'] = {
'projectId': self.PROJECT,
'jobId': SERVER_GENERATED,
}
job = query.job
self.assertTrue(isinstance(job, QueryJob))
self.assertEqual(job.query, self.QUERY)
self.assertTrue(job._client is client)
self.assertEqual(job.name, SERVER_GENERATED)
fetched_later = query.job
self.assertTrue(fetched_later is job)
def test_schema(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self._verifyResourceProperties(query, {})
resource = {
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
query._set_properties(resource)
self._verifyResourceProperties(query, resource)
def test_run_w_bound_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
self.assertEqual(query.udf_resources, [])
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_alternate_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=True)
DATASET = 'test_dataset'
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query.default_dataset = client2.dataset(DATASET)
query.max_results = 100
query.preserve_nulls = True
query.timeout_ms = 20000
query.use_query_cache = False
query.use_legacy_sql = True
query.dry_run = True
query.run(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'query': self.QUERY,
'defaultDataset': {
'projectId': self.PROJECT,
'datasetId': DATASET,
},
'dryRun': True,
'maxResults': 100,
'preserveNulls': True,
'timeoutMs': 20000,
'useQueryCache': False,
'useLegacySql': True,
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_inline_udf(self):
from google.cloud.bigquery.job import UDFResource
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("inlineCode", INLINE_UDF_CODE)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY,
'userDefinedFunctionResources':
[{'inlineCode': INLINE_UDF_CODE}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_udf_resource_uri(self):
from google.cloud.bigquery.job import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY,
'userDefinedFunctionResources':
[{'resourceUri': RESOURCE_URI}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_mixed_udfs(self):
from google.cloud.bigquery.job import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(query.udf_resources,
[UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE)])
SENT = {'query': self.QUERY,
'userDefinedFunctionResources': [
{'resourceUri': RESOURCE_URI},
{"inlineCode": INLINE_UDF_CODE}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_fetch_data_query_not_yet_run(self):
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
self.assertRaises(ValueError, query.fetch_data)
def test_fetch_data_w_bound_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
del AFTER['totalRows']
conn = _Connection(AFTER)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data()
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, None)
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
def test_fetch_data_w_alternate_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
MAX = 10
TOKEN = 'TOKEN'
START = 2257
TIMEOUT = 20000
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(AFTER)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data(
client=client2, max_results=MAX, page_token=TOKEN,
start_index=START, timeout_ms=TIMEOUT)
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, int(AFTER['totalRows']))
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'],
{'maxResults': MAX,
'pageToken': TOKEN,
'startIndex': START,
'timeoutMs': TIMEOUT})
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
def dataset(self, name):
from google.cloud.bigquery.dataset import Dataset
return Dataset(name, client=self)
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
| 38.506173 | 79 | 0.597756 |
import unittest
class TestQueryResults(unittest.TestCase):
PROJECT = 'project'
JOB_NAME = 'job_name'
JOB_NAME = 'test-synchronous-query'
JOB_TYPE = 'query'
QUERY = 'select count(*) from persons'
TOKEN = 'TOKEN'
def _getTargetClass(self):
from google.cloud.bigquery.query import QueryResults
return QueryResults
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _makeResource(self, complete=False):
resource = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'jobComplete': complete,
'errors': [],
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
if complete:
resource['totalRows'] = '1000'
resource['rows'] = [
{'f': [
{'v': 'Phred Phlyntstone'},
{'v': 32},
]},
{'f': [
{'v': 'Bharney Rhubble'},
{'v': 33},
]},
{'f': [
{'v': 'Wylma Phlyntstone'},
{'v': 29},
]},
{'f': [
{'v': 'Bhettye Rhubble'},
{'v': 27},
]},
]
resource['pageToken'] = self.TOKEN
resource['totalBytesProcessed'] = 100000
resource['cacheHit'] = False
return resource
def _verifySchema(self, query, resource):
from google.cloud.bigquery.schema import SchemaField
if 'schema' in resource:
fields = resource['schema']['fields']
self.assertEqual(len(query.schema), len(fields))
for found, expected in zip(query.schema, fields):
self.assertTrue(isinstance(found, SchemaField))
self.assertEqual(found.name, expected['name'])
self.assertEqual(found.field_type, expected['type'])
self.assertEqual(found.mode, expected['mode'])
self.assertEqual(found.description,
expected.get('description'))
self.assertEqual(found.fields, expected.get('fields'))
else:
self.assertTrue(query.schema is None)
def _verifyRows(self, query, resource):
expected = resource.get('rows')
if expected is None:
self.assertEqual(query.rows, [])
else:
found = query.rows
self.assertEqual(len(found), len(expected))
for f_row, e_row in zip(found, expected):
self.assertEqual(f_row,
tuple([cell['v'] for cell in e_row['f']]))
def _verifyResourceProperties(self, query, resource):
self.assertEqual(query.cache_hit, resource.get('cacheHit'))
self.assertEqual(query.complete, resource.get('jobComplete'))
self.assertEqual(query.errors, resource.get('errors'))
self.assertEqual(query.page_token, resource.get('pageToken'))
self.assertEqual(query.total_rows, resource.get('totalRows'))
self.assertEqual(query.total_bytes_processed,
resource.get('totalBytesProcessed'))
if 'jobReference' in resource:
self.assertEqual(query.name, resource['jobReference']['jobId'])
else:
self.assertTrue(query.name is None)
self._verifySchema(query, resource)
self._verifyRows(query, resource)
def test_ctor(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertEqual(query.query, self.QUERY)
self.assertTrue(query._client is client)
self.assertTrue(query.cache_hit is None)
self.assertTrue(query.complete is None)
self.assertTrue(query.errors is None)
self.assertTrue(query.name is None)
self.assertTrue(query.page_token is None)
self.assertEqual(query.rows, [])
self.assertTrue(query.schema is None)
self.assertTrue(query.total_rows is None)
self.assertTrue(query.total_bytes_processed is None)
self.assertTrue(query.default_dataset is None)
self.assertTrue(query.max_results is None)
self.assertTrue(query.preserve_nulls is None)
self.assertTrue(query.use_query_cache is None)
self.assertTrue(query.use_legacy_sql is None)
def test_job_wo_jobid(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertTrue(query.job is None)
def test_job_w_jobid(self):
from google.cloud.bigquery.job import QueryJob
SERVER_GENERATED = 'SERVER_GENERATED'
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
query._properties['jobReference'] = {
'projectId': self.PROJECT,
'jobId': SERVER_GENERATED,
}
job = query.job
self.assertTrue(isinstance(job, QueryJob))
self.assertEqual(job.query, self.QUERY)
self.assertTrue(job._client is client)
self.assertEqual(job.name, SERVER_GENERATED)
fetched_later = query.job
self.assertTrue(fetched_later is job)
def test_schema(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self._verifyResourceProperties(query, {})
resource = {
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
query._set_properties(resource)
self._verifyResourceProperties(query, resource)
def test_run_w_bound_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
self.assertEqual(query.udf_resources, [])
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_alternate_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=True)
DATASET = 'test_dataset'
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query.default_dataset = client2.dataset(DATASET)
query.max_results = 100
query.preserve_nulls = True
query.timeout_ms = 20000
query.use_query_cache = False
query.use_legacy_sql = True
query.dry_run = True
query.run(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'query': self.QUERY,
'defaultDataset': {
'projectId': self.PROJECT,
'datasetId': DATASET,
},
'dryRun': True,
'maxResults': 100,
'preserveNulls': True,
'timeoutMs': 20000,
'useQueryCache': False,
'useLegacySql': True,
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_inline_udf(self):
from google.cloud.bigquery.job import UDFResource
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("inlineCode", INLINE_UDF_CODE)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY,
'userDefinedFunctionResources':
[{'inlineCode': INLINE_UDF_CODE}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_udf_resource_uri(self):
from google.cloud.bigquery.job import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY,
'userDefinedFunctionResources':
[{'resourceUri': RESOURCE_URI}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_mixed_udfs(self):
from google.cloud.bigquery.job import UDFResource
RESOURCE_URI = 'gs://some-bucket/js/lib.js'
INLINE_UDF_CODE = 'var someCode = "here";'
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.udf_resources = [UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE)]
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(query.udf_resources,
[UDFResource("resourceUri", RESOURCE_URI),
UDFResource("inlineCode", INLINE_UDF_CODE)])
SENT = {'query': self.QUERY,
'userDefinedFunctionResources': [
{'resourceUri': RESOURCE_URI},
{"inlineCode": INLINE_UDF_CODE}]}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_fetch_data_query_not_yet_run(self):
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
self.assertRaises(ValueError, query.fetch_data)
def test_fetch_data_w_bound_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
del AFTER['totalRows']
conn = _Connection(AFTER)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data()
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, None)
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
def test_fetch_data_w_alternate_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
MAX = 10
TOKEN = 'TOKEN'
START = 2257
TIMEOUT = 20000
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(AFTER)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data(
client=client2, max_results=MAX, page_token=TOKEN,
start_index=START, timeout_ms=TIMEOUT)
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, int(AFTER['totalRows']))
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'],
{'maxResults': MAX,
'pageToken': TOKEN,
'startIndex': START,
'timeoutMs': TIMEOUT})
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
def dataset(self, name):
from google.cloud.bigquery.dataset import Dataset
return Dataset(name, client=self)
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
| true | true |
f7ff84b0dacbdaf59048ed59c911b8125c0ae9f8 | 10,029 | py | Python | backend/src/database/usersCollectionManager.py | NRizzoInc/email_messages | d15826efb4e19d6c97738df582fc767d4db9ba3e | [
"MIT"
] | 1 | 2020-03-22T03:48:41.000Z | 2020-03-22T03:48:41.000Z | backend/src/database/usersCollectionManager.py | NRizzoInc/email_messages | d15826efb4e19d6c97738df582fc767d4db9ba3e | [
"MIT"
] | 31 | 2020-07-15T01:38:27.000Z | 2020-07-20T20:55:41.000Z | backend/src/database/usersCollectionManager.py | NRizzoInc/email_messages | d15826efb4e19d6c97738df582fc767d4db9ba3e | [
"MIT"
] | null | null | null | """
\n@File: Responsible for maintaing the "users" collection
"""
#------------------------------STANDARD DEPENDENCIES-----------------------------#
import json
import pickle, copyreg, ssl # for serializing User objects (SSL obj requires more work)
import uuid
#-----------------------------3RD PARTY DEPENDENCIES-----------------------------#
from pymongo import MongoClient, collection
from bson.binary import Binary # for serializing/derializing User objects
#--------------------------------OUR DEPENDENCIES--------------------------------#
from backend.src import utils
from .databaseBaseClass import DatabaseBaseClass
class UsersCollectionManager(DatabaseBaseClass):
    def __init__(self):
        """
        \n@Brief: This class is responsible for managing the user's collection
        \n@Note: Inherits from DatabaseBaseClass which gives it a lot of util functions and self variables
        """
        # Inherit all functions and 'self' variables
        super().__init__()

    def createSafeCookieId(self):
        """Creates a UUID for users that is guaranteed not to collide with an existing id"""
        # do-while loop to make sure non-colliding unique id is made
        while True:
            # https://docs.python.org/3/library/uuid.html -- safe random uuid
            userToken = str(uuid.uuid4())
            inUse = self.isIdInUse(userToken)
            if not inUse: break # leave loop once new id is found
            else: print(f"userToken '{userToken}' is already taken")
        return userToken

    def _addUserToColl(self, userToken, username, password, userObj):
        """
        \n@Brief: High level api function call to add a user to the database
        \n@Param: userToken - The user's unique safe id (aka id)
        \n@Param: username - The user's chosen username
        \n@Param: password - The user's chosen password
        \n@Param: userObj - Reference to the instantiated userObj (may be None if not yet created)
        """
        newUser = {
            "id": userToken,
            "username": username,
            "password": password,
            # need to serialize object for storage
            "User": self._serializeObj(userObj) if userObj is not None else None
        }
        self._replaceDataById(self.usersColl, userToken, newUser)

    def isUsernameInUse(self, usernameToTest:str):
        """\n@Return: True if a document with this username already exists"""
        return self._docExists(self.usersColl, {"username": usernameToTest})

    def isIdInUse(self, idToTest:str):
        """\n@Return: True if a document with this id already exists"""
        return self._docExists(self.usersColl, {"id": idToTest})

    def getIdByUsername(self, username) -> str:
        """
        \n@Param: username - The username matching the id you are looking for
        \n@Return: The corresponding id
        \n@Note: Useful if chained with other functions that require id (i.e. 'findUserById()')
        """
        matches = list(self.usersColl.find({"username": username}))
        matchId = list(filter(self.filterLocalhost(), matches))[0]["id"]
        return matchId

    def findUserById(self, userToken, UserObjRef):
        """
        \n@Param: userToken - The user's unique token id
        \n@Param: UserObjRef - reference to the constructor for the User object
        \n@Return: The 'User' object (a new one is created if unset)
        """
        userDoc = self._getDocById(self.usersColl, userToken)
        return self._createUserObjIfDNE(userDoc, UserObjRef)

    def countNumUsernameMatch(self, username):
        """\n@Return: The number of documents whose username matches `username`"""
        # NOTE: count_documents() replaces Cursor.count(), which was deprecated
        # in PyMongo 3.7 and removed in PyMongo 4.
        return self.usersColl.count_documents({"username": username})

    def getUserByUsername(self, username, UserObjRef):
        """
        \n@Param: username: The username of the user's account
        \n@Param: UserObjRef - reference to the constructor for the User object
        \n@Returns: None if username does not exist
        """
        userDoc = self._getDocByUsername(self.usersColl, username)
        return self._createUserObjIfDNE(userDoc, UserObjRef)

    def _createUserObjIfDNE(self, userDoc, UserObjRef):
        """
        \n@Brief: Get the User object referenced in the document. If it doesn't exist, create one
        \n@Param: userDoc - The dictionary containing the information belonging to a specific user
        \n@Param: UserObjRef - reference to the constructor for the User object
        \n@Returns: The User object associated with the document (creates one if not already made/set)
        \n@Note: Good if paired with '__checkIfUserValid'
        """
        userObj = self.__checkIfUserValid(userDoc)
        userId = userDoc["id"]
        return userObj if userObj is not None else UserObjRef(userId)

    def _createUserDocIfUsernameDNE(self, username, id="", password:str=""):
        """
        \n@Brief: Helper function that creates a new user in the database if username not found
        \n@Param: username - the username to search for
        \n@Param: id - (optional) The id to try to assign to the user if dne. WARNING: only use for cli
        \n@Param: password - (optional) If username does not exist, use this password for the user
        \n@Note: A user doc in the database contains id, username, password, and User object
        """
        # Bug fix: previously the existence check result was ignored and a new
        # document was always written, allowing duplicate usernames. Match the
        # documented behavior (and the sibling '_createUserDocIfIdDNE').
        if self.isUsernameInUse(username):
            return
        idInUse = self.isIdInUse(id)
        # renamed from 'uuid' to avoid shadowing the imported uuid module
        newId = id if not idInUse else self.createSafeCookieId()
        self._addUserToColl(newId, username, password, None)

    def _createUserDocIfIdDNE(self, id, username="", password:str=""):
        """
        \n@Brief: Helper function that creates a new user in the database if id not found
        \n@Param: id - the user's id to search for
        \n@Param: username - (optional) The username to try to assign to the user if dne
        \n@Param: password - (optional) If id does not exist, use this password for the user
        \n@Note: A user doc in the database contains id, username, password, and User object
        \n@Return: True if already exists, false if had to create it
        """
        idInUse = self.isIdInUse(id)
        if idInUse: return True
        # only keep the requested username if it is set and not already taken
        username = "" if username == "" or username is None or self.isUsernameInUse(username) else username
        self._addUserToColl(id, username, password, None)
        return False

    def __checkIfUserValid(self, userDoc:dict):
        """
        \n@Brief: Helper function that checks if the 'User' obj within the document has been set and is valid
        \n@Param: userDoc - The dictionary containing the information belonging to a specific user
        \n@Return: The User object (None if unset)
        """
        if utils.keyExists(userDoc, "User") and userDoc["User"] is not None:
            serializedUserObj = userDoc["User"]
            userObj = self._deserializeData(serializedUserObj)
        else: userObj = None
        return userObj

    def getPasswordFromUsername(self, username:str) -> str:
        """
        \n@Param: username - The password to find's username
        \n@Returns: The matching password
        """
        matches = list(self.usersColl.find({"username": username}))
        actualPassword = list(filter(self.filterLocalhost(), matches))[0]["password"]
        return actualPassword

    def getPasswordFromId(self, myId:str) -> str:
        """
        \n@Param: myId - The password to find's id
        \n@Returns: The matching password (or "" if not yet set)
        """
        match = self._getDocById(self.usersColl, myId)
        # renamed local from 'username' -- this method returns a password
        password = match["password"] if utils.keyExists(match, "password") else ""
        return password

    def updateUserObjById(self, myId:str, updatedUserObj:object):
        """
        \n@Brief: Updates the 'User' object in the document corresponding to the id
        \n@Param: myId - The UUID of the user to update
        \n@Param: updatedUserObj - The User object to replace the existing one with
        \n@Returns: An instance of UpdateResult
        """
        query = {"id": myId}
        # https://docs.mongodb.com/manual/reference/operator/update/#id1 -- different update commands
        # $set = set matching field
        serializedUpdatedObj = self._serializeObj(updatedUserObj)
        toUpdateWith = {"$set": {"User": serializedUpdatedObj}}
        return self.usersColl.update_one(query, toUpdateWith)

    def setUsernameById(self, myId:str, username:str):
        """
        \n@Brief: Sets the username in the database for the user with 'myId'
        \n@Param: myId - The id of the user whose username you want to set
        \n@Param: username - The username to set
        \n@Note: Probably only useful for command line uses
        \n@Returns: An instance of UpdateResult
        """
        query = {"id": myId}
        toUpdateWith = {"$set": {"username": username}}
        return self.usersColl.update_one(query, toUpdateWith)

    def setPasswordById(self, myId:str, password:str):
        """
        \n@Brief: Sets the password in the database for the user with 'myId'
        \n@Param: myId - The id of the user whose password you want to set
        \n@Param: password - The password to set
        \n@Note: Probably only useful for command line uses
        \n@Returns: An instance of UpdateResult
        """
        query = {"id": myId}
        toUpdateWith = {"$set": {"password": password}}
        return self.usersColl.update_one(query, toUpdateWith)

    def getUsernameById(self, myId:str) -> str:
        """
        \n@Brief: Gets the username in the database for the user with 'myId'
        \n@Param: myId - The id of the user whose username you want to get
        \n@Returns: The username belonging to the ID (empty string if not set)
        """
        match = self._getDocById(self.usersColl, myId)
        username = match["username"] if utils.keyExists(match, "username") else ""
        return username
| 47.084507 | 115 | 0.622495 |
import json
import pickle, copyreg, ssl
import uuid
from pymongo import MongoClient, collection
from bson.binary import Binary
from backend.src import utils
from .databaseBaseClass import DatabaseBaseClass
class UsersCollectionManager(DatabaseBaseClass):
def __init__(self):
super().__init__()
def createSafeCookieId(self):
while True:
userToken = str(uuid.uuid4())
inUse = self.isIdInUse(userToken)
if not inUse: break
else: print(f"userToken '{userToken}' is already taken")
return userToken
def _addUserToColl(self, userToken, username, password, userObj):
newUser = {
"id": userToken,
"username": username,
"password": password,
"User": self._serializeObj(userObj) if userObj != None else None
}
self._replaceDataById(self.usersColl, userToken, newUser)
def isUsernameInUse(self, usernameToTest:str):
usernameExists = self._docExists(self.usersColl, {"username": usernameToTest})
return usernameExists
def isIdInUse(self, idToTest:str):
idExists = self._docExists(self.usersColl, {"id": idToTest})
return idExists
def getIdByUsername(self, username)->str():
matches = list(self.usersColl.find({"username": username}))
matchId = list(filter(self.filterLocalhost(), matches))[0]["id"]
return matchId
def findUserById(self, userToken, UserObjRef):
userDoc = self._getDocById(self.usersColl, userToken)
return self._createUserObjIfDNE(userDoc, UserObjRef)
def countNumUsernameMatch(self, username):
return self.usersColl.find({"username": username}).count()
def getUserByUsername(self, username, UserObjRef):
userDoc = self._getDocByUsername(self.usersColl, username)
return self._createUserObjIfDNE(userDoc, UserObjRef)
def _createUserObjIfDNE(self, userDoc, UserObjRef):
userObj = self.__checkIfUserValid(userDoc)
userId = userDoc["id"]
return userObj if userObj != None else UserObjRef(userId)
def _createUserDocIfUsernameDNE(self, username, id="", password:str=""):
usernameExists = self.isUsernameInUse(username)
idInUse = self.isIdInUse(id)
uuid = id if not idInUse else self.createSafeCookieId()
self._addUserToColl(uuid, username, password, None)
def _createUserDocIfIdDNE(self, id, username="", password:str=""):
idInUse = self.isIdInUse(id)
if idInUse: return True
username = "" if username == "" or username == None or self.isUsernameInUse(username) else username
self._addUserToColl(id, username, password, None)
return False
def __checkIfUserValid(self, userDoc:dict):
if utils.keyExists(userDoc, "User") and userDoc["User"] != None:
serializedUserObj = userDoc["User"]
userObj = self._deserializeData(serializedUserObj)
else: userObj = None
return userObj
def getPasswordFromUsername(self, username:str)->str():
matches = list(self.usersColl.find({"username": username}))
actualPassword = list(filter(self.filterLocalhost(), matches))[0]["password"]
return actualPassword
def getPasswordFromId(self, myId:str)->str():
match = self._getDocById(self.usersColl, myId)
username = match["password"] if utils.keyExists(match, "password") else ""
return username
def updateUserObjById(self, myId:str, updatedUserObj:object)->dict():
query = {"id": myId}
        serializedUpdatedObj = self._serializeObj(updatedUserObj)
toUpdateWith = {"$set": {"User": serializedUpdatedObj}}
return self.usersColl.update_one(query, toUpdateWith)
def setUsernameById(self, myId:str, username:str):
query = {"id": myId}
toUpdateWith = {"$set": {"username": username}}
return self.usersColl.update_one(query, toUpdateWith)
def setPasswordById(self, myId:str, password:str):
query = {"id": myId}
toUpdateWith = {"$set": {"password": password}}
return self.usersColl.update_one(query, toUpdateWith)
def getUsernameById(self, myId:str)->str():
match = self._getDocById(self.usersColl, myId)
username = match["username"] if utils.keyExists(match, "username") else ""
return username
| true | true |
f7ff84d32698d8122ff12028df7022ef707fb1cd | 123 | py | Python | af_price_fall_notifier_app/apps.py | chasesagar/Amazon-Scraper | b93ba83660f1c95173f7b35bd59221b4ed0ce37d | [
"MIT"
] | null | null | null | af_price_fall_notifier_app/apps.py | chasesagar/Amazon-Scraper | b93ba83660f1c95173f7b35bd59221b4ed0ce37d | [
"MIT"
] | null | null | null | af_price_fall_notifier_app/apps.py | chasesagar/Amazon-Scraper | b93ba83660f1c95173f7b35bd59221b4ed0ce37d | [
"MIT"
] | 1 | 2021-06-29T18:48:47.000Z | 2021-06-29T18:48:47.000Z | from django.apps import AppConfig
class AfPriceFallNotifierAppConfig(AppConfig):
    """Django AppConfig declaring the af_price_fall_notifier_app application."""
    name = 'af_price_fall_notifier_app'
| 20.5 | 46 | 0.821138 | from django.apps import AppConfig
class AfPriceFallNotifierAppConfig(AppConfig):
name = 'af_price_fall_notifier_app'
| true | true |
f7ff865acd0b8aa528055e46fb258327fcd51e57 | 28,207 | py | Python | renpy/sl2/slparser.py | theizrael/renpy-7.4.10-sdk | 312192c87f92eb0ac29be78d26421856f767279e | [
"Apache-2.0"
] | null | null | null | renpy/sl2/slparser.py | theizrael/renpy-7.4.10-sdk | 312192c87f92eb0ac29be78d26421856f767279e | [
"Apache-2.0"
] | null | null | null | renpy/sl2/slparser.py | theizrael/renpy-7.4.10-sdk | 312192c87f92eb0ac29be78d26421856f767279e | [
"Apache-2.0"
] | null | null | null | # Copyright 2004-2021 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import *
import collections
import renpy.sl2
import renpy.sl2.slast as slast
from ast import literal_eval
# The style prefixes that we know of. Each Style/PrefixStyle property is
# expanded into one keyword per prefix (see Parser.add below).
STYLE_PREFIXES = [
    '',
    'insensitive_',
    'hover_',
    'idle_',
    'activate_',
    'selected_',
    'selected_insensitive_',
    'selected_hover_',
    'selected_idle_',
    'selected_activate_',
]
##############################################################################
# Parsing.

# The parser that Positional/Keyword/Style declarations are currently being
# added to. Set by Parser.__init__, cleared when registration is finished.
parser = None

# All statement parsers we know about.
all_statements = [ ]

# Statement parsers that can contain children.
childbearing_statements = set()
class Positional(object):
    """
    Declares a positional (required, unnamed) argument of a screen language
    statement. Instantiating one registers it with the statement parser that
    is currently being defined, if any.
    """

    def __init__(self, name):
        self.name = name

        # Attach ourselves to the parser currently under construction.
        if parser:
            parser.add(self)
# Maps (prefix, uses_style_prefixes) to the set of property names that have
# been declared with that combination.
properties = collections.defaultdict(set)
class Keyword(object):
    """
    Declares an optional keyword argument of a screen language statement.
    """

    def __init__(self, name):
        self.name = name

        # Record this as a known, non-style-prefixed property.
        properties['', False].add(name)

        if parser:
            parser.add(self)
class Style(object):
    """
    Declares a style property of a screen language statement. When added to
    a parser, it expands into one keyword per style prefix (idle_, hover_,
    selected_..., etc.).
    """

    def __init__(self, name):
        self.name = name

        # Record this as a known property that takes style prefixes.
        properties['', True].add(self.name)

        if parser:
            parser.add(self)
class PrefixStyle(object):
    """
    Declares a style property that carries its own prefix in addition to the
    style prefixes (for example, text_-prefixed properties).
    """

    def __init__(self, prefix, name):
        self.prefix = prefix
        self.name = name

        # Record this as a known, prefixed property that takes style prefixes.
        properties[prefix, True].add(self.name)

        if parser:
            parser.add(self)
class Parser(object):
    """
    Base class for screen language statement parsers. An instance knows the
    positional arguments, keyword arguments, and child statements that its
    statement accepts, and can parse the statement's contents from a lexer.
    """

    # The number of children this statement takes, out of 0, 1, or "many".
    # This defaults to "many" so the has statement errors out when not
    # inside something that takes a single child.
    nchildren = "many"

    def __init__(self, name, statement=True):

        # The name of this object.
        self.name = name

        # The positional arguments, keyword arguments, and child
        # statements of this statement.
        self.positional = [ ]
        self.keyword = { }
        self.children = { }

        # True if this parser takes "as".
        self.variable = False

        if statement:
            all_statements.append(self)

        # Make subsequent Positional/Keyword/Style declarations register
        # with this parser.
        global parser
        parser = self

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.name)

    def add(self, i):
        """
        Adds a clause (Positional, Keyword, Style, PrefixStyle, child
        Parser, or a list of these) to this parser.
        """

        if isinstance(i, list):
            for j in i:
                self.add(j)
            return

        if isinstance(i, Positional):
            self.positional.append(i)
        elif isinstance(i, Keyword):
            self.keyword[i.name] = i
        elif isinstance(i, Style):
            # A style property accepts every known style prefix.
            for j in STYLE_PREFIXES:
                self.keyword[j + i.name] = i
        elif isinstance(i, PrefixStyle):
            for j in STYLE_PREFIXES:
                self.keyword[i.prefix + j + i.name] = i
        elif isinstance(i, Parser):
            self.children[i.name] = i

    def parse_statement(self, loc, l, layout_mode=False, keyword=True):
        """
        Parses one child statement, if the next word on `l` names one of
        our children (or is the '$' one-liner). Returns the child's AST
        node, or None otherwise.
        """
        word = l.word() or l.match(r'\$')
        if word and word in self.children:
            if layout_mode:
                c = self.children[word].parse_layout(loc, l, self, keyword)
            else:
                c = self.children[word].parse(loc, l, self, keyword)
            return c
        else:
            return None

    def parse_layout(self, loc, l, parent, keyword):
        # Overridden by parsers that may legitimately follow a has clause.
        l.error("The %s statement cannot be used as a container for the has statement." % self.name)

    def parse(self, loc, l, parent, keyword):
        """
        This is expected to parse a function statement, and to return
        a list of python ast statements.

        `loc`
            The location of the current statement.

        `l`
            The lexer.

        `parent`
            The parent Parser of the current statement.
        """
        raise Exception("Not Implemented")

    def parse_contents(self, l, target, layout_mode=False, can_has=False, can_tag=False, block_only=False, keyword=True):
        """
        Parses the remainder of the current line of `l`, and all of its subblock,
        looking for keywords and children.

        `layout_mode`
            If true, parsing continues to the end of `l`, rather than stopping
            with the end of the first logical line.

        `can_has`
            If true, we should parse layouts.

        `can_tag`
            If true, we should parse the ``tag`` keyword, as it's used by
            screens.

        `block_only`
            If true, only parse the block and not the initial properties.

        `keyword`
            If false, non-constant keyword arguments are rejected (used
            after a python block has been seen).
        """

        seen_keywords = set()

        block = False

        # Parses a keyword argument from the lexer.
        def parse_keyword(l, expect, first_line):
            name = l.word()
            if name is None:
                l.error(expect)
            # "tag" is special-cased for screens.
            if can_tag and name == "tag":
                if target.tag is not None:
                    l.error('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
                target.tag = l.require(l.word)
                l.expect_noblock(name)
                return True
            # "as <variable>" clause, for statements that support it.
            if self.variable:
                if name == "as":
                    if target.variable is not None:
                        l.error('an as clause may only appear once in a %s statement.' % (self.name,))
                    target.variable = l.require(l.word)
                    return
            if name not in self.keyword:
                l.error('%r is not a keyword argument or valid child for the %s statement.' % (name, self.name))
            if name in seen_keywords:
                l.error('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
            seen_keywords.add(name)
            # "at transform:" introduces an inline ATL block.
            if name == "at" and block and l.keyword("transform"):
                l.require(":")
                l.expect_eol()
                l.expect_block("ATL block")
                expr = renpy.atl.parse_atl(l.subblock_lexer())
                target.atl_transform = expr
                return
            expr = l.comma_expression()
            # After a python block, only constant keyword values are allowed
            # (unless config.keyword_after_python relaxes this).
            if (not keyword) and (not renpy.config.keyword_after_python):
                try:
                    literal_eval(expr)
                except:
                    l.error("a non-constant keyword argument like '%s %s' is not allowed after a python block." % (name, expr))
            target.keyword.append((name, expr))
            if not first_line:
                l.expect_noblock(name)

        if block_only:
            l.expect_eol()
            l.expect_block(self.name)
            block = True
        else:
            # If not block_only, we allow keyword arguments on the starting
            # line.
            while True:
                if l.match(':'):
                    l.expect_eol()
                    l.expect_block(self.name)
                    block = True
                    break
                if l.eol():
                    l.expect_noblock(self.name)
                    block = False
                    break
                parse_keyword(l, 'expected a keyword argument, colon, or end of line.', True)

        # A list of lexers we need to parse the contents of.
        lexers = [ ]
        if block:
            lexers.append(l.subblock_lexer())
        if layout_mode:
            lexers.append(l)

        # If we have a block, parse it. This also takes care of parsing the
        # block after a has clause.
        for l in lexers:
            while l.advance():
                # Remember the lexer position so we can rewind if this line
                # turns out to be keyword arguments rather than a child.
                state = l.checkpoint()
                loc = l.get_location()
                if l.keyword(r'has'):
                    if not can_has:
                        l.error("The has statement is not allowed here.")
                    if target.has_noncondition_child():
                        l.error("The has statement may not be given after a child has been supplied.")
                    c = self.parse_statement(loc, l, layout_mode=True, keyword=keyword)
                    if c is None:
                        l.error('Has expects a child statement.')
                    target.children.append(c)
                    # Once a python child is seen, later keywords must be
                    # constant (see parse_keyword above).
                    if c.has_python():
                        keyword = False
                    continue
                c = self.parse_statement(loc, l)
                # Ignore passes.
                if isinstance(c, slast.SLPass):
                    continue
                # If not none, add the child to our AST.
                if c is not None:
                    target.children.append(c)
                    if c.has_python():
                        keyword = False
                    continue
                # Not a child statement: rewind and treat the line as
                # keyword arguments.
                l.revert(state)
                if not l.eol():
                    parse_keyword(l, "expected a keyword argument or child statement.", False)
                while not l.eol():
                    parse_keyword(l, "expected a keyword argument or end of line.", False)

    def add_positional(self, name):
        # The add_* methods below implement the fluent registration API:
        # each re-selects this parser as current and returns self.
        global parser
        parser = self
        Positional(name)
        return self

    def add_property(self, name):
        global parser
        parser = self
        Keyword(name)
        return self

    def add_style_property(self, name):
        global parser
        parser = self
        Style(name)
        return self

    def add_prefix_style_property(self, prefix, name):
        global parser
        parser = self
        PrefixStyle(prefix, name)
        return self

    def add_property_group(self, group, prefix=''):
        global parser
        parser = self
        if group not in renpy.sl2.slproperties.property_groups:
            raise Exception("{!r} is not a known property group.".format(group))
        for prop in renpy.sl2.slproperties.property_groups[group]:
            if isinstance(prop, Keyword):
                Keyword(prefix + prop.name)
            else:
                PrefixStyle(prefix, prop.name)
        return self
def add(thing):
    # Adds `thing` (a Positional, Keyword, Style, PrefixStyle, or Parser)
    # to the statement parser that is currently being defined.
    parser.add(thing)

# A singleton value used to mark statements that take many children.
many = renpy.object.Sentinel("many")
def register_sl_displayable(*args, **kwargs):
    """
    :doc: custom_sl class
    :args: (name, displayable, style, nchildren=0, scope=False, replaces=False, default_keywords={}, default_properties=True)

    Registers a screen language statement that creates a displayable.

    `name`
        The name of the screen language statement, a string containing a Ren'Py
        keyword. This keyword is used to introduce the new statement.

    `displayable`
        This is a function that, when called, returns a displayable
        object. All position arguments, properties, and style properties
        are passed as arguments to this function. Other keyword arguments
        are also given to this function, a described below.

        This must return a Displayable. If it returns multiple displayables,
        the _main attribute of the outermost displayable should be set to
        the "main" displayable - the one that children should be added
        to.

    `style`
        The base name of the style of this displayable. If the style property
        is not given, this will have the style prefix added to it. The
        computed style is passed to the `displayable` function as the
        ``style`` keyword argument.

    `nchildren`
        The number of children of this displayable. One of:

        0
            The displayable takes no children.
        1
            The displayable takes 1 child. If more than one child is given,
            the children are placed in a Fixed.
        "many"
            The displayable takes more than one child.

    The following arguments should be passed in using keyword arguments:

    `replaces`
        If true, and the displayable replaces a prior displayable, that displayable
        is passed as a parameter to the new displayable.

    `default_keywords`
        The default set of keyword arguments to supply to the displayable.

    `default_properties`
        If true, the ui and position properties are added by default.

    Returns an object that can have positional arguments and properties
    added to it by calling the following methods. Each of these methods
    returns the object it is called on, allowing methods to be chained
    together.

    .. method:: add_positional(name)

        Adds a positional argument with `name`

    .. method:: add_property(name)

        Adds a property with `name`. Properties are passed as keyword
        arguments.

    .. method:: add_style_property(name)

        Adds a family of properties, ending with `name` and prefixed with
        the various style property prefixes. For example, if called with
        ("size"), this will define size, idle_size, hover_size, etc.

    .. method:: add_prefix_style_property(prefix, name)

        Adds a family of properties with names consisting of `prefix`,
        a style property prefix, and `name`. For example, if called
        with a prefix of `text_` and a name of `size`, this will
        create text_size, text_idle_size, text_hover_size, etc.

    .. method:: add_property_group(group, prefix='')

        Adds a group of properties, prefixed with `prefix`. `Group` may
        be one of the strings:

        * "bar"
        * "box"
        * "button"
        * "position"
        * "text"
        * "window"

        These correspond to groups of :ref:`style-properties`. Group can
        also be "ui", in which case it adds the :ref:`common ui properties <common-properties>`.
    """

    rv = DisplayableParser(*args, **kwargs)

    # The new statement may appear inside any container statement, and at
    # the top level of a screen.
    for i in childbearing_statements:
        i.add(rv)

    screen_parser.add(rv)

    if rv.nchildren != 0:
        childbearing_statements.add(rv)

        # Containers accept every known statement as a child.
        for i in all_statements:
            rv.add(i)

    rv.add(if_statement)
    rv.add(pass_statement)

    return rv
class DisplayableParser(Parser):
    """
    Parses a statement that creates a displayable, producing an
    SLDisplayable node.
    """

    def __init__(self, name, displayable, style, nchildren=0, scope=False,
                 pass_context=False, imagemap=False, replaces=False, default_keywords={},
                 hotspot=False, default_properties=True):
        """
        `scope`
            If true, the scope is passed into the displayable functionas a keyword
            argument named "scope".

        `pass_context`
            If true, the context is passed as the first positional argument of the
            displayable.

        `imagemap`
            If true, the displayable is treated as defining an imagemap. (The imagemap
            is added to and removed from renpy.ui.imagemap_stack as appropriate.)

        `hotspot`
            If true, the displayable is treated as a hotspot. (It needs to be
            re-created if the imagemap it belongs to has changed.)

        `default_properties`
            If true, the ui and positional properties are added by default.
        """

        super(DisplayableParser, self).__init__(name)

        # The displayable that is called when this statement runs.
        self.displayable = displayable

        if nchildren == "many":
            nchildren = many

        # The number of children we have.
        self.nchildren = nchildren

        if nchildren != 0:
            childbearing_statements.add(self)

        self.style = style
        self.scope = scope
        self.pass_context = pass_context
        self.imagemap = imagemap
        self.hotspot = hotspot
        self.replaces = replaces
        self.default_keywords = default_keywords
        # All displayable statements accept an "as" clause.
        self.variable = True

        # These register with this parser (it is now the current one).
        Keyword("arguments")
        Keyword("properties")

        if default_properties:
            add(renpy.sl2.slproperties.ui_properties)
            add(renpy.sl2.slproperties.position_properties)

    def parse_layout(self, loc, l, parent, keyword):
        # Displayable statements may follow a has clause.
        return self.parse(loc, l, parent, keyword, layout_mode=True)

    def parse(self, loc, l, parent, keyword, layout_mode=False):

        rv = slast.SLDisplayable(
            loc,
            self.displayable,
            scope=self.scope,
            child_or_fixed=(self.nchildren == 1),
            style=self.style,
            pass_context=self.pass_context,
            imagemap=self.imagemap,
            replaces=self.replaces,
            default_keywords=self.default_keywords,
            hotspot=self.hotspot,
        )

        # Consume as many positional arguments as are present (missing ones
        # may be supplied through the "arguments" keyword, checked below).
        for _i in self.positional:
            expr = l.simple_expression()
            if expr is None:
                break
            rv.positional.append(expr)

        can_has = (self.nchildren == 1)
        self.parse_contents(l, rv, layout_mode=layout_mode, can_has=can_has, can_tag=False)

        if len(rv.positional) != len(self.positional):
            if not rv.keyword_exist("arguments"):
                l.error("{} statement expects {} positional arguments, got {}.".format(self.name, len(self.positional), len(rv.positional)))

        return rv
class IfParser(Parser):
    """
    Parses if/elif/else (and showif) statements into a node with a list of
    (condition, block) entries, where the else condition is None.
    """

    def __init__(self, name, node_type, parent_contents):
        """
        `node_type`
            The type of node to create.

        `parent_contents`
            If true, our children must be children of our parent. Otherwise,
            our children must be children of ourself.
        """

        super(IfParser, self).__init__(name)

        self.node_type = node_type
        self.parent_contents = parent_contents

        if not parent_contents:
            childbearing_statements.add(self)

    def parse(self, loc, l, parent, keyword):

        if self.parent_contents:
            contents_from = parent
        else:
            contents_from = self

        rv = self.node_type(loc)

        # The initial "if <condition>:" clause.
        condition = l.require(l.python_expression)
        l.require(':')

        block = slast.SLBlock(loc)
        # NOTE(review): unlike the elif/else branches below, keyword= is not
        # forwarded here -- possibly an oversight; confirm against upstream.
        contents_from.parse_contents(l, block, block_only=True)

        rv.entries.append((condition, block))

        state = l.checkpoint()

        # Consume any following elif/else clauses; rewind and stop at the
        # first statement that is neither.
        while l.advance():
            loc = l.get_location()
            if l.keyword("elif"):
                condition = l.require(l.python_expression)
                l.require(':')
                block = slast.SLBlock(loc)
                contents_from.parse_contents(l, block, block_only=True, keyword=keyword)
                rv.entries.append((condition, block))
                state = l.checkpoint()
            elif l.keyword("else"):
                # An else clause is stored with a None condition.
                condition = None
                l.require(':')
                block = slast.SLBlock(loc)
                contents_from.parse_contents(l, block, block_only=True, keyword=keyword)
                rv.entries.append((condition, block))
                state = l.checkpoint()
                break
            else:
                l.revert(state)
                break

        return rv

if_statement = IfParser("if", slast.SLIf, True)
IfParser("showif", slast.SLShowIf, False)
class ForParser(Parser):
    """
    Parses the for statement, including tuple-pattern targets and the
    optional index clause.
    """

    def __init__(self, name):
        super(ForParser, self).__init__(name)
        childbearing_statements.add(self)

    def name_or_tuple_pattern(self, l):
        """
        Matches either a name or a tuple pattern. If a single name is being
        matched, returns it. Otherwise, returns None.
        """

        name = None
        pattern = False

        while True:
            if l.match(r"\("):
                # A parenthesized (possibly nested) pattern.
                name = self.name_or_tuple_pattern(l)
                l.require(r'\)')
                pattern = True
            else:
                name = l.name()
                if name is None:
                    break
            # A comma means this is a tuple pattern, not a single name.
            if l.match(r","):
                pattern = True
            else:
                break

        if pattern:
            return None
        if name is not None:
            return name

        l.error("expected variable or tuple pattern.")

    def parse(self, loc, l, parent, keyword):

        l.skip_whitespace()
        tuple_start = l.pos
        name = self.name_or_tuple_pattern(l)

        if not name:
            # A tuple pattern: loop over a synthetic variable, and emit a
            # python statement that unpacks it into the pattern each pass.
            name = "_sl2_i"
            pattern = l.text[tuple_start:l.pos]
            stmt = pattern + " = " + name
            code = renpy.ast.PyCode(stmt, loc)
        else:
            code = None

        if l.match('index'):
            index_expression = l.require(l.say_expression)
        else:
            index_expression = None

        l.require('in')
        expression = l.require(l.python_expression)
        l.require(':')
        l.expect_eol()

        rv = slast.SLFor(loc, name, expression, index_expression)

        if code:
            rv.children.append(slast.SLPython(loc, code))

        self.parse_contents(l, rv, block_only=True)

        return rv

ForParser("for")
class OneLinePythonParser(Parser):
    """Parses the ``$ <statement>`` one-line python statement."""

    def parse(self, loc, l, parent, keyword):

        # Use the precise location reported by the lexer.
        loc = l.get_location()
        stmt_source = l.require(l.rest_statement)

        l.expect_eol()
        l.expect_noblock("one-line python")

        compiled = renpy.ast.PyCode(stmt_source, loc)
        return slast.SLPython(loc, compiled)


OneLinePythonParser("$")
class MultiLinePythonParser(Parser):
    """Parses the ``python:`` block statement."""

    def parse(self, loc, l, parent, keyword):

        loc = l.get_location()

        l.require(':')
        l.expect_eol()
        l.expect_block("python block")

        block_source = l.python_block()

        compiled = renpy.ast.PyCode(block_source, loc)
        return slast.SLPython(loc, compiled)


MultiLinePythonParser("python")
class PassParser(Parser):
    """Parses the ``pass`` statement, which produces an SLPass node."""

    def parse(self, loc, l, parent, keyword):
        # pass takes no arguments and no block.
        l.expect_eol()
        l.expect_noblock('pass statement')

        return slast.SLPass(loc)


pass_statement = PassParser("pass")
class DefaultParser(Parser):
    """Parses ``default <name> = <expression>``."""

    def parse(self, loc, l, parent, keyword):

        var_name = l.require(l.word)
        l.require(r'=')
        value_expr = l.rest()

        l.expect_eol()
        l.expect_noblock('default statement')

        return slast.SLDefault(loc, var_name, value_expr)


DefaultParser("default")
class UseParser(Parser):
    """Parses the ``use`` statement, which inserts another screen."""

    def __init__(self, name):
        super(UseParser, self).__init__(name)
        # A use statement may supply a block of children.
        childbearing_statements.add(self)

    def parse(self, loc, l, parent, keyword):

        # The screen may be named directly, or computed via an expression.
        if l.keyword('expression'):
            target = l.require(l.simple_expression)
            l.keyword('pass')
        else:
            target = l.require(l.word)

        arguments = renpy.parser.parse_arguments(l)

        id_expression = l.simple_expression() if l.keyword('id') else None

        if not l.match(':'):
            l.expect_eol()
            l.expect_noblock("use statement")
            contents = None
        else:
            l.expect_eol()
            l.expect_block("use statement")

            contents = slast.SLBlock(loc)
            self.parse_contents(l, contents, can_has=True, block_only=True)

        return slast.SLUse(loc, target, arguments, id_expression, contents)


UseParser("use")
Keyword("style_prefix")
Keyword("style_group")
class TranscludeParser(Parser):
    """Parses the ``transclude`` statement."""

    def parse(self, loc, l, parent, keyword):
        l.expect_eol()
        return slast.SLTransclude(loc)


TranscludeParser("transclude")
class CustomParser(Parser):
    """
    :doc: custom_sl class
    :name: renpy.register_sl_statement

    Registers a custom screen language statement with Ren'Py.

    `name`
        This must be a word. It's the name of the custom screen language
        statement.

    `children`
        The number of children this custom statement takes. This should
        be 0, 1, or "many", which means zero or more.

    `screen`
        The screen to use. If not given, defaults to `name`.

    Returns an object that can have positional arguments and properties
    added to it. This object has the same .add_ methods as the objects
    returned by :class:`renpy.register_sl_displayable`.
    """

    def __init__(self, name, children="many", screen=None):
        Parser.__init__(self, name)

        if children == "many":
            children = many

        # The new statement may appear inside any container statement, and
        # at the top level of a screen.
        for i in childbearing_statements:
            i.add(self)

        screen_parser.add(self)

        self.nchildren = children

        if self.nchildren != 0:
            childbearing_statements.add(self)

            # Containers accept every known statement as a child.
            for i in all_statements:
                self.add(i)

        self.add_property("arguments")
        self.add_property("properties")

        self.add(if_statement)
        self.add(pass_statement)

        # Stop registering declarations with this parser.
        global parser
        parser = None

        # The screen to use.
        if screen is not None:
            self.screen = screen
        else:
            self.screen = name

    def parse(self, loc, l, parent, keyword):

        arguments = [ ]

        # Parse positional arguments.
        for _i in self.positional:
            expr = l.simple_expression()
            if expr is None:
                break
            arguments.append(expr)

        # Parser keyword arguments and children.
        block = slast.SLBlock(loc)

        can_has = (self.nchildren == 1)
        self.parse_contents(l, block, can_has=can_has, can_tag=False)

        # Missing positional arguments may be supplied through the
        # "arguments" keyword instead.
        if len(arguments) != len(self.positional):
            if not block.keyword_exist("arguments"):
                l.error("{} statement expects {} positional arguments, got {}.".format(self.name, len(self.positional), len(arguments)))

        return slast.SLCustomUse(loc, self.screen, arguments, block)
class ScreenParser(Parser):
    """Parses the top-level `screen` statement."""

    def __init__(self):
        # statement=False: `screen` may not appear inside another screen.
        super(ScreenParser, self).__init__("screen", statement=False)

    def parse(self, loc, l, parent, name="_name", keyword=True):
        screen = slast.SLScreen(loc)

        screen.name = l.require(l.word)
        screen.parameters = renpy.parser.parse_parameters(l)

        self.parse_contents(l, screen, can_tag=True)

        # Lift the screen-level keyword properties out of the parsed
        # keyword list, defaulting each one (values are expression
        # strings, evaluated later).
        keyword = dict(screen.keyword)

        screen.modal = keyword.get("modal", "False")
        screen.zorder = keyword.get("zorder", "0")
        screen.variant = keyword.get("variant", "None")
        screen.predict = keyword.get("predict", "None")
        screen.layer = keyword.get("layer", "'screens'")
        screen.sensitive = keyword.get("sensitive", "True")

        return screen

screen_parser = ScreenParser()
# Keyword properties accepted at the top level of a screen statement.
Keyword("modal")
Keyword("zorder")
Keyword("variant")
Keyword("predict")
Keyword("style_group")
Keyword("style_prefix")
Keyword("layer")
Keyword("sensitive")

# Clear the "most recently constructed parser" target used by the add_*
# helpers while the tables above were being built.
parser = None
def init():
    """Wire up which statements may appear inside which others.

    Runs once, after every statement parser has been constructed.
    """
    # The screen statement itself accepts every registered statement.
    screen_parser.add(all_statements)

    for stmt in all_statements:
        if stmt in childbearing_statements:
            # Container statements accept any statement as a child.
            stmt.add(all_statements)
        else:
            # Leaf statements still allow conditional (`if`) properties.
            stmt.add(if_statement)

        stmt.add(pass_statement)
def parse_screen(l, loc):
    """
    Parses the screen statement.
    """

    # Delegates to the module-level ScreenParser; returns an SLScreen node.
    return screen_parser.parse(loc, l, None)
| 27.044104 | 140 | 0.585812 |
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import *
import collections
import renpy.sl2
import renpy.sl2.slast as slast
from ast import literal_eval
# Every style-property prefix recognized by the screen language; the empty
# string covers the unprefixed property.
STYLE_PREFIXES = [
    '',
    'insensitive_',
    'hover_',
    'idle_',
    'activate_',
    'selected_',
    'selected_insensitive_',
    'selected_hover_',
    'selected_idle_',
    'selected_activate_',
]
rror('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
target.tag = l.require(l.word)
l.expect_noblock(name)
return True
if self.variable:
if name == "as":
if target.variable is not None:
l.error('an as clause may only appear once in a %s statement.' % (self.name,))
target.variable = l.require(l.word)
return
if name not in self.keyword:
l.error('%r is not a keyword argument or valid child for the %s statement.' % (name, self.name))
if name in seen_keywords:
l.error('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
seen_keywords.add(name)
if name == "at" and block and l.keyword("transform"):
l.require(":")
l.expect_eol()
l.expect_block("ATL block")
expr = renpy.atl.parse_atl(l.subblock_lexer())
target.atl_transform = expr
return
expr = l.comma_expression()
if (not keyword) and (not renpy.config.keyword_after_python):
try:
literal_eval(expr)
except:
l.error("a non-constant keyword argument like '%s %s' is not allowed after a python block." % (name, expr))
target.keyword.append((name, expr))
if not first_line:
l.expect_noblock(name)
if block_only:
l.expect_eol()
l.expect_block(self.name)
block = True
else:
while True:
if l.match(':'):
l.expect_eol()
l.expect_block(self.name)
block = True
break
if l.eol():
l.expect_noblock(self.name)
block = False
break
parse_keyword(l, 'expected a keyword argument, colon, or end of line.', True)
lexers = [ ]
if block:
lexers.append(l.subblock_lexer())
if layout_mode:
lexers.append(l)
for l in lexers:
while l.advance():
state = l.checkpoint()
loc = l.get_location()
if l.keyword(r'has'):
if not can_has:
l.error("The has statement is not allowed here.")
if target.has_noncondition_child():
l.error("The has statement may not be given after a child has been supplied.")
c = self.parse_statement(loc, l, layout_mode=True, keyword=keyword)
if c is None:
l.error('Has expects a child statement.')
target.children.append(c)
if c.has_python():
keyword = False
continue
c = self.parse_statement(loc, l)
if isinstance(c, slast.SLPass):
continue
if c is not None:
target.children.append(c)
if c.has_python():
keyword = False
continue
l.revert(state)
if not l.eol():
parse_keyword(l, "expected a keyword argument or child statement.", False)
while not l.eol():
parse_keyword(l, "expected a keyword argument or end of line.", False)
def add_positional(self, name):
global parser
parser = self
Positional(name)
return self
def add_property(self, name):
global parser
parser = self
Keyword(name)
return self
def add_style_property(self, name):
global parser
parser = self
Style(name)
return self
def add_prefix_style_property(self, prefix, name):
global parser
parser = self
PrefixStyle(prefix, name)
return self
def add_property_group(self, group, prefix=''):
global parser
parser = self
if group not in renpy.sl2.slproperties.property_groups:
raise Exception("{!r} is not a known property group.".format(group))
for prop in renpy.sl2.slproperties.property_groups[group]:
if isinstance(prop, Keyword):
Keyword(prefix + prop.name)
else:
PrefixStyle(prefix, prop.name)
return self
def add(thing):
    # Convenience used while building the tables below: forwards to the
    # most recently constructed parser (tracked in the module-global
    # `parser`).
    parser.add(thing)

# Sentinel meaning "this displayable accepts any number of children".
many = renpy.object.Sentinel("many")
def register_sl_displayable(*args, **kwargs):
    # Create a parser for the displayable and hook it into every place a
    # displayable statement may legally occur.
    rv = DisplayableParser(*args, **kwargs)

    for i in childbearing_statements:
        i.add(rv)

    screen_parser.add(rv)

    # If it can take children, it also accepts every statement as a child.
    if rv.nchildren != 0:
        childbearing_statements.add(rv)

        for i in all_statements:
            rv.add(i)

    rv.add(if_statement)
    rv.add(pass_statement)

    return rv
class DisplayableParser(Parser):
def __init__(self, name, displayable, style, nchildren=0, scope=False,
pass_context=False, imagemap=False, replaces=False, default_keywords={},
hotspot=False, default_properties=True):
super(DisplayableParser, self).__init__(name)
self.displayable = displayable
if nchildren == "many":
nchildren = many
self.nchildren = nchildren
if nchildren != 0:
childbearing_statements.add(self)
self.style = style
self.scope = scope
self.pass_context = pass_context
self.imagemap = imagemap
self.hotspot = hotspot
self.replaces = replaces
self.default_keywords = default_keywords
self.variable = True
Keyword("arguments")
Keyword("properties")
if default_properties:
add(renpy.sl2.slproperties.ui_properties)
add(renpy.sl2.slproperties.position_properties)
def parse_layout(self, loc, l, parent, keyword):
return self.parse(loc, l, parent, keyword, layout_mode=True)
def parse(self, loc, l, parent, keyword, layout_mode=False):
rv = slast.SLDisplayable(
loc,
self.displayable,
scope=self.scope,
child_or_fixed=(self.nchildren == 1),
style=self.style,
pass_context=self.pass_context,
imagemap=self.imagemap,
replaces=self.replaces,
default_keywords=self.default_keywords,
hotspot=self.hotspot,
)
for _i in self.positional:
expr = l.simple_expression()
if expr is None:
break
rv.positional.append(expr)
can_has = (self.nchildren == 1)
self.parse_contents(l, rv, layout_mode=layout_mode, can_has=can_has, can_tag=False)
if len(rv.positional) != len(self.positional):
if not rv.keyword_exist("arguments"):
l.error("{} statement expects {} positional arguments, got {}.".format(self.name, len(self.positional), len(rv.positional)))
return rv
class IfParser(Parser):
    """Parses `if`/`showif`, collecting the elif/else clauses that follow."""

    def __init__(self, name, node_type, parent_contents):
        super(IfParser, self).__init__(name)

        # AST node class to instantiate (e.g. SLIf or SLShowIf).
        self.node_type = node_type

        # When true, child statements are parsed with the parent's parser,
        # so `if` admits whatever its parent admits; when false this
        # statement bears its own children.
        self.parent_contents = parent_contents

        if not parent_contents:
            childbearing_statements.add(self)

    def parse(self, loc, l, parent, keyword):

        if self.parent_contents:
            contents_from = parent
        else:
            contents_from = self

        rv = self.node_type(loc)

        condition = l.require(l.python_expression)

        l.require(':')

        block = slast.SLBlock(loc)
        contents_from.parse_contents(l, block, block_only=True)

        rv.entries.append((condition, block))

        # Checkpoint so we can rewind if the next line is not elif/else.
        state = l.checkpoint()

        while l.advance():

            loc = l.get_location()

            if l.keyword("elif"):

                condition = l.require(l.python_expression)
                l.require(':')

                block = slast.SLBlock(loc)
                contents_from.parse_contents(l, block, block_only=True, keyword=keyword)

                rv.entries.append((condition, block))

                state = l.checkpoint()

            elif l.keyword("else"):

                # A None condition marks the unconditional else clause.
                condition = None
                l.require(':')

                block = slast.SLBlock(loc)
                contents_from.parse_contents(l, block, block_only=True, keyword=keyword)

                rv.entries.append((condition, block))

                state = l.checkpoint()
                break

            else:
                l.revert(state)
                break

        return rv
if_statement = IfParser("if", slast.SLIf, True)
IfParser("showif", slast.SLShowIf, False)
class ForParser(Parser):
def __init__(self, name):
super(ForParser, self).__init__(name)
childbearing_statements.add(self)
def name_or_tuple_pattern(self, l):
name = None
pattern = False
while True:
if l.match(r"\("):
name = self.name_or_tuple_pattern(l)
l.require(r'\)')
pattern = True
else:
name = l.name()
if name is None:
break
if l.match(r","):
pattern = True
else:
break
if pattern:
return None
if name is not None:
return name
l.error("expected variable or tuple pattern.")
def parse(self, loc, l, parent, keyword):
l.skip_whitespace()
tuple_start = l.pos
name = self.name_or_tuple_pattern(l)
if not name:
name = "_sl2_i"
pattern = l.text[tuple_start:l.pos]
stmt = pattern + " = " + name
code = renpy.ast.PyCode(stmt, loc)
else:
code = None
if l.match('index'):
index_expression = l.require(l.say_expression)
else:
index_expression = None
l.require('in')
expression = l.require(l.python_expression)
l.require(':')
l.expect_eol()
rv = slast.SLFor(loc, name, expression, index_expression)
if code:
rv.children.append(slast.SLPython(loc, code))
self.parse_contents(l, rv, block_only=True)
return rv
ForParser("for")
class OneLinePythonParser(Parser):
    """Parses `$ <statement>`, a single line of python."""

    def parse(self, loc, l, parent, keyword):
        loc = l.get_location()
        source = l.require(l.rest_statement)

        l.expect_eol()
        l.expect_noblock("one-line python")

        code = renpy.ast.PyCode(source, loc)
        return slast.SLPython(loc, code)

OneLinePythonParser("$")


class MultiLinePythonParser(Parser):
    """Parses `python:` followed by an indented block of python."""

    def parse(self, loc, l, parent, keyword):
        loc = l.get_location()

        l.require(':')
        l.expect_eol()
        l.expect_block("python block")

        source = l.python_block()

        code = renpy.ast.PyCode(source, loc)
        return slast.SLPython(loc, code)

MultiLinePythonParser("python")


class PassParser(Parser):
    """Parses `pass`, a no-op statement."""

    def parse(self, loc, l, parent, keyword):
        l.expect_eol()
        l.expect_noblock('pass statement')

        return slast.SLPass(loc)

pass_statement = PassParser("pass")


class DefaultParser(Parser):
    """Parses `default <name> = <expression>`."""

    def parse(self, loc, l, parent, keyword):

        name = l.require(l.word)
        l.require(r'=')

        rest = l.rest()

        l.expect_eol()
        l.expect_noblock('default statement')

        return slast.SLDefault(loc, name, rest)

DefaultParser("default")
class UseParser(Parser):
def __init__(self, name):
super(UseParser, self).__init__(name)
childbearing_statements.add(self)
def parse(self, loc, l, parent, keyword):
if l.keyword('expression'):
target = l.require(l.simple_expression)
l.keyword('pass')
else:
target = l.require(l.word)
args = renpy.parser.parse_arguments(l)
if l.keyword('id'):
id_expr = l.simple_expression()
else:
id_expr = None
if l.match(':'):
l.expect_eol()
l.expect_block("use statement")
block = slast.SLBlock(loc)
self.parse_contents(l, block, can_has=True, block_only=True)
else:
l.expect_eol()
l.expect_noblock("use statement")
block = None
return slast.SLUse(loc, target, args, id_expr, block)
UseParser("use")
Keyword("style_prefix")
Keyword("style_group")
class TranscludeParser(Parser):
def parse(self, loc, l, parent, keyword):
l.expect_eol()
return slast.SLTransclude(loc)
TranscludeParser("transclude")
class CustomParser(Parser):
def __init__(self, name, children="many", screen=None):
Parser.__init__(self, name)
if children == "many":
children = many
for i in childbearing_statements:
i.add(self)
screen_parser.add(self)
self.nchildren = children
if self.nchildren != 0:
childbearing_statements.add(self)
for i in all_statements:
self.add(i)
self.add_property("arguments")
self.add_property("properties")
self.add(if_statement)
self.add(pass_statement)
global parser
parser = None
if screen is not None:
self.screen = screen
else:
self.screen = name
def parse(self, loc, l, parent, keyword):
arguments = [ ]
for _i in self.positional:
expr = l.simple_expression()
if expr is None:
break
arguments.append(expr)
block = slast.SLBlock(loc)
can_has = (self.nchildren == 1)
self.parse_contents(l, block, can_has=can_has, can_tag=False)
if len(arguments) != len(self.positional):
if not block.keyword_exist("arguments"):
l.error("{} statement expects {} positional arguments, got {}.".format(self.name, len(self.positional), len(arguments)))
return slast.SLCustomUse(loc, self.screen, arguments, block)
class ScreenParser(Parser):
def __init__(self):
super(ScreenParser, self).__init__("screen", statement=False)
def parse(self, loc, l, parent, name="_name", keyword=True):
screen = slast.SLScreen(loc)
screen.name = l.require(l.word)
screen.parameters = renpy.parser.parse_parameters(l)
self.parse_contents(l, screen, can_tag=True)
keyword = dict(screen.keyword)
screen.modal = keyword.get("modal", "False")
screen.zorder = keyword.get("zorder", "0")
screen.variant = keyword.get("variant", "None")
screen.predict = keyword.get("predict", "None")
screen.layer = keyword.get("layer", "'screens'")
screen.sensitive = keyword.get("sensitive", "True")
return screen
screen_parser = ScreenParser()
Keyword("modal")
Keyword("zorder")
Keyword("variant")
Keyword("predict")
Keyword("style_group")
Keyword("style_prefix")
Keyword("layer")
Keyword("sensitive")
parser = None
def init():
    # Wire the statement tables: the screen statement accepts everything;
    # childbearing statements accept everything; leaf statements accept
    # only `if`; every statement accepts `pass`.
    screen_parser.add(all_statements)

    for i in all_statements:
        if i in childbearing_statements:
            i.add(all_statements)
        else:
            i.add(if_statement)

        i.add(pass_statement)


def parse_screen(l, loc):
    # Entry point used by the Ren'Py parser to parse a screen statement.
    return screen_parser.parse(loc, l, None)
| true | true |
f7ff88ade726ea9906f544491a027b68a650a86a | 17,342 | py | Python | avclass2/avclass2_update_module.py | kazuya-n/avclass | eafc5755d96eb6defb222be4c18831e4f75fb038 | [
"MIT"
] | null | null | null | avclass2/avclass2_update_module.py | kazuya-n/avclass | eafc5755d96eb6defb222be4c18831e4f75fb038 | [
"MIT"
] | null | null | null | avclass2/avclass2_update_module.py | kazuya-n/avclass | eafc5755d96eb6defb222be4c18831e4f75fb038 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
AVClass2 Update module
'''
import sys
import os
import argparse
import logging
# Make sure paths are relative to execution path
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'lib/'))
from operator import itemgetter
from collections import namedtuple
from avclass2_common import Taxonomy, Expansion, Tagging
# from Levenshtein import ratio as levenshtein_ratio
# Set logging
log = logging.getLogger(__name__)
# Log info and above to stderr (the threshold below is INFO, so progress
# messages are shown, not just warnings)
formatter = logging.Formatter(u'%(message)s')
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setLevel(logging.INFO)
handler_stderr.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(handler_stderr)

# Default tagging file
default_tagging_file = os.path.join(script_dir, "data/tagging")
# Default expansion file
default_expansion_file = os.path.join(script_dir, "data/expansion")
# Default taxonomy file
default_taxonomy_file = os.path.join(script_dir, "data/taxonomy")

# Threshold for string similarity (used only by the disabled
# Levenshtein-based alias check)
sim_threshold = 0.6

# Relation between two tags: per-tag sample counts, the co-occurrence
# count, and the co-occurrence ratio in each direction
Rel = namedtuple('Rel', ['t1', 't2', 't1_num', 't2_num',
                         'nalias_num', 'talias_num', 'tinv_alias_num'])
class Update:
    ''' Update Module.

    Consumes the tag co-occurrence relations produced by the labeler and
    derives updates (aliases, new tags, expansion rules) for the taxonomy,
    tagging, and expansion knowledge bases, which are modified in place.
    '''

    def __init__(self, rel_filepath, in_taxonomy, in_tagging, in_expansion,
                 n, t):
        # Knowledge bases updated in place as relations are processed
        self.__out_taxonomy = in_taxonomy
        self.__out_tagging = in_tagging
        self.__out_expansion = in_expansion
        # Thresholds: minimum co-occurrence count (n) and ratio (t)
        self.__n = n
        self.__t = t
        # Initialize blacklist: relations involving platform tags are ignored
        self.blist = in_taxonomy.platform_tags()
        log.debug(self.blist)
        # Maps tag -> number of samples the tag was seen in
        self.src_map = {}
        # Read relations from file, dropping weak/blacklisted ones
        self.rel_set = self.read_relations(rel_filepath)

    def num_rules(self):
        ''' Return number of relations pending processing '''
        return len(self.rel_set)

    def is_weak_rel(self, rel):
        ''' Return true if relationship is weak,
            i.e., does not meet thresholds '''
        return ((int(rel.nalias_num) < self.__n) or
                (float(rel.talias_num) < self.__t))

    def is_blacklisted_rel(self, rel):
        ''' Return true if relationship is blacklisted '''
        return (rel.t1 in self.blist) or (rel.t2 in self.blist)

    def is_known_rel(self, rel):
        ''' Return true if relationship is already captured by the
            taxonomy, expansion rules, or tagging rules '''
        t1 = rel.t1
        t2 = rel.t2
        # Known taxonomy relation
        if self.__out_taxonomy.overlaps(t1, t2):
            return True
        # Known expansion rule
        t1_dst = self.__out_expansion.get_dst(t1)
        t2_dst = self.__out_expansion.get_dst(t2)
        if (t2 in t1_dst) or (t1 in t2_dst):
            return True
        # Known tagging rule
        t1_dst = sorted(self.__out_tagging.get_dst(t1))
        t2_dst = sorted(self.__out_tagging.get_dst(t2))
        if (t2 in t1_dst) or (t1 in t2_dst):
            return True
        # Known alias in tagging (both tags map to the same destinations)
        if t1_dst and (t1_dst == t2_dst):
            return True
        return False

    def add_tag(self, name, path):
        ''' Add tag to taxonomy if not in tagging '''
        l = self.__out_tagging.get_dst(name)
        if not l:
            self.__out_taxonomy.add_tag(path)

    def add_expansion(self, src, dst_l):
        ''' Add expansion rule, fixing destination if src in tagging '''
        # Select source handling aliases
        l = self.__out_tagging.get_dst(src)
        if l:
            new_src = l[0]
        else:
            new_src = src
        # Select destinations removing overlaps with existing rule
        l = self.__out_expansion.get_dst(src)
        if l:
            l.extend(dst_l)
            target_l = self.__out_taxonomy.remove_overlaps(l)
            self.__out_expansion.add_rule(new_src, target_l, True)
        else:
            self.__out_expansion.add_rule(new_src, dst_l, True)

    def add_alias(self, src, dst, dst_prefix):
        ''' Add alias relation to taxonomy, tagging '''
        # If src in tagging, use most popular target
        l = self.__out_tagging.get_dst(src)
        target = dst
        if l:
            cnt_max = self.src_map[dst]
            for e in l:
                cnt = self.src_map.get(e, 0)
                # FIX: track the running maximum so the *most* popular
                # target is chosen, not the last one above dst's count
                if cnt > cnt_max:
                    cnt_max = cnt
                    target = e
        # If dst is in tagging, update tagging rule destination,
        l = self.__out_tagging.get_dst(dst)
        if l:
            target_l = l
        # else add dst to taxonomy
        else:
            target_l = [target]
            self.__out_taxonomy.add_tag('%s:%s' % (dst_prefix, dst))
        # Remove src from taxonomy
        self.__out_taxonomy.remove_tag(src)
        # Replace tagging rule
        self.__out_tagging.add_rule(src, target_l, True)

    def is_expansion_rel(self, rel):
        ''' Return true if relation implies expansion rule '''
        c1 = self.__out_taxonomy.get_category(rel.t1)
        c2 = self.__out_taxonomy.get_category(rel.t2)
        return (((c1 == "FAM") and (c2 != c1) and (c2 != "UNK")) or
                ((c1 == "CLASS") and ((c2 == "FILE") or (c2 == "BEH"))) or
                ((c1 == "UNK") and ((c2 == "BEH") or (c2 == "CLASS"))))

    def find_expansions(self):
        ''' Move relations implying expansion rules into the expansion
            file, removing them from the pending set '''
        acc = []
        for rel in self.rel_set:
            p1 = self.__out_taxonomy.get_path(rel.t1)
            p2 = self.__out_taxonomy.get_path(rel.t2)
            log.debug("Processing %s\t%s" % (p1, p2))
            if self.is_expansion_rel(rel):
                self.add_expansion(rel.t1, [rel.t2])
                acc.append(rel)
        for rel in acc:
            self.rel_set.remove(rel)

    # NOTE: an experimental graph-based alias-clustering step
    # (is_alias_rel/find_aliases) was removed as dead, commented-out code.

    def process_relation(self, rel):
        ''' Process relation and update taxonomy/tagging correspondingly.

            Returns 1 if the relation was consumed, 0 if it should be
            revisited in a later pass, -1 if it is inconsistent. '''
        # Obtain tag info
        t1 = rel.t1
        t2 = rel.t2
        p1, c1 = self.__out_taxonomy.get_info(rel.t1)
        p2, c2 = self.__out_taxonomy.get_info(rel.t2)

        log.debug("Processing %s\t%s" % (p1, p2))

        # If both directions strong, then equivalent, i.e., alias.
        # FIX: compare against the instance threshold instead of the
        # module-global `args`, which only exists when run as a script.
        if float(rel.tinv_alias_num) >= self.__t:
            if (c1 != "UNK") and (c2 == "UNK"):
                prefix = p1[0:p1.rfind(':')]
            elif (c1 == "UNK") and (c2 != "UNK"):
                prefix = p2[0:p2.rfind(':')]
            elif (c1 == "UNK") and (c2 == "UNK"):
                prefix = "FAM"
            elif c1 == c2:
                prefix = p1[0:p1.rfind(':')]
            else:
                # FIX: log.warn is a deprecated alias of log.warning
                log.warning("Equivalent rule with different categories: "
                            "%s\t%s" % (p1, p2))
                return -1
            self.add_alias(t1, t2, prefix)
            return 1
        # UNK -> FAM : alias-family
        elif (c1 == "UNK") and (c2 == 'FAM'):
            self.add_alias(t1, t2, "FAM")
            return 1
        # UNK -> CLASS : taxonomy-family
        # Return 0 so that expansion handled at end
        elif (c1 == "UNK") and (c2 == 'CLASS'):
            self.add_tag(t1, 'FAM:%s' % t1)
            return 0
        # UNK -> BEH : taxonomy-family
        # Return 0 so that expansion handled at end
        elif (c1 == "UNK") and (c2 == 'BEH'):
            self.add_tag(t1, 'FAM:%s' % t1)
            return 0
        # UNK -> FILE : taxonomy-file
        elif (c1 == "UNK") and (c2 == 'FILE'):
            self.add_tag(t1, '%s:%s' % (p2, t1))
            return 1
        # UNK -> UNK : alias-family
        elif (c1 == "UNK") and (c2 == "UNK"):
            self.add_alias(t1, t2, "FAM")
            return 1
        # FAM -> UNK : alias-family
        elif (c1 == "FAM") and (c2 == "UNK"):
            self.add_alias(t1, t2, "FAM")
            return 1
        # FILE -> UNK : alias-file
        elif (c1 == "FILE") and (c2 == "UNK"):
            prefix = p1[0:p1.rfind(':')]
            self.add_alias(t1, t2, prefix)
            return 1
        # FAM -> FAM : alias
        elif (c1 == "FAM") and (c2 == "FAM"):
            prefix = p2[0:p2.rfind(':')]
            self.add_alias(t1, t2, prefix)
            return 1
        # Target unknown: leave for a later pass
        elif c2 == "UNK":
            return 0
        # Default: leave for manual taxonomy review
        else:
            return 0

    def run(self):
        ''' Iterate over the relations until a fixpoint is reached,
            then extract expansion rules from what remains '''
        num_iter = 0
        while self.rel_set:
            # Do a pass over the remaining relations
            cnt = 0
            new_set = set()
            log.debug("[-] %03d Processing relations" % num_iter)
            while self.rel_set:
                rel = self.rel_set.pop()
                # If known relation, nothing to do
                if self.is_known_rel(rel):
                    continue
                # Process relation; zero means "retry next pass"
                result = self.process_relation(rel)
                if result:
                    cnt += 1
                else:
                    new_set.add(rel)
            # Update relation set
            self.rel_set = new_set
            # If no relations processed, we reached a fixpoint
            if cnt == 0:
                break
            num_iter += 1
        # Find expansions among the leftover relations
        log.debug("[-] Finding expansions")
        self.find_expansions()

    def read_relations(self, filepath):
        ''' Returns relations in file as a set.
            Filters weak and blacklisted relations '''
        rel_set = set()
        with open(filepath, 'r') as fd:
            for line in fd:
                # Ignore comments
                if line.startswith('#'):
                    continue
                # Parse line
                t1, t2, t1_num, t2_num, nalias_num, talias_num, \
                    tinv_alias_num = line.strip().split('\t')
                # Build relation
                rel = Rel(t1, t2, t1_num, t2_num, nalias_num,
                          talias_num, tinv_alias_num)
                # Ignore weak relations
                if self.is_weak_rel(rel):
                    continue
                # Ignore blacklisted relations
                if self.is_blacklisted_rel(rel):
                    continue
                # Add relation to set
                rel_set.add(rel)
                # Record how often each tag was seen
                self.src_map[rel.t1] = rel.t1_num
                self.src_map[rel.t2] = rel.t2_num
        return rel_set

    def output_relations(self, filepath):
        ''' Output the pending relations to the given file '''
        # FIX: use a context manager so the file is closed on error
        with open(filepath, 'w') as fd:
            fd.write("# t1\tt2\t|t1|\t|t2|\t|t1^t2|\t|t1^t2|/|t1|\t"
                     "|t1^t2|/|t2|\n")
            sorted_rules = sorted(self.rel_set,
                                  key=(lambda r: (
                                      self.__out_taxonomy.get_category(r.t1),
                                      self.__out_taxonomy.get_category(r.t2))),
                                  reverse=False)
            for rel in sorted_rules:
                p1, c1 = self.__out_taxonomy.get_info(rel.t1)
                p2, c2 = self.__out_taxonomy.get_info(rel.t2)
                fd.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
                    p1, p2, rel.t1_num, rel.t2_num, rel.nalias_num,
                    rel.talias_num, rel.tinv_alias_num))

    def output_rule_stats(self, fd):
        ''' Output statistics about the pending relations to the given
            file descriptor '''
        # Initialize maps for statistics
        self.dst_map = {}
        self.cat_pairs_map = {}
        # Compute rule statistics
        for rel in self.rel_set:
            c1 = self.__out_taxonomy.get_category(rel.t1)
            c2 = self.__out_taxonomy.get_category(rel.t2)
            self.cat_pairs_map[(c1, c2)] = self.cat_pairs_map.get((c1, c2),
                                                                  0) + 1
            self.dst_map[rel.t2] = self.dst_map.get(rel.t2, 0) + 1
        # FIX: read the maps from self rather than the module-global
        # `update`/`taxonomy`, so the method works outside the script
        cat_pairs = sorted(self.cat_pairs_map.items(), key=itemgetter(1, 0),
                           reverse=True)
        for (c1, c2), cnt in cat_pairs:
            fd.write("%s\t%s\t%03d\n" % (c1, c2, cnt))
        # Print dst statistics
        dst_pairs = sorted(self.dst_map.items(), key=itemgetter(1, 0),
                           reverse=False)
        for dst, cnt in dst_pairs:
            fd.write("%s\t%03d\n" % (self.__out_taxonomy.get_path(dst), cnt))

    def output(self, out_prefix):
        ''' Write the updated taxonomy, tagging, and expansion files.
            If out_prefix is None, the default data files are overwritten '''
        if not out_prefix:
            tax_filepath = default_taxonomy_file
            tag_filepath = default_tagging_file
            exp_filepath = default_expansion_file
        else:
            tax_filepath = out_prefix + ".taxonomy"
            tag_filepath = out_prefix + ".tagging"
            exp_filepath = out_prefix + ".expansion"
        # FIX: write the objects held by this instance, not module globals
        self.__out_taxonomy.to_file(tax_filepath)
        log.info('[-] Output %d taxonomy tags to %s' % (
            len(self.__out_taxonomy), tax_filepath))
        self.__out_tagging.expand_all_destinations()
        self.__out_tagging.to_file(tag_filepath)
        # FIX: log the tagging path (was printing the taxonomy path)
        log.info('[-] Output %d tagging rules to %s' % (
            len(self.__out_tagging), tag_filepath))
        self.__out_expansion.to_file(exp_filepath)
        log.info('[-] Output %d expansion rules to %s' % (
            len(self.__out_expansion), exp_filepath))
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(
        description='''Given a .alias file from the labeler,
        generates updates for the taxonomy, tagging, and expansion files.''')

    argparser.add_argument('-alias',
        help='file to parse with alias from labeler. '
             'Labeler will run if -alias not present')

    argparser.add_argument('-n',
        # FIX: typo "tokes" and missing space before "Default"
        help='Minimum number of times that a pair of tokens have been seen. '
             'Default: 20',
        type=int,
        default=20)

    argparser.add_argument('-t',
        # FIX: help said "Default: 1.94" but the actual default is 0.94
        help='Minimum percentage of times two tokens appear together. '
             'Default: 0.94',
        type=float,
        default=0.94)

    argparser.add_argument('-o',
        help='output prefix for files')

    argparser.add_argument('-update',
        action='store_true',
        help='update default taxonomy,tagging,expansion files in place')

    argparser.add_argument('-tag',
        help='file with tagging rules.',
        default=default_tagging_file)

    argparser.add_argument('-tax',
        help='file with taxonomy.',
        default=default_taxonomy_file)

    argparser.add_argument('-exp',
        help='file with expansion rules.',
        default=default_expansion_file)

    # Parse arguments
    args = argparser.parse_args()

    # Check we have the input
    if not args.alias:
        log.error('[-] Please provide an alias file with -alias')
        # FIX: use sys.exit; the `exit` builtin is only provided by site
        sys.exit(1)

    # Set output prefix
    if args.o:
        out_prefix = args.o
    else:
        out_prefix = os.path.splitext(args.alias)[0]

    # Read taxonomy
    taxonomy = Taxonomy(args.tax)
    log.info('[-] Read %d taxonomy tags from %s' % (
        len(taxonomy), args.tax))

    # Read tagging rules
    tagging = Tagging(args.tag)
    log.info('[-] Read %d tagging rules from %s' % (
        len(tagging), args.tag))

    # Read expansion rules
    expansion = Expansion(args.exp)
    log.info('[-] Read %d expansion rules from %s' % (
        len(expansion), args.exp))

    # Build update object.
    # FIX: removed a dead branch that referenced an undefined `ifile`;
    # args.alias is guaranteed non-empty by the check above.
    update = Update(args.alias, taxonomy, tagging, expansion, args.n, args.t)
    log.info('[-] Processing %d relations satisfying t>=%.2f n>=%d' % (
        update.num_rules(), args.t, args.n))

    # Output initial rules
    update.output_relations(out_prefix + ".orig.rules")

    # Process relations
    update.run()

    # Output updated taxonomy,tagging,expansion
    if args.update:
        update.output(None)
    else:
        update.output(out_prefix)

    # Output final rules
    update.output_relations(out_prefix + ".final.rules")
| 34.272727 | 79 | 0.547976 |
# NOTE(review): from here on the file duplicates the module above with its
# comments stripped; it appears to be dataset-extraction residue.
import sys
import os
import argparse
import logging

script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'lib/'))

from operator import itemgetter
from collections import namedtuple
from avclass2_common import Taxonomy, Expansion, Tagging

# Route info-level messages and above to stderr
log = logging.getLogger(__name__)
formatter = logging.Formatter(u'%(message)s')
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setLevel(logging.INFO)
handler_stderr.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(handler_stderr)

# Default data file locations
default_tagging_file = os.path.join(script_dir, "data/tagging")
default_expansion_file = os.path.join(script_dir, "data/expansion")
default_taxonomy_file = os.path.join(script_dir, "data/taxonomy")

# String-similarity threshold (unused in this stripped copy)
sim_threshold = 0.6

# Tag co-occurrence relation record
Rel = namedtuple('Rel', ['t1', 't2', 't1_num', 't2_num',
                         'nalias_num', 'talias_num', 'tinv_alias_num'])
class Update:
def __init__(self, rel_filepath, in_taxonomy, in_tagging, in_expansion,
n, t):
self.__out_taxonomy = in_taxonomy
self.__out_tagging = in_tagging
self.__out_expansion = in_expansion
self.__n = n
self.__t = t
self.blist = in_taxonomy.platform_tags()
log.debug(self.blist)
self.src_map = {}
self.rel_set = self.read_relations(rel_filepath)
def num_rules(self):
return len(self.rel_set)
def is_weak_rel(self, rel):
return ((int(rel.nalias_num) < self.__n) or
(float(rel.talias_num) < self.__t))
def is_blacklisted_rel(self, rel):
return (rel.t1 in self.blist) or (rel.t2 in self.blist)
def is_known_rel(self, rel):
t1 = rel.t1
t2 = rel.t2
if self.__out_taxonomy.overlaps(t1,t2):
return True
t1_dst = self.__out_expansion.get_dst(t1)
t2_dst = self.__out_expansion.get_dst(t2)
if (t2 in t1_dst) or (t1 in t2_dst):
return True
t1_dst = sorted(self.__out_tagging.get_dst(t1))
t2_dst = sorted(self.__out_tagging.get_dst(t2))
if (t2 in t1_dst) or (t1 in t2_dst):
return True
if t1_dst and (t1_dst == t2_dst):
return True
return False
def add_tag(self, name, path):
l = self.__out_tagging.get_dst(name)
if (not l):
self.__out_taxonomy.add_tag(path)
def add_expansion(self, src, dst_l):
l = self.__out_tagging.get_dst(src)
if l:
new_src = l[0]
else:
new_src = src
l = self.__out_expansion.get_dst(src)
if l:
l.extend(dst_l)
target_l = self.__out_taxonomy.remove_overlaps(l)
self.__out_expansion.add_rule(new_src, target_l, True)
else:
self.__out_expansion.add_rule(new_src, dst_l, True)
def add_alias(self, src, dst, dst_prefix):
l = self.__out_tagging.get_dst(src)
target = dst
if l:
cnt_max = self.src_map[dst]
for e in l:
cnt = self.src_map.get(e, 0)
if cnt > cnt_max:
target = e
l = self.__out_tagging.get_dst(dst)
if l:
target_l = l
else:
target_l = [target]
self.__out_taxonomy.add_tag('%s:%s' % (dst_prefix, dst))
self.__out_taxonomy.remove_tag(src)
self.__out_tagging.add_rule(src, target_l, True)
def is_expansion_rel(self, rel):
c1 = self.__out_taxonomy.get_category(rel.t1)
c2 = self.__out_taxonomy.get_category(rel.t2)
return (((c1 == "FAM") and (c2 != c1) and (c2 != "UNK")) or
((c1 == "CLASS") and ((c2 == "FILE") or (c2 == "BEH"))) or
((c1 == "UNK") and ((c2 == "BEH") or (c2 == "CLASS"))))
def find_expansions(self):
acc = []
for rel in self.rel_set:
p1 = self.__out_taxonomy.get_path(rel.t1)
p2 = self.__out_taxonomy.get_path(rel.t2)
log.debug("Processing %s\t%s" % (p1, p2))
if self.is_expansion_rel(rel):
self.add_expansion(rel.t1, [rel.t2])
acc.append(rel)
for rel in acc:
self.rel_set.remove(rel)
def process_relation(self, rel):
t1 = rel.t1
t2 = rel.t2
p1,c1 = self.__out_taxonomy.get_info(rel.t1)
p2,c2 = self.__out_taxonomy.get_info(rel.t2)
log.debug("Processing %s\t%s" % (p1, p2))
if (float(rel.tinv_alias_num) >= args.t):
if (c1 != "UNK") and (c2 == "UNK"):
prefix = p1[0:p1.rfind(':')]
elif (c1 == "UNK") and (c2 != "UNK"):
prefix = p2[0:p2.rfind(':')]
elif (c1 == "UNK") and (c2 == "UNK"):
prefix = "FAM"
elif (c1 == c2):
prefix = p1[0:p1.rfind(':')]
else:
log.warn("Equivalent rule with different categories: %s\t%s" %
(p1, p2))
return -1
self.add_alias(t1, t2, prefix)
return 1
elif (c1 == "UNK") and (c2 == 'FAM'):
self.add_alias(t1, t2, "FAM")
return 1
elif (c1 == "UNK") and (c2 == 'CLASS'):
self.add_tag(t1, 'FAM:%s' % t1)
return 0
elif (c1 == "UNK") and (c2 == 'BEH'):
self.add_tag(t1, 'FAM:%s' % t1)
return 0
elif (c1 == "UNK") and (c2 == 'FILE'):
self.add_tag(t1, '%s:%s' % (p2, t1))
return 1
elif (c1 == "UNK") and (c2 == "UNK"):
self.add_alias(t1, t2, "FAM")
return 1
elif (c1 == "FAM") and (c2 == "UNK"):
self.add_alias(t1, t2, "FAM")
return 1
elif (c1 == "FILE") and (c2 == "UNK"):
prefix = p1[0:p1.rfind(':')]
self.add_alias(t1, t2, prefix)
return 1
elif (c1 == "FAM") and (c2 == "FAM"):
prefix = p2[0:p2.rfind(':')]
self.add_alias(t1, t2, prefix)
return 1
elif (c2 == "UNK"):
return 0
else:
return 0
def run(self):
num_iter = 0
while self.rel_set:
cnt = 0
new_set = set()
log.debug("[-] %03d Processing relations" % num_iter)
while self.rel_set:
rel = self.rel_set.pop()
if self.is_known_rel(rel):
continue
result = self.process_relation(rel)
if result:
cnt += 1
else:
new_set.add(rel)
self.rel_set = new_set
if cnt == 0:
break
else:
num_iter += 1
log.debug("[-] Finding expansions")
self.find_expansions()
def read_relations(self, filepath):
rel_set = set()
with open(filepath, 'r') as fd:
for line in fd:
if line.startswith('#'):
continue
t1, t2, t1_num, t2_num, nalias_num, talias_num, \
tinv_alias_num = line.strip().split('\t')
rel = Rel(t1, t2, t1_num, t2_num, nalias_num,
talias_num, tinv_alias_num)
if self.is_weak_rel(rel):
continue
if self.is_blacklisted_rel(rel):
continue
rel_set.add(rel)
self.src_map[rel.t1] = rel.t1_num
self.src_map[rel.t2] = rel.t2_num
return rel_set
def output_relations(self, filepath):
fd = open(filepath, 'w')
fd.write("# t1\tt2\t|t1|\t|t2|\t|t1^t2|\t|t1^t2|/|t1|\t"
"|t1^t2|/|t2|\n")
sorted_rules = sorted(self.rel_set,
key=(lambda r: (
self.__out_taxonomy.get_category(r.t1),
self.__out_taxonomy.get_category(r.t2))),
reverse=False)
for rel in sorted_rules:
p1,c1 = self.__out_taxonomy.get_info(rel.t1)
p2,c2 = self.__out_taxonomy.get_info(rel.t2)
fd.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %(
p1, p2, rel.t1_num, rel.t2_num, rel.nalias_num,
rel.talias_num, rel.tinv_alias_num))
fd.close()
def output_rule_stats(self, fd):
self.dst_map = {}
self.cat_pairs_map = {}
for rel in self.rel_set:
c1 = self.__out_taxonomy.get_category(rel.t1)
c2 = self.__out_taxonomy.get_category(rel.t2)
self.cat_pairs_map[(c1,c2)] = self.cat_pairs_map.get((c1,
c2), 0) + 1
self.dst_map[rel.t2] = self.dst_map.get(rel.t2, 0) + 1
cat_pairs = sorted(update.cat_pairs_map.items(), key=itemgetter(1,0),
reverse=True)
for (c1,c2), cnt in cat_pairs:
fd.write("%s\t%s\t%03d\n" % (c1, c2, cnt))
dst_pairs = sorted(update.dst_map.items(), key=itemgetter(1,0),
reverse=False)
for dst, cnt in dst_pairs:
fd.write("%s\t%03d\n" % (taxonomy.get_path(dst), cnt))
def output(self, out_prefix):
if (not out_prefix):
tax_filepath = default_taxonomy_file
tag_filepath = default_tagging_file
exp_filepath = default_expansion_file
else:
tax_filepath = out_prefix + ".taxonomy"
tag_filepath = out_prefix + ".tagging"
exp_filepath = out_prefix + ".expansion"
taxonomy.to_file(tax_filepath)
log.info('[-] Output %d taxonomy tags to %s' % (
len(taxonomy), tax_filepath))
tagging.expand_all_destinations()
tagging.to_file(tag_filepath)
log.info('[-] Output %d tagging rules to %s' % (
len(tagging), tax_filepath))
expansion.to_file(exp_filepath)
log.info('[-] Output %d expansion rules to %s' % (
len(expansion), exp_filepath))
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(
        description='''Given a .alias file from the labeler,
        generates updates for the taxonomy, tagging, and expansion files.''')
    argparser.add_argument('-alias',
        help='file to parse with alias from labeler'
        'Labeler will run if -alias not present')
    # FIX: typo "tokes" -> "tokens" in the help text.
    argparser.add_argument('-n',
        help='Minimum number of times that a pair of tokens have been seen.'
        'Default: 20',
        type=int,
        default=20)
    # FIX: the advertised default ("1.94") did not match the actual
    # default value (0.94).
    argparser.add_argument('-t',
        help='Minimum percentage of times two tokens appear together.'
        'Default: 0.94',
        type=float,
        default=0.94)
    argparser.add_argument('-o',
        help='output prefix for files')
    argparser.add_argument('-update',
        action='store_true',
        help='update default taxonomy,tagging,expansion files in place')
    argparser.add_argument('-tag',
        help='file with tagging rules.',
        default = default_tagging_file)
    argparser.add_argument('-tax',
        help='file with taxonomy.',
        default = default_taxonomy_file)
    argparser.add_argument('-exp',
        help='file with expansion rules.',
        default = default_expansion_file)
    args = argparser.parse_args()
    # An alias file is mandatory.
    if not args.alias:
        log.error('[-] Please provide an alias file with -alias')
        exit(1)
    # Default output prefix: the alias file name without its extension.
    if args.o:
        out_prefix = args.o
    else:
        out_prefix = os.path.splitext(args.alias)[0]
    taxonomy = Taxonomy(args.tax)
    log.info('[-] Read %d taxonomy tags from %s' % (
                len(taxonomy), args.tax))
    tagging = Tagging(args.tag)
    log.info('[-] Read %d tagging rules from %s' % (
                len(tagging), args.tag))
    expansion = Expansion(args.exp)
    log.info('[-] Read %d expansion rules from %s' % (
                len(expansion), args.exp))
    # FIX: removed an unreachable fallback branch that referenced the
    # undefined name 'ifile'; args.alias is guaranteed non-empty here.
    alias_fname = args.alias
    update = Update(alias_fname, taxonomy, tagging, expansion, args.n, args.t)
    log.info('[-] Processing %d relations satisfying t>=%.2f n>=%d' % (
                update.num_rules(), args.t, args.n))
    update.output_relations(out_prefix + ".orig.rules")
    update.run()
    if args.update:
        update.output(None)
    else:
        update.output(out_prefix)
    update.output_relations(out_prefix + ".final.rules")
| true | true |
f7ff88e8eb54ae16817187f7e0247609ddf9b9ab | 2,253 | py | Python | heapprof/tests/si_prefix_test.py | humu/heapprof | c46a5d0de86d5d1c50b0aa1b7014ef0b2b46a821 | [
"MIT"
] | 34 | 2019-08-13T23:55:17.000Z | 2021-06-22T01:29:05.000Z | heapprof/tests/si_prefix_test.py | humu/heapprof | c46a5d0de86d5d1c50b0aa1b7014ef0b2b46a821 | [
"MIT"
] | 7 | 2019-08-13T22:33:03.000Z | 2021-09-27T22:39:59.000Z | heapprof/tests/si_prefix_test.py | humu/heapprof | c46a5d0de86d5d1c50b0aa1b7014ef0b2b46a821 | [
"MIT"
] | 5 | 2019-08-16T01:50:02.000Z | 2021-08-30T20:02:48.000Z | import unittest
from heapprof._si_prefix import siPrefixString
class SIPrefixTest(unittest.TestCase):
    """Tests for siPrefixString in decimal (SI), binary, and IEC modes."""
    def testDecimal(self):
        """Decimal mode: prefixes step by powers of 1000."""
        self.assertEqual('1050.00', siPrefixString(1050, threshold=1.1, precision=2))
        self.assertEqual('1.05k', siPrefixString(1050, threshold=1.05, precision=2))
        self.assertEqual('1.1', siPrefixString(1.1, threshold=1.1, precision=1))
        self.assertEqual('2.0M', siPrefixString(2000000, threshold=1.1, precision=1))
        self.assertEqual('500m', siPrefixString(0.5, threshold=1.1, precision=0))
        self.assertEqual('1.1μ', siPrefixString(1.1e-6, threshold=1.1, precision=1))
    def testBinary(self):
        """Binary mode: prefixes step by powers of 1024, SI-style letters."""
        self.assertEqual('1050.00', siPrefixString(1050, threshold=1.1, precision=2, binary=True))
        self.assertEqual('1.1k', siPrefixString(1130, threshold=1.1, precision=1, binary=True))
        self.assertEqual('1.1', siPrefixString(1.1, threshold=1.1, precision=1, binary=True))
        self.assertEqual(
            '2.0M', siPrefixString(2 * 1024 * 1024, threshold=1.1, precision=1, binary=True)
        )
        # NB that 0.5 = 512 * (1024)^-1.
        self.assertEqual('512m', siPrefixString(0.5, threshold=1.1, precision=0, binary=True))
        self.assertEqual('1.2μ', siPrefixString(1.1e-6, threshold=1.1, precision=1, binary=True))
    def testIEC(self):
        """IEC format: binary steps with the 'i' suffix (Ki, Mi, ...)."""
        self.assertEqual(
            '1050.00', siPrefixString(1050, threshold=1.1, precision=2, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.1Ki', siPrefixString(1130, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.1', siPrefixString(1.1, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '2.0Mi',
            siPrefixString(
                2 * 1024 * 1024, threshold=1.1, precision=1, binary=True, iecFormat=True
            ),
        )
        self.assertEqual(
            '512mi', siPrefixString(0.5, threshold=1.1, precision=0, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.2μi', siPrefixString(1.1e-6, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
# Allow running this test module directly: `python si_prefix_test.py`.
if __name__ == '__main__':
    unittest.main()
| 43.326923 | 100 | 0.632046 | import unittest
from heapprof._si_prefix import siPrefixString
class SIPrefixTest(unittest.TestCase):
    """Tests for siPrefixString in decimal (SI), binary, and IEC modes."""
    def testDecimal(self):
        """Decimal mode: prefixes step by powers of 1000."""
        self.assertEqual('1050.00', siPrefixString(1050, threshold=1.1, precision=2))
        self.assertEqual('1.05k', siPrefixString(1050, threshold=1.05, precision=2))
        self.assertEqual('1.1', siPrefixString(1.1, threshold=1.1, precision=1))
        self.assertEqual('2.0M', siPrefixString(2000000, threshold=1.1, precision=1))
        self.assertEqual('500m', siPrefixString(0.5, threshold=1.1, precision=0))
        self.assertEqual('1.1μ', siPrefixString(1.1e-6, threshold=1.1, precision=1))
    def testBinary(self):
        """Binary mode: prefixes step by powers of 1024, SI-style letters."""
        self.assertEqual('1050.00', siPrefixString(1050, threshold=1.1, precision=2, binary=True))
        self.assertEqual('1.1k', siPrefixString(1130, threshold=1.1, precision=1, binary=True))
        self.assertEqual('1.1', siPrefixString(1.1, threshold=1.1, precision=1, binary=True))
        self.assertEqual(
            '2.0M', siPrefixString(2 * 1024 * 1024, threshold=1.1, precision=1, binary=True)
        )
        # 0.5 = 512 * (1024)^-1, hence the binary milli value below.
        self.assertEqual('512m', siPrefixString(0.5, threshold=1.1, precision=0, binary=True))
        self.assertEqual('1.2μ', siPrefixString(1.1e-6, threshold=1.1, precision=1, binary=True))
    def testIEC(self):
        """IEC format: binary steps with the 'i' suffix (Ki, Mi, ...)."""
        self.assertEqual(
            '1050.00', siPrefixString(1050, threshold=1.1, precision=2, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.1Ki', siPrefixString(1130, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.1', siPrefixString(1.1, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '2.0Mi',
            siPrefixString(
                2 * 1024 * 1024, threshold=1.1, precision=1, binary=True, iecFormat=True
            ),
        )
        self.assertEqual(
            '512mi', siPrefixString(0.5, threshold=1.1, precision=0, binary=True, iecFormat=True)
        )
        self.assertEqual(
            '1.2μi', siPrefixString(1.1e-6, threshold=1.1, precision=1, binary=True, iecFormat=True)
        )
# Allow running this test module directly: `python si_prefix_test.py`.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7ff89387b76cf80afb68ce72b0c49b1de0205fa | 532 | py | Python | 24.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | 24.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | 24.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
class ListNode:
    """A singly-linked list node: a payload plus a pointer to the next node."""
    def __init__(self, x):
        # Nodes start detached; callers link them via .next afterwards.
        self.val, self.next = x, None
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes; return the resulting head."""
        if not head or not head.next:
            return head
        # The second node of the first pair becomes the new head.
        new_head = head.next
        prev = None
        first = head
        while first and first.next:
            second = first.next
            # Relink (first -> second -> rest) as (second -> first -> rest).
            first.next = second.next
            second.next = first
            # Attach the swapped pair to the tail of the previous pair.
            if prev is not None:
                prev.next = second
            prev = first
            first = first.next
        return new_head
class ListNode:
    # Definition for a singly-linked list node.
    def __init__(self, x):
        # val holds the payload; next is set later when nodes are linked.
        self.val = x
        self.next = None
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes of a linked list; return the new head."""
        # Lists of length 0 or 1 are returned unchanged.
        if not head or not head.next:
            return head
        # Dummy node so swapping the head needs no special case.
        temp = ListNode(0)
        temp.next = head
        p = temp
        q = temp.next
        while q and q.next:
            # Relink (p -> q -> q.next -> rest) as (p -> q.next -> q -> rest).
            p.next = q.next
            q.next = p.next.next
            p.next.next = q
            p = q
            q = q.next
        return temp.next
f7ff89c8555ab8adcdbf88a53a3f7b7f6a7a4435 | 1,155 | py | Python | solutions/ba6a.py | RafikFarhad/Bioinformatics_Codes | 3195a0533bd36fb4060dd7d3809ddd12edca499a | [
"MIT"
] | null | null | null | solutions/ba6a.py | RafikFarhad/Bioinformatics_Codes | 3195a0533bd36fb4060dd7d3809ddd12edca499a | [
"MIT"
] | null | null | null | solutions/ba6a.py | RafikFarhad/Bioinformatics_Codes | 3195a0533bd36fb4060dd7d3809ddd12edca499a | [
"MIT"
] | null | null | null | import itertools
import random
import networkx as nx
import sys
import pandas as pd
# Raise the recursion ceiling (precautionary; the code below is iterative).
sys.setrecursionlimit(2000)
def prettyGenome(arr):
    """Format a signed permutation as e.g. '(+1 -2 +3)'."""
    signed = ['{0:+d}'.format(value) for value in arr]
    return '(' + ' '.join(signed) + ')'
def GreedySorting(genome):
    """Greedy sorting by reversals (Rosalind BA6A).

    Sorts *genome* (a signed permutation, mutated in place) and returns the
    formatted snapshot after every reversal and every sign flip.
    """
    snapshots = []
    for i in range(1, len(genome) + 1):
        # Locate element i regardless of its current sign.
        if i in genome:
            pos = genome.index(i)
        else:
            pos = genome.index(-i)
        # Already in place with a positive sign: nothing to do.
        if pos == i - 1 and genome[pos] > 0:
            continue
        # Reverse the segment [i-1, pos] and negate its elements
        # (i == 1 needs the full-prefix slice, since i-2 would be -1).
        segment = genome[pos:i - 2:-1] if i > 1 else genome[pos::-1]
        genome[i - 1:pos + 1] = [-value for value in segment]
        snapshots.append(prettyGenome(genome))
        # If element i landed with a negative sign, flip it (extra step).
        if genome[i - 1] < 0:
            genome[i - 1] = -genome[i - 1]
            snapshots.append(prettyGenome(genome))
    return snapshots
def main(infile, outfile):
    """Read a genome line like '(-3 +4 +1)' from infile, greedily sort it,
    and write the intermediate permutations to outfile (one per line)."""
    lines = [raw.rstrip('\n') for raw in infile]
    print(lines)
    # Strip the surrounding parentheses and parse the signed integers.
    genome = [int(token) for token in lines[0][1:-1].split(' ')]
    result = '\n'.join(GreedySorting(genome))
    print(result)
    outfile.write(result)
import random
import networkx as nx
import sys
import pandas as pd
# Raise the recursion ceiling (precautionary; the code below is iterative).
sys.setrecursionlimit(2000)
def prettyGenome(arr):
    """Format a signed permutation as e.g. '(+1 -2 +3)'."""
    return '(' + ' '.join('{0:+d}'.format(_) for _ in arr) + ')'
def GreedySorting(genome):
    """Greedy sorting by reversals; mutates *genome* in place and returns
    the formatted snapshot after every reversal and sign flip."""
    length = len(genome)
    res = []
    for i in range(1, length+1):
        # Locate element i regardless of sign; a bare except catches the
        # ValueError raised when i is stored negated.
        try:
            pos = genome.index(i)
        except:
            pos = genome.index(-i)
        # Already in place with a positive sign: nothing to do.
        if pos==i-1 and genome[pos] > 0:
            continue
        # Reverse (and negate) the segment between position i-1 and pos.
        if i==1:
            part = genome[pos::-1]
        else:
            part = genome[pos:i-2:-1]
        part = [-_ for _ in part]
        genome[i-1:pos+1] = part
        res.append(prettyGenome(genome))
        # If element i landed negative, flip its sign (one extra step).
        if genome[i-1] < 0:
            genome[i-1] *= -1
            res.append(prettyGenome(genome))
    return res
def main(infile, outfile):
    """Read one genome line from infile, run GreedySorting, write the steps."""
    inp = [line.rstrip('\n') for line in infile]
    print(inp)
    # inp[0] looks like '(+1 -2 ...)': strip parentheses, parse the ints.
    output = GreedySorting([int(a) for a in inp[0][1:-1].split(' ')])
    output = '\n'.join(output)
    print(output)
    outfile.write(output)
f7ff89ed2c936f359d381c8ef43ff719baaa57a5 | 8,824 | py | Python | corona_model/start_here.py | kukaiN/modeling_in_python | 99641bc2be5887c5571fd494f5025efe8a0441bb | [
"MIT"
] | 2 | 2021-02-06T04:15:08.000Z | 2022-03-14T12:02:34.000Z | corona_model/start_here.py | kukaiN/modeling_in_python | 99641bc2be5887c5571fd494f5025efe8a0441bb | [
"MIT"
] | null | null | null | corona_model/start_here.py | kukaiN/modeling_in_python | 99641bc2be5887c5571fd494f5025efe8a0441bb | [
"MIT"
] | 4 | 2021-01-03T05:17:54.000Z | 2021-06-08T16:00:25.000Z | import model_framework
import platform
import statfile
import copy
import fileRelated
import pandas as pd
import experiment as experiment
import main_config
from pathlib import Path
def main():
    """Initialize and run the model.

    For in-depth detail about the config or how to run the code, see the
    GitHub page for this project.
    """
    # you can control for multiple interventions by adding a case:
    #  [(modified attr1, newVal), (modified attr2, newVal), ...]
    # simulation name --> simulation controlled variable(s)
    # dont use . or - in the simulation name because the names are used to save images, or any symbols below
    modelConfig = main_config.modelConfig
    R0_controls = {
        "World" : [
            ("DynamicCapacity", False),
            ],
        "Infection" : [
            ("baseP" , 1.25),
            ("SeedNumber", 100),
            ],
        "HybridClass":[
            ("ChangedSeedNumber", 10),
            ],
    }
    # this overrides the previous R0_controls, since base_p is being changed
    R0_controls = {
        "World" : [
            ("DynamicCapacity", False),
            ],
        "HybridClass":[
            ("ChangedSeedNumber", 10),
            ],
    }
    def cross_scenarios(scenario1, scenario2):
        # Cartesian product of two experiment dicts; scenario1's controls
        # override the screen's where keys collide.
        experiments = {}
        for keyname, experiment1 in scenario1.items():
            for screenname, screen in scenario2.items():
                experiment_name = screenname +"_" + keyname
                experiments[experiment_name] = screen.copy()
                for key, value in experiment1.items():
                    #print(key, value)
                    experiments[experiment_name][key] = value.copy()
        return copy.deepcopy(experiments)
    def print_nicely(experiment_scenarios):
        # Debug helper: pretty-print each experiment and its controls.
        for ex_name, ex_config in experiment_scenarios.items():
            print("\n","*"*20,"\n", ex_name)
            for ex_config_name, ex_config_list in ex_config.items():
                print(ex_config_name, ":" ,ex_config_list)
    #experiment2 = cross_scenarios(experiment.vaccine3, experiment.low_med)
    #experiment3 =cross_scenarios(experiment.vaccine4, experiment.facemask3)
    experiment1 = experiment.marginals
    experiment2 = experiment.original_3x3
    experiment3 = cross_scenarios(experiment.different_base_p_jump_025, experiment.medium_student_vary_policy)
    experiment4 = cross_scenarios(experiment.medium_student_vary_policy, experiment.off_campus_multiplier)
    experiment5 = experiment.diff_seed_number
    experiment6 = experiment.facemask_param
    #print(len(experiment3))
    #print_nicely(experiment3)
    basemodel = {"basemodel": {}}
    multi_experiments = {
        "request_1_marginal": experiment1,
        "request_2_3x3": experiment2,
        "request_3_diff_base_p": experiment3,
        "request_4_fixed_p_diff_offcampusP": experiment4,
        "request_5_diff_seed_number": experiment5,
        "request_6_facemask_param": experiment6,
    }
    print("here are the loaded experiments:")
    for r_name, exp in multi_experiments.items():
        # Pad the request name to 40 characters so the counts line up.
        r_name+=(" "*max(0, (40-len(r_name))))
        print(f"{r_name} with {len(exp)} experiments")
    #multi_experiments = {"new_request4": experiment.new_check}
    user_input = input("which request # do you want to run? 0 to run all in one thread")
    user_input = int(user_input)
    # Special codes selecting groups of requests (e.g. 123 -> requests 1-3).
    sp_num = [123, 456, 12, 34, 56]
    if (user_input < 0 or user_input > len(multi_experiments)) and user_input not in sp_num:
        print("input number does not match experiment number, exiting program")
        return
    for sp_index, (request_name, modelConfigs) in enumerate(multi_experiments.items()):
        if ((sp_index == user_input-1) or (user_input == 0) or (user_input==123 and sp_index < 3) or
            (user_input==456 and sp_index >= 3) or (user_input==12 and sp_index < 2) or (user_input==34 and 4>sp_index>1)
            or (user_input==56 and sp_index >= 4)):
            print(sp_index)
            R0Dict = dict()
            InfectedCountDict = dict()
            output_dir = fileRelated.fullPath(request_name, "outputs")
            Path(output_dir).mkdir(parents=False, exist_ok=True)
            output_folder = "outputs/"+ request_name
            print(request_name)
            for index, (modelName, modelControl) in enumerate(modelConfigs.items()):
                print("finished", index)
                configCopy = copy.deepcopy(modelConfig)
                #print("*"*20)
                #print(configCopy["Agents"].keys())
                #print("*"*20)
                #print(f"started working on initializing the simualtion for {modelName}")
                # Apply this experiment's overrides on a deep copy of the
                # base configuration.
                for categoryKey, listOfControls in modelControl.items():
                    #print(listOfControls)
                    for (specificKey, specificValue) in listOfControls:
                        if specificKey not in configCopy[categoryKey].keys():
                            print("error", specificKey, specificValue, " was not assigned correctly")
                            #return
                        else:
                            configCopy[categoryKey][specificKey] = specificValue
                R0Count, multiCounts = 100, 100
                # NOTE(review): 'and False' makes this branch dead; kept as-is.
                if index in [0, 1] and False:
                    R0Count = 200
                #print(configCopy1
                if index > -1:
                    #model_framework.simpleCheck(configCopy, days=10, visuals=True, debug=True, modelName=modelName)
                    InfectedCountDict[modelName] = model_framework.multiSimulation(multiCounts, configCopy, days=100, debug=False, modelName=modelName, outputDir=output_folder)
                    R0Dict[modelName] = model_framework.R0_simulation(configCopy, R0_controls,R0Count, debug=False, timeSeriesVisual=False, R0Visuals=True, modelName=modelName, outputDir=output_folder)
                # the value of the dictionary is ([multiple R0 values], (descriptors, (tuple of useful data like mean and stdev))
            print(InfectedCountDict.items())
            print(R0Dict.items())
            if True:
                #for k in R0Dict.keys():
                #    R0Dict[k] = [list(R0Dict[k][0]) + [1 for _ in range(98)], R0Dict[k][1]]
                #    print(R0Dict)
                simulationGeneration = "0"
                saveName = "comparingModels_"+simulationGeneration
                # reads R0 data
                #fileRelated.mergeR0(R0Dict, fileRelated.fullPath("request_5/R0_data.csv", "outputs"))
                print(R0Dict)
                if R0Count > 0:
                    statfile.comparingBoxPlots(R0Dict, plottedData="R0", saveName=saveName, outputDir=output_folder)
                if multiCounts >0:
                    statfile.comparingBoxPlots(InfectedCountDict ,plottedData="inf", saveName=saveName, outputDir=output_folder)
                #for key, value in R0Dict.items():
                #    if isinstance(R0Dict[key][1], str):
                #        R0Dict[key] = value[0]
                #    # else do nothing
                #    #print(key, value)
                #print(R0Dict)
                # check if dict is not empty
                merged = False
                if merged:
                    for k, v in R0Dict.items():
                        print(k, len(v))
                        # NOTE(review): 'value' below looks stale (probably
                        # meant 'v'); unreachable while merged is False —
                        # confirm before enabling this path.
                        if isinstance(value[-1], str) or isinstance(value[-1], tuple):
                            R0Dict[k] = v[0]
                    sameshape = True
                    sizes = []
                    for k,v in R0Dict.items():
                        sizes.append(len(v[0]))
                    print("size is",sizes)
                    if len(set(sizes)) == 1:
                        R0_df = pd.DataFrame(R0Dict)
                        fileRelated.save_df_to_csv(fileRelated.fullPath("R0_data.csv", output_folder), R0_df)
                    else:
                        # Mixed lengths: write one CSV per distinct length.
                        for specialsize in list(set(sizes)):
                            new_dict = dict()
                            newR0_df = None
                            for k, v in R0Dict.items():
                                if len(v[0]) == specialsize:
                                    new_dict[k] = copy.deepcopy(v[0])
                            newR0_df = pd.DataFrame(new_dict)
                            print(newR0_df)
                            print(new_dict)
                            fileRelated.save_df_to_csv(fileRelated.fullPath("R0_data_len"+str(specialsize)+".csv", output_folder), newR0_df)
            else: # never ran after jan 30
                #statfile.generateVisualByLoading(ControlledExperiment, plottedData="inf", saveName=saveName)
                model_framework.createFilledPlot(modelConfig, modelName="baseModel",
                                simulationN=3, outputDir=output_folder)
# Script entry point: run the experiment driver when executed directly.
if __name__ == "__main__":
    main()
| 42.423077 | 201 | 0.578536 | import model_framework
import platform
import statfile
import copy
import fileRelated
import pandas as pd
import experiment as experiment
import main_config
from pathlib import Path
def main():
    """Initialize and run the model; prompts for which request(s) to run."""
    modelConfig = main_config.modelConfig
    # First R0_controls is immediately overridden by the second below.
    R0_controls = {
        "World" : [
            ("DynamicCapacity", False),
            ],
        "Infection" : [
            ("baseP" , 1.25),
            ("SeedNumber", 100),
            ],
        "HybridClass":[
            ("ChangedSeedNumber", 10),
            ],
    }
    R0_controls = {
        "World" : [
            ("DynamicCapacity", False),
            ],
        "HybridClass":[
            ("ChangedSeedNumber", 10),
            ],
    }
    def cross_scenarios(scenario1, scenario2):
        # Cartesian product of two experiment dicts; scenario1's controls
        # override the screen's where keys collide.
        experiments = {}
        for keyname, experiment1 in scenario1.items():
            for screenname, screen in scenario2.items():
                experiment_name = screenname +"_" + keyname
                experiments[experiment_name] = screen.copy()
                for key, value in experiment1.items():
                    experiments[experiment_name][key] = value.copy()
        return copy.deepcopy(experiments)
    def print_nicely(experiment_scenarios):
        # Debug helper: pretty-print each experiment and its controls.
        for ex_name, ex_config in experiment_scenarios.items():
            print("\n","*"*20,"\n", ex_name)
            for ex_config_name, ex_config_list in ex_config.items():
                print(ex_config_name, ":" ,ex_config_list)
    experiment1 = experiment.marginals
    experiment2 = experiment.original_3x3
    experiment3 = cross_scenarios(experiment.different_base_p_jump_025, experiment.medium_student_vary_policy)
    experiment4 = cross_scenarios(experiment.medium_student_vary_policy, experiment.off_campus_multiplier)
    experiment5 = experiment.diff_seed_number
    experiment6 = experiment.facemask_param
    basemodel = {"basemodel": {}}
    multi_experiments = {
        "request_1_marginal": experiment1,
        "request_2_3x3": experiment2,
        "request_3_diff_base_p": experiment3,
        "request_4_fixed_p_diff_offcampusP": experiment4,
        "request_5_diff_seed_number": experiment5,
        "request_6_facemask_param": experiment6,
    }
    print("here are the loaded experiments:")
    for r_name, exp in multi_experiments.items():
        # Pad the request name to 40 characters so the counts line up.
        r_name+=(" "*max(0, (40-len(r_name))))
        print(f"{r_name} with {len(exp)} experiments")
    user_input = input("which request # do you want to run? 0 to run all in one thread")
    user_input = int(user_input)
    # Special codes selecting groups of requests (e.g. 123 -> requests 1-3).
    sp_num = [123, 456, 12, 34, 56]
    if (user_input < 0 or user_input > len(multi_experiments)) and user_input not in sp_num:
        print("input number does not match experiment number, exiting program")
        return
    for sp_index, (request_name, modelConfigs) in enumerate(multi_experiments.items()):
        if ((sp_index == user_input-1) or (user_input == 0) or (user_input==123 and sp_index < 3) or
            (user_input==456 and sp_index >= 3) or (user_input==12 and sp_index < 2) or (user_input==34 and 4>sp_index>1)
            or (user_input==56 and sp_index >= 4)):
            print(sp_index)
            R0Dict = dict()
            InfectedCountDict = dict()
            output_dir = fileRelated.fullPath(request_name, "outputs")
            Path(output_dir).mkdir(parents=False, exist_ok=True)
            output_folder = "outputs/"+ request_name
            print(request_name)
            for index, (modelName, modelControl) in enumerate(modelConfigs.items()):
                print("finished", index)
                configCopy = copy.deepcopy(modelConfig)
                # Apply this experiment's overrides on a deep copy of the
                # base configuration.
                for categoryKey, listOfControls in modelControl.items():
                    for (specificKey, specificValue) in listOfControls:
                        if specificKey not in configCopy[categoryKey].keys():
                            print("error", specificKey, specificValue, " was not assigned correctly")
                        else:
                            configCopy[categoryKey][specificKey] = specificValue
                R0Count, multiCounts = 100, 100
                # NOTE(review): 'and False' makes this branch dead; kept as-is.
                if index in [0, 1] and False:
                    R0Count = 200
                if index > -1:
                    InfectedCountDict[modelName] = model_framework.multiSimulation(multiCounts, configCopy, days=100, debug=False, modelName=modelName, outputDir=output_folder)
                    R0Dict[modelName] = model_framework.R0_simulation(configCopy, R0_controls,R0Count, debug=False, timeSeriesVisual=False, R0Visuals=True, modelName=modelName, outputDir=output_folder)
            print(InfectedCountDict.items())
            print(R0Dict.items())
            if True:
                simulationGeneration = "0"
                saveName = "comparingModels_"+simulationGeneration
                print(R0Dict)
                if R0Count > 0:
                    statfile.comparingBoxPlots(R0Dict, plottedData="R0", saveName=saveName, outputDir=output_folder)
                if multiCounts >0:
                    statfile.comparingBoxPlots(InfectedCountDict ,plottedData="inf", saveName=saveName, outputDir=output_folder)
                merged = False
                if merged:
                    for k, v in R0Dict.items():
                        print(k, len(v))
                        # NOTE(review): 'value' below looks stale (probably
                        # meant 'v'); unreachable while merged is False —
                        # confirm before enabling this path.
                        if isinstance(value[-1], str) or isinstance(value[-1], tuple):
                            R0Dict[k] = v[0]
                    sameshape = True
                    sizes = []
                    for k,v in R0Dict.items():
                        sizes.append(len(v[0]))
                    print("size is",sizes)
                    if len(set(sizes)) == 1:
                        R0_df = pd.DataFrame(R0Dict)
                        fileRelated.save_df_to_csv(fileRelated.fullPath("R0_data.csv", output_folder), R0_df)
                    else:
                        # Mixed lengths: write one CSV per distinct length.
                        for specialsize in list(set(sizes)):
                            new_dict = dict()
                            newR0_df = None
                            for k, v in R0Dict.items():
                                if len(v[0]) == specialsize:
                                    new_dict[k] = copy.deepcopy(v[0])
                            newR0_df = pd.DataFrame(new_dict)
                            print(newR0_df)
                            print(new_dict)
                            fileRelated.save_df_to_csv(fileRelated.fullPath("R0_data_len"+str(specialsize)+".csv", output_folder), newR0_df)
            else:
                model_framework.createFilledPlot(modelConfig, modelName="baseModel",
                                simulationN=3, outputDir=output_folder)
# Script entry point: run the experiment driver when executed directly.
if __name__ == "__main__":
    main()
| true | true |
f7ff8b7ca202cd306780e89b727abe6ed4218fe7 | 12,621 | py | Python | sdk/python/pulumi_azure_native/customerinsights/get_prediction.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/customerinsights/get_prediction.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/customerinsights/get_prediction.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetPredictionResult',
'AwaitableGetPredictionResult',
'get_prediction',
]
@pulumi.output_type
class GetPredictionResult:
    """
    The prediction resource format.
    """
    # NOTE: this class is auto-generated by the Pulumi SDK generator
    # (see the file header); edits here are limited to comments.
    def __init__(__self__, auto_analyze=None, description=None, display_name=None, grades=None, id=None, involved_interaction_types=None, involved_kpi_types=None, involved_relationships=None, mappings=None, name=None, negative_outcome_expression=None, positive_outcome_expression=None, prediction_name=None, primary_profile_type=None, provisioning_state=None, scope_expression=None, score_label=None, system_generated_entities=None, tenant_id=None, type=None):
        # Each argument is type-checked only when truthy (the generator's
        # convention) and stored on the output object via pulumi.set.
        if auto_analyze and not isinstance(auto_analyze, bool):
            raise TypeError("Expected argument 'auto_analyze' to be a bool")
        pulumi.set(__self__, "auto_analyze", auto_analyze)
        if description and not isinstance(description, dict):
            raise TypeError("Expected argument 'description' to be a dict")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, dict):
            raise TypeError("Expected argument 'display_name' to be a dict")
        pulumi.set(__self__, "display_name", display_name)
        if grades and not isinstance(grades, list):
            raise TypeError("Expected argument 'grades' to be a list")
        pulumi.set(__self__, "grades", grades)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if involved_interaction_types and not isinstance(involved_interaction_types, list):
            raise TypeError("Expected argument 'involved_interaction_types' to be a list")
        pulumi.set(__self__, "involved_interaction_types", involved_interaction_types)
        if involved_kpi_types and not isinstance(involved_kpi_types, list):
            raise TypeError("Expected argument 'involved_kpi_types' to be a list")
        pulumi.set(__self__, "involved_kpi_types", involved_kpi_types)
        if involved_relationships and not isinstance(involved_relationships, list):
            raise TypeError("Expected argument 'involved_relationships' to be a list")
        pulumi.set(__self__, "involved_relationships", involved_relationships)
        if mappings and not isinstance(mappings, dict):
            raise TypeError("Expected argument 'mappings' to be a dict")
        pulumi.set(__self__, "mappings", mappings)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if negative_outcome_expression and not isinstance(negative_outcome_expression, str):
            raise TypeError("Expected argument 'negative_outcome_expression' to be a str")
        pulumi.set(__self__, "negative_outcome_expression", negative_outcome_expression)
        if positive_outcome_expression and not isinstance(positive_outcome_expression, str):
            raise TypeError("Expected argument 'positive_outcome_expression' to be a str")
        pulumi.set(__self__, "positive_outcome_expression", positive_outcome_expression)
        if prediction_name and not isinstance(prediction_name, str):
            raise TypeError("Expected argument 'prediction_name' to be a str")
        pulumi.set(__self__, "prediction_name", prediction_name)
        if primary_profile_type and not isinstance(primary_profile_type, str):
            raise TypeError("Expected argument 'primary_profile_type' to be a str")
        pulumi.set(__self__, "primary_profile_type", primary_profile_type)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if scope_expression and not isinstance(scope_expression, str):
            raise TypeError("Expected argument 'scope_expression' to be a str")
        pulumi.set(__self__, "scope_expression", scope_expression)
        if score_label and not isinstance(score_label, str):
            raise TypeError("Expected argument 'score_label' to be a str")
        pulumi.set(__self__, "score_label", score_label)
        if system_generated_entities and not isinstance(system_generated_entities, dict):
            raise TypeError("Expected argument 'system_generated_entities' to be a dict")
        pulumi.set(__self__, "system_generated_entities", system_generated_entities)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="autoAnalyze")
    def auto_analyze(self) -> bool:
        """
        Whether do auto analyze.
        """
        return pulumi.get(self, "auto_analyze")
    @property
    @pulumi.getter
    def description(self) -> Optional[Mapping[str, str]]:
        """
        Description of the prediction.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[Mapping[str, str]]:
        """
        Display name of the prediction.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def grades(self) -> Optional[Sequence['outputs.PredictionResponseGrades']]:
        """
        The prediction grades.
        """
        return pulumi.get(self, "grades")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="involvedInteractionTypes")
    def involved_interaction_types(self) -> Optional[Sequence[str]]:
        """
        Interaction types involved in the prediction.
        """
        return pulumi.get(self, "involved_interaction_types")
    @property
    @pulumi.getter(name="involvedKpiTypes")
    def involved_kpi_types(self) -> Optional[Sequence[str]]:
        """
        KPI types involved in the prediction.
        """
        return pulumi.get(self, "involved_kpi_types")
    @property
    @pulumi.getter(name="involvedRelationships")
    def involved_relationships(self) -> Optional[Sequence[str]]:
        """
        Relationships involved in the prediction.
        """
        return pulumi.get(self, "involved_relationships")
    @property
    @pulumi.getter
    def mappings(self) -> 'outputs.PredictionResponseMappings':
        """
        Definition of the link mapping of prediction.
        """
        return pulumi.get(self, "mappings")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="negativeOutcomeExpression")
    def negative_outcome_expression(self) -> str:
        """
        Negative outcome expression.
        """
        return pulumi.get(self, "negative_outcome_expression")
    @property
    @pulumi.getter(name="positiveOutcomeExpression")
    def positive_outcome_expression(self) -> str:
        """
        Positive outcome expression.
        """
        return pulumi.get(self, "positive_outcome_expression")
    @property
    @pulumi.getter(name="predictionName")
    def prediction_name(self) -> Optional[str]:
        """
        Name of the prediction.
        """
        return pulumi.get(self, "prediction_name")
    @property
    @pulumi.getter(name="primaryProfileType")
    def primary_profile_type(self) -> str:
        """
        Primary profile type.
        """
        return pulumi.get(self, "primary_profile_type")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="scopeExpression")
    def scope_expression(self) -> str:
        """
        Scope expression.
        """
        return pulumi.get(self, "scope_expression")
    @property
    @pulumi.getter(name="scoreLabel")
    def score_label(self) -> str:
        """
        Score label.
        """
        return pulumi.get(self, "score_label")
    @property
    @pulumi.getter(name="systemGeneratedEntities")
    def system_generated_entities(self) -> 'outputs.PredictionResponseSystemGeneratedEntities':
        """
        System generated entities.
        """
        return pulumi.get(self, "system_generated_entities")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The hub name.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetPredictionResult(GetPredictionResult):
    """Awaitable variant of GetPredictionResult returned by get_prediction().

    Awaiting an instance produces a plain GetPredictionResult carrying the
    same field values.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator, which is
        # what makes instances awaitable; the awaited value is a plain
        # GetPredictionResult copied field-by-field from self.
        if False:
            yield self
        return GetPredictionResult(
            auto_analyze=self.auto_analyze,
            description=self.description,
            display_name=self.display_name,
            grades=self.grades,
            id=self.id,
            involved_interaction_types=self.involved_interaction_types,
            involved_kpi_types=self.involved_kpi_types,
            involved_relationships=self.involved_relationships,
            mappings=self.mappings,
            name=self.name,
            negative_outcome_expression=self.negative_outcome_expression,
            positive_outcome_expression=self.positive_outcome_expression,
            prediction_name=self.prediction_name,
            primary_profile_type=self.primary_profile_type,
            provisioning_state=self.provisioning_state,
            scope_expression=self.scope_expression,
            score_label=self.score_label,
            system_generated_entities=self.system_generated_entities,
            tenant_id=self.tenant_id,
            type=self.type)
def get_prediction(hub_name: Optional[str] = None,
                   prediction_name: Optional[str] = None,
                   resource_group_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionResult:
    """
    The prediction resource format.
    API Version: 2017-04-26.

    :param str hub_name: The name of the hub.
    :param str prediction_name: The name of the Prediction.
    :param str resource_group_name: The name of the resource group.
    :param opts: Options controlling the provider invoke (version, provider, ...).
    :return: An awaitable result describing the prediction resource.
    """
    __args__ = dict()
    __args__['hubName'] = hub_name
    __args__['predictionName'] = prediction_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:customerinsights:getPrediction', __args__, opts=opts, typ=GetPredictionResult).value
    return AwaitableGetPredictionResult(
        auto_analyze=__ret__.auto_analyze,
        description=__ret__.description,
        display_name=__ret__.display_name,
        grades=__ret__.grades,
        id=__ret__.id,
        involved_interaction_types=__ret__.involved_interaction_types,
        involved_kpi_types=__ret__.involved_kpi_types,
        involved_relationships=__ret__.involved_relationships,
        mappings=__ret__.mappings,
        name=__ret__.name,
        negative_outcome_expression=__ret__.negative_outcome_expression,
        positive_outcome_expression=__ret__.positive_outcome_expression,
        prediction_name=__ret__.prediction_name,
        primary_profile_type=__ret__.primary_profile_type,
        provisioning_state=__ret__.provisioning_state,
        scope_expression=__ret__.scope_expression,
        score_label=__ret__.score_label,
        system_generated_entities=__ret__.system_generated_entities,
        tenant_id=__ret__.tenant_id,
        type=__ret__.type)
| 39.688679 | 460 | 0.67356 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetPredictionResult',
'AwaitableGetPredictionResult',
'get_prediction',
]
@pulumi.output_type
class GetPredictionResult:
    """
    The prediction resource format.
    """
    def __init__(__self__, auto_analyze=None, description=None, display_name=None, grades=None, id=None, involved_interaction_types=None, involved_kpi_types=None, involved_relationships=None, mappings=None, name=None, negative_outcome_expression=None, positive_outcome_expression=None, prediction_name=None, primary_profile_type=None, provisioning_state=None, scope_expression=None, score_label=None, system_generated_entities=None, tenant_id=None, type=None):
        # Each argument is validated against its expected wire type and then
        # stored via pulumi.set so the @pulumi.output_type getters below can
        # retrieve it.
        if auto_analyze and not isinstance(auto_analyze, bool):
            raise TypeError("Expected argument 'auto_analyze' to be a bool")
        pulumi.set(__self__, "auto_analyze", auto_analyze)
        if description and not isinstance(description, dict):
            raise TypeError("Expected argument 'description' to be a dict")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, dict):
            raise TypeError("Expected argument 'display_name' to be a dict")
        pulumi.set(__self__, "display_name", display_name)
        if grades and not isinstance(grades, list):
            raise TypeError("Expected argument 'grades' to be a list")
        pulumi.set(__self__, "grades", grades)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if involved_interaction_types and not isinstance(involved_interaction_types, list):
            raise TypeError("Expected argument 'involved_interaction_types' to be a list")
        pulumi.set(__self__, "involved_interaction_types", involved_interaction_types)
        if involved_kpi_types and not isinstance(involved_kpi_types, list):
            raise TypeError("Expected argument 'involved_kpi_types' to be a list")
        pulumi.set(__self__, "involved_kpi_types", involved_kpi_types)
        if involved_relationships and not isinstance(involved_relationships, list):
            raise TypeError("Expected argument 'involved_relationships' to be a list")
        pulumi.set(__self__, "involved_relationships", involved_relationships)
        if mappings and not isinstance(mappings, dict):
            raise TypeError("Expected argument 'mappings' to be a dict")
        pulumi.set(__self__, "mappings", mappings)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if negative_outcome_expression and not isinstance(negative_outcome_expression, str):
            raise TypeError("Expected argument 'negative_outcome_expression' to be a str")
        pulumi.set(__self__, "negative_outcome_expression", negative_outcome_expression)
        if positive_outcome_expression and not isinstance(positive_outcome_expression, str):
            raise TypeError("Expected argument 'positive_outcome_expression' to be a str")
        pulumi.set(__self__, "positive_outcome_expression", positive_outcome_expression)
        if prediction_name and not isinstance(prediction_name, str):
            raise TypeError("Expected argument 'prediction_name' to be a str")
        pulumi.set(__self__, "prediction_name", prediction_name)
        if primary_profile_type and not isinstance(primary_profile_type, str):
            raise TypeError("Expected argument 'primary_profile_type' to be a str")
        pulumi.set(__self__, "primary_profile_type", primary_profile_type)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if scope_expression and not isinstance(scope_expression, str):
            raise TypeError("Expected argument 'scope_expression' to be a str")
        pulumi.set(__self__, "scope_expression", scope_expression)
        if score_label and not isinstance(score_label, str):
            raise TypeError("Expected argument 'score_label' to be a str")
        pulumi.set(__self__, "score_label", score_label)
        if system_generated_entities and not isinstance(system_generated_entities, dict):
            raise TypeError("Expected argument 'system_generated_entities' to be a dict")
        pulumi.set(__self__, "system_generated_entities", system_generated_entities)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="autoAnalyze")
    def auto_analyze(self) -> bool:
        """
        Whether do auto analyze.
        """
        return pulumi.get(self, "auto_analyze")
    @property
    @pulumi.getter
    def description(self) -> Optional[Mapping[str, str]]:
        """
        Description of the prediction.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[Mapping[str, str]]:
        """
        Display name of the prediction.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def grades(self) -> Optional[Sequence['outputs.PredictionResponseGrades']]:
        """
        The prediction grades.
        """
        return pulumi.get(self, "grades")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="involvedInteractionTypes")
    def involved_interaction_types(self) -> Optional[Sequence[str]]:
        """
        Interaction types involved in the prediction.
        """
        return pulumi.get(self, "involved_interaction_types")
    @property
    @pulumi.getter(name="involvedKpiTypes")
    def involved_kpi_types(self) -> Optional[Sequence[str]]:
        """
        KPI types involved in the prediction.
        """
        return pulumi.get(self, "involved_kpi_types")
    @property
    @pulumi.getter(name="involvedRelationships")
    def involved_relationships(self) -> Optional[Sequence[str]]:
        """
        Relationships involved in the prediction.
        """
        return pulumi.get(self, "involved_relationships")
    @property
    @pulumi.getter
    def mappings(self) -> 'outputs.PredictionResponseMappings':
        """
        Definition of the link mapping of prediction.
        """
        return pulumi.get(self, "mappings")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="negativeOutcomeExpression")
    def negative_outcome_expression(self) -> str:
        """
        Negative outcome expression.
        """
        return pulumi.get(self, "negative_outcome_expression")
    @property
    @pulumi.getter(name="positiveOutcomeExpression")
    def positive_outcome_expression(self) -> str:
        """
        Positive outcome expression.
        """
        return pulumi.get(self, "positive_outcome_expression")
    @property
    @pulumi.getter(name="predictionName")
    def prediction_name(self) -> Optional[str]:
        """
        Name of the prediction.
        """
        return pulumi.get(self, "prediction_name")
    @property
    @pulumi.getter(name="primaryProfileType")
    def primary_profile_type(self) -> str:
        """
        Primary profile type.
        """
        return pulumi.get(self, "primary_profile_type")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning state.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="scopeExpression")
    def scope_expression(self) -> str:
        """
        Scope expression.
        """
        return pulumi.get(self, "scope_expression")
    @property
    @pulumi.getter(name="scoreLabel")
    def score_label(self) -> str:
        """
        Score label.
        """
        return pulumi.get(self, "score_label")
    @property
    @pulumi.getter(name="systemGeneratedEntities")
    def system_generated_entities(self) -> 'outputs.PredictionResponseSystemGeneratedEntities':
        """
        System generated entities.
        """
        return pulumi.get(self, "system_generated_entities")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The hub name.
        """
        return pulumi.get(self, "tenant_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetPredictionResult(GetPredictionResult):
    """Awaitable variant of GetPredictionResult returned by get_prediction()."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is what
        # allows instances to be awaited; the awaited value is a plain
        # GetPredictionResult copied field-by-field from self.
        if False:
            yield self
        return GetPredictionResult(
            auto_analyze=self.auto_analyze,
            description=self.description,
            display_name=self.display_name,
            grades=self.grades,
            id=self.id,
            involved_interaction_types=self.involved_interaction_types,
            involved_kpi_types=self.involved_kpi_types,
            involved_relationships=self.involved_relationships,
            mappings=self.mappings,
            name=self.name,
            negative_outcome_expression=self.negative_outcome_expression,
            positive_outcome_expression=self.positive_outcome_expression,
            prediction_name=self.prediction_name,
            primary_profile_type=self.primary_profile_type,
            provisioning_state=self.provisioning_state,
            scope_expression=self.scope_expression,
            score_label=self.score_label,
            system_generated_entities=self.system_generated_entities,
            tenant_id=self.tenant_id,
            type=self.type)
def get_prediction(hub_name: Optional[str] = None,
                   prediction_name: Optional[str] = None,
                   resource_group_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPredictionResult:
    """
    The prediction resource format.
    API Version: 2017-04-26.

    :param str hub_name: The name of the hub.
    :param str prediction_name: The name of the Prediction.
    :param str resource_group_name: The name of the resource group.
    :param opts: Options controlling the provider invoke (version, provider, ...).
    :return: An awaitable result describing the prediction resource.
    """
    __args__ = dict()
    __args__['hubName'] = hub_name
    __args__['predictionName'] = prediction_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:customerinsights:getPrediction', __args__, opts=opts, typ=GetPredictionResult).value
    return AwaitableGetPredictionResult(
        auto_analyze=__ret__.auto_analyze,
        description=__ret__.description,
        display_name=__ret__.display_name,
        grades=__ret__.grades,
        id=__ret__.id,
        involved_interaction_types=__ret__.involved_interaction_types,
        involved_kpi_types=__ret__.involved_kpi_types,
        involved_relationships=__ret__.involved_relationships,
        mappings=__ret__.mappings,
        name=__ret__.name,
        negative_outcome_expression=__ret__.negative_outcome_expression,
        positive_outcome_expression=__ret__.positive_outcome_expression,
        prediction_name=__ret__.prediction_name,
        primary_profile_type=__ret__.primary_profile_type,
        provisioning_state=__ret__.provisioning_state,
        scope_expression=__ret__.scope_expression,
        score_label=__ret__.score_label,
        system_generated_entities=__ret__.system_generated_entities,
        tenant_id=__ret__.tenant_id,
        type=__ret__.type)
| true | true |
f7ff8bef525aba9cf35207b8d9e329ad82e68dd7 | 1,908 | py | Python | pinpayments/templatetags/pin_payment_tags.py | rossp/django-pinpayments | bf5aa733d2a3ab637a4431d409777a215ea9afc6 | [
"Unlicense"
] | 11 | 2015-02-01T08:22:47.000Z | 2021-04-15T03:52:17.000Z | pinpayments/templatetags/pin_payment_tags.py | rossp/django-pinpayments | bf5aa733d2a3ab637a4431d409777a215ea9afc6 | [
"Unlicense"
] | 18 | 2015-01-18T03:43:44.000Z | 2021-07-04T22:46:29.000Z | pinpayments/templatetags/pin_payment_tags.py | rossp/django-pinpayments | bf5aa733d2a3ab637a4431d409777a215ea9afc6 | [
"Unlicense"
] | 7 | 2015-05-30T08:41:06.000Z | 2020-03-09T07:09:39.000Z | """ The tags with which the JS components can be used """
from django import template
from django.conf import settings
register = template.Library()
def pin_header(context, environment=''):
    """
    pin_header - Renders the JavaScript required for Pin.js payments.

    This will also include the Pin.js file from pin.net.au.
    Optionally accepts an 'environment' (eg test/live) as a parameter,
    otherwise the default will be used.

    Raises TemplateSyntaxError when PIN_ENVIRONMENTS is missing, the
    requested environment is not configured, or its key/host are absent.
    """
    if not environment:
        # Fall back to the configured default; truthiness also covers a
        # None argument, which the old `== ''` comparison missed.
        environment = getattr(settings, 'PIN_DEFAULT_ENVIRONMENT', 'test')

    pin_config = getattr(settings, 'PIN_ENVIRONMENTS', {})

    if not pin_config:
        raise template.TemplateSyntaxError(
            "PIN_ENVIRONMENTS setting does not exist."
        )

    if environment not in pin_config:
        raise template.TemplateSyntaxError(
            "Environment '{0}' not in PIN_ENVIRONMENTS".format(environment)
        )

    pin_env = pin_config[environment]
    pin_key = pin_env.get('key')
    pin_host = pin_env.get('host')

    # Both the public key and the API host are required to render the JS.
    if not (pin_key and pin_host):
        raise template.TemplateSyntaxError(
            "Environment '{0}' does not have key and host configured.".format(
                environment
            )
        )

    return {
        'pin_environment': environment,
        'pin_public_key': pin_key,
        'pin_host': pin_host,
        'request': context,
    }
def pin_form(context):
    """
    pin_form - renders a simple HTML form

    Should be inside existing <form class='pin'>...</form> tags.
    """
    from datetime import datetime

    this_year = datetime.now().year
    # Offer credit-card expiry years from the current year onwards.
    expiry_years = range(this_year, this_year + 15)
    return {'pin_cc_years': expiry_years, 'request': context}
# Register both functions as inclusion tags so templates can use
# {% pin_header %} and {% pin_form %}, rendered via the named templates.
register.inclusion_tag('pinpayments/pin_headers.html', takes_context=True)(pin_header)
register.inclusion_tag('pinpayments/pin_form.html', takes_context=True)(pin_form)
| 30.285714 | 86 | 0.654612 | from django import template
from django.conf import settings
register = template.Library()
def pin_header(context, environment=''):
    """
    pin_header - Renders the JavaScript required for Pin.js payments.
    This will also include the Pin.js file from pin.net.au.
    Optionally accepts an 'environment' (eg test/live) as a parameter,
    otherwise the default will be used.
    """
    if environment == '':
        # Fall back to the configured default environment.
        environment = getattr(settings, 'PIN_DEFAULT_ENVIRONMENT', 'test')
    pin_config = getattr(settings, 'PIN_ENVIRONMENTS', {})
    if pin_config == {}:
        raise template.TemplateSyntaxError(
            "PIN_ENVIRONMENTS setting does not exist."
        )
    if environment not in pin_config.keys():
        raise template.TemplateSyntaxError(
            "Environment '{0}' not in PIN_ENVIRONMENTS".format(environment)
        )
    pin_env = pin_config[environment]
    (pin_key, pin_host) = (pin_env.get('key', None), pin_env.get('host', None))
    # Both the public key and API host are required to render the JS.
    if not (pin_key and pin_host):
        raise template.TemplateSyntaxError(
            "Environment '{0}' does not have key and host configured.".format(
                environment
            )
        )
    return {
        'pin_environment': environment,
        'pin_public_key': pin_key,
        'pin_host': pin_host,
        'request': context,
    }
def pin_form(context):
    """
    pin_form - renders a simple HTML form
    Should be inside existing <form class='pin'>...</form> tags.
    """
    from datetime import datetime
    # Offer credit-card expiry years from the current year onwards.
    current_year = datetime.now().year
    return {
        'pin_cc_years': range(current_year, current_year + 15),
        'request': context,
    }
# Register both functions as inclusion tags so templates can use
# {% pin_header %} and {% pin_form %}, rendered via the named templates.
register.inclusion_tag('pinpayments/pin_headers.html', takes_context=True)(pin_header)
register.inclusion_tag('pinpayments/pin_form.html', takes_context=True)(pin_form)
| true | true |
f7ff8bfb5c2e133efcd2c7a039f14c5b0f6c91ab | 5,635 | py | Python | appengine/standard/localtesting/datastore_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | appengine/standard/localtesting/datastore_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | appengine/standard/localtesting/datastore_test.py | HoleCat/echarlosperros | b67460de0467e05b42a763c4430b26ecfd97c2aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START imports]
import unittest
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext import testbed
# [END imports]
# [START datastore_example_1]
class TestModel(ndb.Model):
    """A model class used for testing."""
    number = ndb.IntegerProperty(default=42)  # defaults to 42 when unset
    text = ndb.StringProperty()
class TestEntityGroupRoot(ndb.Model):
    """Entity group root, used as a common ancestor in ancestor queries."""
    pass
def GetEntityViaMemcache(entity_key):
    """Return the entity for *entity_key*, preferring the memcache copy.

    Falls back to a datastore fetch on a cache miss and repopulates the
    cache when the entity exists; returns None if it does not.
    """
    cached = memcache.get(entity_key)
    if cached is not None:
        return cached
    fetched = ndb.Key(urlsafe=entity_key).get()
    if fetched is not None:
        memcache.set(entity_key, fetched)
    return fetched
# [END datastore_example_1]
# [START datastore_example_test]
class DatastoreTestCase(unittest.TestCase):
    """Exercises the datastore and memcache service stubs via Testbed."""
    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Next, declare which service stubs you want to use.
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        # Clear ndb's in-context cache between tests.
        # This prevents data from leaking between tests.
        # Alternatively, you could disable caching by
        # using ndb.get_context().set_cache_policy(False)
        ndb.get_context().clear_cache()
# [END datastore_example_test]
# [START datastore_example_teardown]
    def tearDown(self):
        self.testbed.deactivate()
# [END datastore_example_teardown]
# [START datastore_example_insert]
    def testInsertEntity(self):
        TestModel().put()
        self.assertEqual(1, len(TestModel.query().fetch(2)))
# [END datastore_example_insert]
# [START datastore_example_filter]
    def testFilterByNumber(self):
        root = TestEntityGroupRoot(id="root")
        TestModel(parent=root.key).put()
        TestModel(number=17, parent=root.key).put()
        query = TestModel.query(ancestor=root.key).filter(
            TestModel.number == 42)
        results = query.fetch(2)
        self.assertEqual(1, len(results))
        self.assertEqual(42, results[0].number)
# [END datastore_example_filter]
# [START datastore_example_memcache]
    def testGetEntityViaMemcache(self):
        entity_key = TestModel(number=18).put().urlsafe()
        retrieved_entity = GetEntityViaMemcache(entity_key)
        self.assertNotEqual(None, retrieved_entity)
        self.assertEqual(18, retrieved_entity.number)
# [END datastore_example_memcache]
# [START HRD_example_1]
from google.appengine.datastore import datastore_stub_util # noqa
class HighReplicationTestCaseOne(unittest.TestCase):
    """Demonstrates eventual consistency under the HRD consistency policy."""
    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Create a consistency policy that will simulate the High Replication
        # consistency model.
        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=0)
        # Initialize the datastore stub with this policy.
        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
        # Initialize memcache stub too, since ndb also uses memcache
        self.testbed.init_memcache_stub()
        # Clear in-context cache before each test.
        ndb.get_context().clear_cache()
    def tearDown(self):
        self.testbed.deactivate()
    def testEventuallyConsistentGlobalQueryResult(self):
        class TestModel(ndb.Model):
            pass
        user_key = ndb.Key('User', 'ryan')
        # Put two entities
        ndb.put_multi([
            TestModel(parent=user_key),
            TestModel(parent=user_key)
        ])
        # Global query doesn't see the data.
        self.assertEqual(0, TestModel.query().count(3))
        # Ancestor query does see the data.
        self.assertEqual(2, TestModel.query(ancestor=user_key).count(3))
# [END HRD_example_1]
# [START HRD_example_2]
    def testDeterministicOutcome(self):
        # 50% chance to apply.
        self.policy.SetProbability(.5)
        # Use the pseudo random sequence derived from seed=2.
        self.policy.SetSeed(2)
        class TestModel(ndb.Model):
            pass
        TestModel().put()
        self.assertEqual(0, TestModel.query().count(3))
        self.assertEqual(0, TestModel.query().count(3))
        # Will always be applied before the third query.
        self.assertEqual(1, TestModel.query().count(3))
# [END HRD_example_2]
# [START main]
if __name__ == '__main__':
unittest.main()
# [END main]
| 33.945783 | 79 | 0.659095 |
import unittest
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext import testbed
class TestModel(ndb.Model):
    """A model class used for testing."""
    number = ndb.IntegerProperty(default=42)  # defaults to 42 when unset
    text = ndb.StringProperty()
class TestEntityGroupRoot(ndb.Model):
    """Entity group root, used as a common ancestor in ancestor queries."""
    pass
def GetEntityViaMemcache(entity_key):
    """Get entity from memcache if available, from datastore if not."""
    entity = memcache.get(entity_key)
    if entity is not None:
        return entity
    key = ndb.Key(urlsafe=entity_key)
    entity = key.get()
    if entity is not None:
        # Repopulate the cache for the next lookup.
        memcache.set(entity_key, entity)
    return entity
class DatastoreTestCase(unittest.TestCase):
    """Exercises the datastore and memcache service stubs via Testbed."""
    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        # Clear ndb's in-context cache between tests.
        # This prevents data from leaking between tests.
        # Alternatively, you could disable caching by
        # using ndb.get_context().set_cache_policy(False)
        ndb.get_context().clear_cache()
    # [END datastore_example_test]
    # [START datastore_example_teardown]
    def tearDown(self):
        self.testbed.deactivate()
    # [END datastore_example_teardown]
    # [START datastore_example_insert]
    def testInsertEntity(self):
        TestModel().put()
        self.assertEqual(1, len(TestModel.query().fetch(2)))
    # [END datastore_example_insert]
    # [START datastore_example_filter]
    def testFilterByNumber(self):
        root = TestEntityGroupRoot(id="root")
        TestModel(parent=root.key).put()
        TestModel(number=17, parent=root.key).put()
        query = TestModel.query(ancestor=root.key).filter(
            TestModel.number == 42)
        results = query.fetch(2)
        self.assertEqual(1, len(results))
        self.assertEqual(42, results[0].number)
    # [END datastore_example_filter]
    # [START datastore_example_memcache]
    def testGetEntityViaMemcache(self):
        entity_key = TestModel(number=18).put().urlsafe()
        retrieved_entity = GetEntityViaMemcache(entity_key)
        self.assertNotEqual(None, retrieved_entity)
        self.assertEqual(18, retrieved_entity.number)
    # [END datastore_example_memcache]
# [START HRD_example_1]
from google.appengine.datastore import datastore_stub_util # noqa
class HighReplicationTestCaseOne(unittest.TestCase):
    """Demonstrates eventual consistency under the HRD consistency policy."""
    def setUp(self):
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Create a consistency policy that will simulate the High Replication
        # consistency model.
        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=0)
        # Initialize the datastore stub with this policy.
        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
        # Initialize memcache stub too, since ndb also uses memcache
        self.testbed.init_memcache_stub()
        # Clear in-context cache before each test.
        ndb.get_context().clear_cache()
    def tearDown(self):
        self.testbed.deactivate()
    def testEventuallyConsistentGlobalQueryResult(self):
        class TestModel(ndb.Model):
            pass
        user_key = ndb.Key('User', 'ryan')
        # Put two entities
        ndb.put_multi([
            TestModel(parent=user_key),
            TestModel(parent=user_key)
        ])
        # Global query doesn't see the data.
        self.assertEqual(0, TestModel.query().count(3))
        # Ancestor query does see the data.
        self.assertEqual(2, TestModel.query(ancestor=user_key).count(3))
    def testDeterministicOutcome(self):
        # 50% chance to apply, with a fixed pseudo-random seed.
        self.policy.SetProbability(.5)
        self.policy.SetSeed(2)
        class TestModel(ndb.Model):
            pass
        TestModel().put()
        self.assertEqual(0, TestModel.query().count(3))
        self.assertEqual(0, TestModel.query().count(3))
        # Will always be applied before the third query.
        self.assertEqual(1, TestModel.query().count(3))
if __name__ == '__main__':
unittest.main()
| true | true |
f7ff8c0cf99f3eff44688e61cf27a773698679f1 | 271 | py | Python | 36_delete_dictionary.py | onowdev/python-selflearning | 3d7245de0207a5bfcbce4f7adde60e7316b70a8e | [
"MIT"
] | null | null | null | 36_delete_dictionary.py | onowdev/python-selflearning | 3d7245de0207a5bfcbce4f7adde60e7316b70a8e | [
"MIT"
] | null | null | null | 36_delete_dictionary.py | onowdev/python-selflearning | 3d7245de0207a5bfcbce4f7adde60e7316b70a8e | [
"MIT"
] | null | null | null | dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict['Name'] # remove entry with key 'Name'
dict.clear() # remove all entries in dict
del dict # delete entire dictionary
print ("dict['Age']: ", dict['Age'])
print ("dict['School']: ", dict['School']) | 33.875 | 51 | 0.601476 | dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict['Name']
dict.clear()
del dict
print ("dict['Age']: ", dict['Age'])
print ("dict['School']: ", dict['School']) | true | true |
f7ff8d5f3b3a8efae18b233903b6f163cd85eab4 | 3,093 | py | Python | ocdskingfisherprocess/signals/signals.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisherprocess/signals/signals.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisherprocess/signals/signals.py | matiasSanabria/kingfisher-process | 88cb768aaa562714c8bd53e05717639faf041501 | [
"BSD-3-Clause"
] | null | null | null | import json
import redis
from ocdskingfisherprocess.signals import KINGFISHER_SIGNALS
from ocdskingfisherprocess.transform import TRANSFORM_TYPE_COMPILE_RELEASES, TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
# Doing globals this way is hacky. Look into https://www.mattlayman.com/blog/2015/blinker/ instead.
our_database = None
our_config = None
def setup_signals(config, database):
    """Wire up kingfisher signal handlers according to *config*.

    Stores *config* and *database* in module-level globals for the handlers
    below, then connects the standard-pipeline handler and, when Redis is
    configured, the two Redis-publishing handlers.
    """
    global our_database, our_config
    our_database = database
    our_config = config
    if config.run_standard_pipeline:
        KINGFISHER_SIGNALS.signal('new_collection_created').connect(run_standard_pipeline_on_new_collection_created)
    if config.is_redis_available():
        KINGFISHER_SIGNALS.signal('collection-data-store-finished').connect(collection_data_store_finished_to_redis)
        KINGFISHER_SIGNALS.signal('collection-store-finished').connect(collection_store_finished_to_redis)
def run_standard_pipeline_on_new_collection_created(sender, collection_id=None, **kwargs):
    """Signal handler: set up the standard transform/check pipeline for a new collection.

    Only original (non-transformed) collections are given transforms;
    derived collections created here would otherwise trigger this handler
    recursively. Checks are enabled for every collection.
    """
    global our_database
    collection = our_database.get_collection(collection_id)
    if not collection.transform_from_collection_id:
        # Create the transforms we want:
        # original -> 1.0-to-1.1 upgrade -> compiled releases.
        second_collection_id = our_database.get_or_create_collection_id(
            collection.source_id,
            collection.data_version,
            collection.sample,
            transform_from_collection_id=collection.database_id,
            transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
        )
        our_database.get_or_create_collection_id(collection.source_id,
                                                 collection.data_version,
                                                 collection.sample,
                                                 transform_from_collection_id=second_collection_id,
                                                 transform_type=TRANSFORM_TYPE_COMPILE_RELEASES)
    # Turn on the checks we want
    our_database.mark_collection_check_data(collection_id, True)
    our_database.mark_collection_check_older_data_with_schema_version_1_1(collection_id, True)
def collection_data_store_finished_to_redis(sender,
                                            collection_id=None,
                                            collection_file_item_id=None,
                                            **kwargs):
    """Signal handler: publish a 'collection-data-store-finished' event to Redis.

    Pushes a JSON payload onto the 'kingfisher_work' list for external workers.
    """
    # A fresh connection per event; presumably cheap enough at this volume —
    # TODO(review): confirm, or pool the connection if this becomes hot.
    redis_conn = redis.Redis(host=our_config.redis_host, port=our_config.redis_port, db=our_config.redis_database)
    message = json.dumps({
        'type': 'collection-data-store-finished',
        'collection_id': collection_id,
        'collection_file_item_id': collection_file_item_id,
    })
    redis_conn.rpush('kingfisher_work', message)
def collection_store_finished_to_redis(sender,
                                       collection_id=None,
                                       **kwargs):
    """Signal handler: publish a collection-store-finished event to Redis.

    NOTE(review): unlike collection_data_store_finished_to_redis, this payload
    has no 'type' field — presumably fine because it goes to a dedicated
    queue; confirm against the consumers.
    """
    redis_conn = redis.Redis(host=our_config.redis_host, port=our_config.redis_port, db=our_config.redis_database)
    message = json.dumps({
        'collection_id': collection_id,
    })
    redis_conn.rpush('kingfisher_work_collection_store_finished', message)
| 44.826087 | 116 | 0.682509 | import json
import redis
from ocdskingfisherprocess.signals import KINGFISHER_SIGNALS
from ocdskingfisherprocess.transform import TRANSFORM_TYPE_COMPILE_RELEASES, TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
our_database = None
our_config = None
def setup_signals(config, database):
    """Wire up kingfisher signal handlers according to *config*."""
    global our_database, our_config
    our_database = database
    our_config = config
    if config.run_standard_pipeline:
        KINGFISHER_SIGNALS.signal('new_collection_created').connect(run_standard_pipeline_on_new_collection_created)
    if config.is_redis_available():
        KINGFISHER_SIGNALS.signal('collection-data-store-finished').connect(collection_data_store_finished_to_redis)
        KINGFISHER_SIGNALS.signal('collection-store-finished').connect(collection_store_finished_to_redis)
def run_standard_pipeline_on_new_collection_created(sender, collection_id=None, **kwargs):
    """Signal handler: set up transforms (originals only) and enable checks."""
    global our_database
    collection = our_database.get_collection(collection_id)
    if not collection.transform_from_collection_id:
        # Chain: original -> 1.0-to-1.1 upgrade -> compiled releases.
        second_collection_id = our_database.get_or_create_collection_id(
            collection.source_id,
            collection.data_version,
            collection.sample,
            transform_from_collection_id=collection.database_id,
            transform_type=TRANSFORM_TYPE_UPGRADE_1_0_TO_1_1
        )
        our_database.get_or_create_collection_id(collection.source_id,
                                                 collection.data_version,
                                                 collection.sample,
                                                 transform_from_collection_id=second_collection_id,
                                                 transform_type=TRANSFORM_TYPE_COMPILE_RELEASES)
    # Turn on the checks we want for every collection.
    our_database.mark_collection_check_data(collection_id, True)
    our_database.mark_collection_check_older_data_with_schema_version_1_1(collection_id, True)
def collection_data_store_finished_to_redis(sender,
                                            collection_id=None,
                                            collection_file_item_id=None,
                                            **kwargs):
    """Signal handler: push a 'collection-data-store-finished' JSON event onto
    the 'kingfisher_work' Redis list."""
    redis_conn = redis.Redis(host=our_config.redis_host, port=our_config.redis_port, db=our_config.redis_database)
    message = json.dumps({
        'type': 'collection-data-store-finished',
        'collection_id': collection_id,
        'collection_file_item_id': collection_file_item_id,
    })
    redis_conn.rpush('kingfisher_work', message)
def collection_store_finished_to_redis(sender,
                                       collection_id=None,
                                       **kwargs):
    """Push a 'collection-store-finished' event onto its Redis queue."""
    connection = redis.Redis(host=our_config.redis_host, port=our_config.redis_port, db=our_config.redis_database)
    payload = {'collection_id': collection_id}
    connection.rpush('kingfisher_work_collection_store_finished', json.dumps(payload))
| true | true |
f7ff8f95b00bb8f885e1e0d70e2b6eced1db0735 | 1,173 | py | Python | util/paths.py | weg-li-project/car-ml | 3c0f09c21fd14fa17e8f3faee84f7960e6e7f729 | [
"FTL",
"CNRI-Python"
] | null | null | null | util/paths.py | weg-li-project/car-ml | 3c0f09c21fd14fa17e8f3faee84f7960e6e7f729 | [
"FTL",
"CNRI-Python"
] | null | null | null | util/paths.py | weg-li-project/car-ml | 3c0f09c21fd14fa17e8f3faee84f7960e6e7f729 | [
"FTL",
"CNRI-Python"
] | null | null | null | import os
# All paths are derived from this module's own location so they resolve
# correctly regardless of the current working directory.
dirname = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.join(dirname, "..")
# Top-level data directory and raw input files.
data_path = os.path.join(root_path, "data/")
city_ids_path = os.path.join(data_path, "city_ids.csv")
charges_schroeder_path = os.path.join(data_path, "charges_schroeder/")
charges_csv_filepath = os.path.join(charges_schroeder_path, "charges.csv")
# Model checkpoint locations (YOLO and CNN models).
checkpoints_path = os.path.join(data_path, "checkpoints/")
yolo_lp_model_path = os.path.join(checkpoints_path, "yolo_lp/")
yolo_car_model_path = os.path.join(checkpoints_path, "yolo_car/")
cnn_alpr_model_path = os.path.join(checkpoints_path, "cnn_alpr/training")
cnn_color_rec_model_path = os.path.join(checkpoints_path, "cnn_color_rec/training")
cnn_car_rec_model_path = os.path.join(checkpoints_path, "cnn_car_rec/training")
# Test data and recorded results.
testdata_path = os.path.join(data_path, "testdata/")
vision_api_results_path = os.path.join(testdata_path, "vision_api_results.csv")
yolo_cnn_path = os.path.join(root_path, "yolo_cnn/")
# Text files listing car brands, car colors and YOLO class names.
car_brands_filepath = os.path.join(data_path, "car_brands.txt")
car_colors_filepath = os.path.join(data_path, "car_colors.txt")
class_names_yolo_car = os.path.join(data_path, "class_names_yolo_car.txt")
| 45.115385 | 83 | 0.793691 | import os
dirname = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.join(dirname, "..")
data_path = os.path.join(root_path, "data/")
city_ids_path = os.path.join(data_path, "city_ids.csv")
charges_schroeder_path = os.path.join(data_path, "charges_schroeder/")
charges_csv_filepath = os.path.join(charges_schroeder_path, "charges.csv")
checkpoints_path = os.path.join(data_path, "checkpoints/")
yolo_lp_model_path = os.path.join(checkpoints_path, "yolo_lp/")
yolo_car_model_path = os.path.join(checkpoints_path, "yolo_car/")
cnn_alpr_model_path = os.path.join(checkpoints_path, "cnn_alpr/training")
cnn_color_rec_model_path = os.path.join(checkpoints_path, "cnn_color_rec/training")
cnn_car_rec_model_path = os.path.join(checkpoints_path, "cnn_car_rec/training")
testdata_path = os.path.join(data_path, "testdata/")
vision_api_results_path = os.path.join(testdata_path, "vision_api_results.csv")
yolo_cnn_path = os.path.join(root_path, "yolo_cnn/")
car_brands_filepath = os.path.join(data_path, "car_brands.txt")
car_colors_filepath = os.path.join(data_path, "car_colors.txt")
class_names_yolo_car = os.path.join(data_path, "class_names_yolo_car.txt")
| true | true |
f7ff8feaadfea426f8f446a8b3e7180f1f38f203 | 2,496 | py | Python | ogindia/tests/test_data.py | pravinrawal/OG-India | 5903effc6ed2dad63ce149c54444f44323d35936 | [
"CC0-1.0"
] | 1 | 2018-05-31T11:19:15.000Z | 2018-05-31T11:19:15.000Z | ogindia/tests/test_data.py | dc4tpru/OG-India | b2d6b6a561c48d52a5aef0fff1c3b40691ee8680 | [
"CC0-1.0"
] | null | null | null | ogindia/tests/test_data.py | dc4tpru/OG-India | b2d6b6a561c48d52a5aef0fff1c3b40691ee8680 | [
"CC0-1.0"
] | null | null | null | import pytest
import os
from ogindia.utils import CPS_START_YEAR
def test_cps():
    """
    Check that setting `data` to 'cps' uses CPS data: a CPS-only variable
    must be populated and a PUF-only variable must be empty.
    """
    from ogindia import get_micro_data
    baseline = False
    start_year = 2016
    reform = {"II_em": {2017: 10000}}
    calc = get_micro_data.get_calculator(
        baseline, start_year, reform=reform,
        records_start_year=CPS_START_YEAR, data="cps")
    # blind_head is only in the CPS file and e00700 is only in the PUF.
    # See taxcalc/records_variables.json
    assert (calc.array("blind_head").sum() > 0 and
            calc.array("e00700").sum() == 0)
def test_set_path():
    """
    Check that 'notapath.csv' is passed through to taxcalc. An error
    message containing 'notapath.csv' is sufficient proof of this.
    """
    from ogindia import get_micro_data
    baseline = False
    start_year = 2016
    reform = {"II_em": {2017: 10000}}
    # In theory this path doesn't exist so there should be an IOError
    # But taxcalc checks if the path exists and if it doesn't, it tries
    # to read from an egg file. This raises a ValueError. At some point,
    # this could change. So I think it's best to catch both errors
    with pytest.raises((IOError, ValueError), match="notapath.csv"):
        get_micro_data.get_calculator(
            baseline, start_year, reform=reform,
            records_start_year=CPS_START_YEAR, data="notapath.csv")
def test_puf_path():
    """
    Check that setting `data` to a puf.csv path uses the PUF file.
    When puf.csv is not present, only verify that taxcalc reports the
    missing file.
    """
    from ogindia import get_micro_data
    baseline = False
    start_year = 2016
    reform = {"II_em": {2017: 10000}}
    # get path to puf if puf.csv in ogindia/ directory
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    puf_path = os.path.join(cur_dir, "../puf.csv")
    # puf.csv in ogindia/
    if os.path.exists(puf_path):
        calc = get_micro_data.get_calculator(
            baseline, start_year, reform=reform, data=puf_path)
        # blind_head is only in the CPS file and e00700 is only in the PUF.
        # See taxcalc/records_variables.json
        assert (calc.array('blind_head').sum() == 0 and
                calc.array('e00700').sum() > 0)
    # we do not have puf.csv
    else:
        # make sure TC is looking for puf.csv
        with pytest.raises((IOError, ValueError), match="puf.csv"):
            get_micro_data.get_calculator(
                baseline, start_year, reform=reform,
                records_start_year=CPS_START_YEAR, data=None)
| 34.666667 | 75 | 0.653446 | import pytest
import os
from ogindia.utils import CPS_START_YEAR
def test_cps():
from ogindia import get_micro_data
baseline = False
start_year = 2016
reform = {"II_em": {2017: 10000}}
calc = get_micro_data.get_calculator(
baseline, start_year, reform=reform,
records_start_year=CPS_START_YEAR, data="cps")
assert (calc.array("blind_head").sum() > 0 and
calc.array("e00700").sum() == 0)
def test_set_path():
from ogindia import get_micro_data
baseline = False
start_year = 2016
reform = {"II_em": {2017: 10000}}
# But taxcalc checks if the path exists and if it doesn't, it tries
with pytest.raises((IOError, ValueError), match="notapath.csv"):
get_micro_data.get_calculator(
baseline, start_year, reform=reform,
records_start_year=CPS_START_YEAR, data="notapath.csv")
def test_puf_path():
from ogindia import get_micro_data
baseline = False
start_year = 2016
reform = {"II_em": {2017: 10000}}
# get path to puf if puf.csv in ogindia/ directory
cur_dir = os.path.abspath(os.path.dirname(__file__))
puf_path = os.path.join(cur_dir, "../puf.csv")
# puf.csv in ogindia/
if os.path.exists(puf_path):
calc = get_micro_data.get_calculator(
baseline, start_year, reform=reform, data=puf_path)
# blind_head is only in the CPS file and e00700 is only in the PUF.
# See taxcalc/records_variables.json
assert (calc.array('blind_head').sum() == 0 and
calc.array('e00700').sum() > 0)
# we do not have puf.csv
else:
# make sure TC is looking for puf.csv
with pytest.raises((IOError, ValueError), match="puf.csv"):
get_micro_data.get_calculator(
baseline, start_year, reform=reform,
records_start_year=CPS_START_YEAR, data=None)
| true | true |
f7ff8feebee93dede0167bc6be2ac2ebf5d558ac | 5,313 | py | Python | lark/parsers/lalr_parser.py | asellappen/lark | da6a4e4d00022452abf59df4b4232480608d4f7d | [
"MIT"
] | null | null | null | lark/parsers/lalr_parser.py | asellappen/lark | da6a4e4d00022452abf59df4b4232480608d4f7d | [
"MIT"
] | null | null | null | lark/parsers/lalr_parser.py | asellappen/lark | da6a4e4d00022452abf59df4b4232480608d4f7d | [
"MIT"
] | null | null | null | """This module implements a LALR(1) Parser
"""
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com
from copy import deepcopy, copy
from ..exceptions import UnexpectedInput, UnexpectedToken
from ..lexer import Token
from ..utils import Serialize
from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
from .lalr_puppet import ParserPuppet
###{standalone
class LALR_Parser(Serialize):
    """Builds the LALR(1) parse table for a grammar and drives _Parser with it."""
    def __init__(self, parser_conf, debug=False):
        # Run the LALR(1) analysis up front; the resulting parse table is
        # all that the runtime parser needs.
        analysis = LALR_Analyzer(parser_conf, debug=debug)
        analysis.compute_lalr()
        callbacks = parser_conf.callbacks
        self._parse_table = analysis.parse_table
        self.parser_conf = parser_conf
        self.parser = _Parser(analysis.parse_table, callbacks, debug)
    @classmethod
    def deserialize(cls, data, memo, callbacks, debug=False):
        """Rebuild a parser from a serialized parse table without re-analysis.

        Bypasses __init__ via __new__, so the deserialized instance carries
        no `parser_conf` attribute.
        """
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks, debug)
        return inst
    def serialize(self, memo):
        # Only the parse table is serialized; callbacks are re-supplied on load.
        return self._parse_table.serialize(memo)
    def parse(self, *args):
        # Delegate to the underlying _Parser.parse(lexer, start, ...).
        return self.parser.parse(*args)
class ParseConf:
    """Per-parse configuration: the parse table specialized to one start symbol.

    Caches the start/end states and the state-transition dict so the hot
    parsing loop avoids repeated attribute lookups.
    """
    __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states'
    def __init__(self, parse_table, callbacks, start):
        self.parse_table = parse_table
        # Entry and accept states for the chosen start symbol.
        self.start_state = self.parse_table.start_states[start]
        self.end_state = self.parse_table.end_states[start]
        self.states = self.parse_table.states
        self.callbacks = callbacks
        self.start = start
class ParserState:
    """Mutable LALR automaton state: the state stack and the value stack."""
    __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack'
    def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None):
        self.parse_conf = parse_conf
        self.lexer = lexer
        # A fresh parse starts from the table's start state with no values.
        self.state_stack = state_stack or [self.parse_conf.start_state]
        self.value_stack = value_stack or []
    @property
    def position(self):
        # The automaton's current state is the top of the state stack.
        return self.state_stack[-1]
    # Necessary for match_examples() to work
    def __eq__(self, other):
        if not isinstance(other, ParserState):
            return False
        return self.position == other.position
    def __copy__(self):
        # Copies both stacks (values deeply) but shares the lexer (see XXX).
        return type(self)(
            self.parse_conf,
            self.lexer, # XXX copy
            copy(self.state_stack),
            deepcopy(self.value_stack),
        )
    def copy(self):
        return copy(self)
    def feed_token(self, token, is_end=False):
        """Advance the automaton by one token.

        Performs as many reductions as the parse table dictates, then shifts
        `token`.  With `is_end` true, returns the fully-reduced parse value
        once the accept state is reached.

        Raises:
            UnexpectedToken: when the current state has no action for `token`.
        """
        # Local aliases keep the hot loop free of repeated attribute lookups.
        state_stack = self.state_stack
        value_stack = self.value_stack
        states = self.parse_conf.states
        end_state = self.parse_conf.end_state
        callbacks = self.parse_conf.callbacks
        while True:
            state = state_stack[-1]
            try:
                action, arg = states[state][token.type]
            except KeyError:
                # Upper-case keys in the table are terminals — the tokens
                # that would have been legal here.
                expected = {s for s in states[state].keys() if s.isupper()}
                raise UnexpectedToken(token, expected, state=self, puppet=None)
            assert arg != end_state
            if action is Shift:
                # shift once and return
                assert not is_end
                state_stack.append(arg)
                value_stack.append(token)
                return
            else:
                # reduce+shift as many times as necessary
                rule = arg
                size = len(rule.expansion)
                if size:
                    # Pop the reduced symbols off both stacks.
                    s = value_stack[-size:]
                    del state_stack[-size:]
                    del value_stack[-size:]
                else:
                    s = []
                # Build the rule's value, then take the goto transition
                # from the uncovered state.
                value = callbacks[rule](s)
                _action, new_state = states[state_stack[-1]][rule.origin.name]
                assert _action is Shift
                state_stack.append(new_state)
                value_stack.append(value)
                if is_end and state_stack[-1] == end_state:
                    return value_stack[-1]
class _Parser:
    """Runs the LALR automaton over a token stream using a prebuilt parse table."""
    def __init__(self, parse_table, callbacks, debug=False):
        self.parse_table = parse_table
        self.callbacks = callbacks
        self.debug = debug
    def parse(self, lexer, start, value_stack=None, state_stack=None):
        """Parse from `start`; optional stacks allow resuming a parse mid-way."""
        parse_conf = ParseConf(self.parse_table, self.callbacks, start)
        parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)
        return self.parse_from_state(parser_state)
    def parse_from_state(self, state):
        # Main LALR-parser loop
        try:
            token = None
            for token in state.lexer.lex(state):
                state.feed_token(token)
            # Feed a synthetic $END token, borrowing position info from the
            # last real token when there was one.
            token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
            return state.feed_token(token, True)
        except UnexpectedInput as e:
            # Attach an interactive puppet for error handling, if available.
            try:
                e.puppet = ParserPuppet(self, state, state.lexer)
            except NameError:
                pass
            raise e
        except Exception as e:
            if self.debug:
                # Dump the automaton's state stack to help diagnose failures.
                print("")
                print("STATE STACK DUMP")
                print("----------------")
                for i, s in enumerate(state.state_stack):
                    print('%d)' % i , s)
                print("")
            raise
###}
| 32.2 | 100 | 0.590627 |
from copy import deepcopy, copy
from ..exceptions import UnexpectedInput, UnexpectedToken
from ..lexer import Token
from ..utils import Serialize
from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
from .lalr_puppet import ParserPuppet
lize):
def __init__(self, parser_conf, debug=False):
analysis = LALR_Analyzer(parser_conf, debug=debug)
analysis.compute_lalr()
callbacks = parser_conf.callbacks
self._parse_table = analysis.parse_table
self.parser_conf = parser_conf
self.parser = _Parser(analysis.parse_table, callbacks, debug)
@classmethod
def deserialize(cls, data, memo, callbacks, debug=False):
inst = cls.__new__(cls)
inst._parse_table = IntParseTable.deserialize(data, memo)
inst.parser = _Parser(inst._parse_table, callbacks, debug)
return inst
def serialize(self, memo):
return self._parse_table.serialize(memo)
def parse(self, *args):
return self.parser.parse(*args)
class ParseConf:
__slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states'
def __init__(self, parse_table, callbacks, start):
self.parse_table = parse_table
self.start_state = self.parse_table.start_states[start]
self.end_state = self.parse_table.end_states[start]
self.states = self.parse_table.states
self.callbacks = callbacks
self.start = start
class ParserState:
__slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack'
def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None):
self.parse_conf = parse_conf
self.lexer = lexer
self.state_stack = state_stack or [self.parse_conf.start_state]
self.value_stack = value_stack or []
@property
def position(self):
return self.state_stack[-1]
def __eq__(self, other):
if not isinstance(other, ParserState):
return False
return self.position == other.position
def __copy__(self):
return type(self)(
self.parse_conf,
self.lexer,
copy(self.state_stack),
deepcopy(self.value_stack),
)
def copy(self):
return copy(self)
def feed_token(self, token, is_end=False):
state_stack = self.state_stack
value_stack = self.value_stack
states = self.parse_conf.states
end_state = self.parse_conf.end_state
callbacks = self.parse_conf.callbacks
while True:
state = state_stack[-1]
try:
action, arg = states[state][token.type]
except KeyError:
expected = {s for s in states[state].keys() if s.isupper()}
raise UnexpectedToken(token, expected, state=self, puppet=None)
assert arg != end_state
if action is Shift:
assert not is_end
state_stack.append(arg)
value_stack.append(token)
return
else:
rule = arg
size = len(rule.expansion)
if size:
s = value_stack[-size:]
del state_stack[-size:]
del value_stack[-size:]
else:
s = []
value = callbacks[rule](s)
_action, new_state = states[state_stack[-1]][rule.origin.name]
assert _action is Shift
state_stack.append(new_state)
value_stack.append(value)
if is_end and state_stack[-1] == end_state:
return value_stack[-1]
class _Parser:
def __init__(self, parse_table, callbacks, debug=False):
self.parse_table = parse_table
self.callbacks = callbacks
self.debug = debug
def parse(self, lexer, start, value_stack=None, state_stack=None):
parse_conf = ParseConf(self.parse_table, self.callbacks, start)
parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)
return self.parse_from_state(parser_state)
def parse_from_state(self, state):
try:
token = None
for token in state.lexer.lex(state):
state.feed_token(token)
token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
return state.feed_token(token, True)
except UnexpectedInput as e:
try:
e.puppet = ParserPuppet(self, state, state.lexer)
except NameError:
pass
raise e
except Exception as e:
if self.debug:
print("")
print("STATE STACK DUMP")
print("----------------")
for i, s in enumerate(state.state_stack):
print('%d)' % i , s)
print("")
raise
| true | true |
f7ff90f22f20a2fc1aa1486b41f8828b018cc1e2 | 2,803 | py | Python | test/Python/Tracing/slice.py | cathyzhyi/mlir-npcomp | 79a3f639bfb449ba1409ff6dff161badf5a11b44 | [
"Apache-2.0"
] | null | null | null | test/Python/Tracing/slice.py | cathyzhyi/mlir-npcomp | 79a3f639bfb449ba1409ff6dff161badf5a11b44 | [
"Apache-2.0"
] | null | null | null | test/Python/Tracing/slice.py | cathyzhyi/mlir-npcomp | 79a3f639bfb449ba1409ff6dff161badf5a11b44 | [
"Apache-2.0"
] | null | null | null | # RUN: %PYTHON %s | FileCheck %s --dump-input=fail
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import numpy as np
import npcomp as npc
from npcomp.types import *
def slice_array1(a: np.ndarray) -> np.ndarray:
    # One slice expression covering every indexing form the tracer must
    # handle: integer index, strided slice, plain slice, Ellipsis, and a
    # full-range slice.  The CHECK lines below pin the emitted ops.
    return a[1, 2:10:2, 3:4, ..., :, 0]
# TODO: Implement subclassing and deriving constraints by run
# Export the function and trace it into an MLIR module; FileCheck then
# verifies the printed module against the CHECK lines below.
exp = npc.Exporter()
exp.slice_array1 = slice_array1
mb = npc.tracing.ModuleBuilder()
mb.trace(exp.slice_array1)
# TODO: The numpy.get_slice op emission should be analyzed: it probably
# needs to both accept and produce either arrays or tensors and the following
# narrow should do likewise.
# CHECK-LABEL: func @slice_array1(
# CHECK-SAME: %[[VAL_0:.*]]: tensor<*x!numpy.any_dtype>) -> tensor<*x!numpy.any_dtype> {
# CHECK: %[[VAL_1:.*]] = constant 1 : index
# CHECK: %[[VAL_2:.*]] = constant 2 : index
# CHECK: %[[VAL_3:.*]] = constant 10 : index
# CHECK: %[[VAL_4:.*]] = constant 2 : index
# CHECK: %[[VAL_5:.*]] = basicpy.slot_object_make(%[[VAL_2]], %[[VAL_3]], %[[VAL_4]]) -> !basicpy.SlotObject<slice, index, index, index>
# CHECK: %[[VAL_6:.*]] = constant 3 : index
# CHECK: %[[VAL_7:.*]] = constant 4 : index
# CHECK: %[[VAL_8:.*]] = basicpy.singleton : !basicpy.NoneType
# CHECK: %[[VAL_9:.*]] = basicpy.slot_object_make(%[[VAL_6]], %[[VAL_7]], %[[VAL_8]]) -> !basicpy.SlotObject<slice, index, index, !basicpy.NoneType>
# CHECK: %[[VAL_10:.*]] = basicpy.singleton : !basicpy.EllipsisType
# CHECK: %[[VAL_11:.*]] = basicpy.singleton : !basicpy.NoneType
# CHECK: %[[VAL_12:.*]] = basicpy.singleton : !basicpy.NoneType
# CHECK: %[[VAL_13:.*]] = basicpy.singleton : !basicpy.NoneType
# CHECK: %[[VAL_14:.*]] = basicpy.slot_object_make(%[[VAL_11]], %[[VAL_12]], %[[VAL_13]]) -> !basicpy.SlotObject<slice, !basicpy.NoneType, !basicpy.NoneType, !basicpy.NoneType>
# CHECK: %[[VAL_15:.*]] = constant 0 : index
# CHECK: %[[VAL_16:.*]] = numpy.get_slice %[[VAL_0]], %[[VAL_1]], %[[VAL_5]], %[[VAL_9]], %[[VAL_10]], %[[VAL_14]], %[[VAL_15]] : (tensor<*x!numpy.any_dtype>, index, !basicpy.SlotObject<slice, index, index, index>, !basicpy.SlotObject<slice, index, index, !basicpy.NoneType>, !basicpy.EllipsisType, !basicpy.SlotObject<slice, !basicpy.NoneType, !basicpy.NoneType, !basicpy.NoneType>, index) -> !numpy.ndarray<*:?>
# CHECK: %[[VAL_17:.*]] = numpy.narrow %[[VAL_16]] : (!numpy.ndarray<*:?>) -> tensor<*x!numpy.any_dtype>
# CHECK: return %[[VAL_17]] : tensor<*x!numpy.any_dtype>
# CHECK: }
print(mb.module)
| 57.204082 | 423 | 0.616126 |
import numpy as np
import npcomp as npc
from npcomp.types import *
def slice_array1(a: np.ndarray) -> np.ndarray:
return a[1, 2:10:2, 3:4, ..., :, 0]
exp = npc.Exporter()
exp.slice_array1 = slice_array1
mb = npc.tracing.ModuleBuilder()
mb.trace(exp.slice_array1)
print(mb.module)
| true | true |
f7ff919354d89409915048a72e9c703ef3ab68c9 | 228 | py | Python | tests/test_ws_timeout_error.py | gridsmartercities/pywsitest | 7d438477bfc61b6e3adeab6530f52a24359249d8 | [
"MIT"
] | 19 | 2019-07-31T14:51:25.000Z | 2021-12-10T08:43:46.000Z | tests/test_ws_timeout_error.py | gridsmartercities/pywsitest | 7d438477bfc61b6e3adeab6530f52a24359249d8 | [
"MIT"
] | 10 | 2019-07-30T12:07:24.000Z | 2020-12-27T18:33:07.000Z | tests/test_ws_timeout_error.py | gridsmartercities/pywsitest | 7d438477bfc61b6e3adeab6530f52a24359249d8 | [
"MIT"
] | 1 | 2021-03-29T09:33:45.000Z | 2021-03-29T09:33:45.000Z | import unittest
from pywsitest import WSTimeoutError
class WSTimeoutErrorTests(unittest.TestCase):
    """Sanity checks for the WSTimeoutError exception type."""
    def test_receive_timeout_error(self):
        # WSTimeoutError must be raisable and catchable as itself.
        with self.assertRaises(WSTimeoutError):
            raise WSTimeoutError()
| 20.727273 | 47 | 0.758772 | import unittest
from pywsitest import WSTimeoutError
class WSTimeoutErrorTests(unittest.TestCase):
def test_receive_timeout_error(self):
with self.assertRaises(WSTimeoutError):
raise WSTimeoutError()
| true | true |
f7ff919e79118549fe71d1a4b1102a4ab7d17b01 | 2,060 | py | Python | find-atlas-review-errors/review_visits.py | gbabineau/find-atlas-coding-errors | d64601eaf5171e5f867194ff925bc06bbacd8cd6 | [
"MIT"
] | null | null | null | find-atlas-review-errors/review_visits.py | gbabineau/find-atlas-coding-errors | d64601eaf5171e5f867194ff925bc06bbacd8cd6 | [
"MIT"
] | null | null | null | find-atlas-review-errors/review_visits.py | gbabineau/find-atlas-coding-errors | d64601eaf5171e5f867194ff925bc06bbacd8cd6 | [
"MIT"
] | null | null | null | """Reviews visits in an area for checklists from a user and then checks the checklist."""
# Copyright (c) 2019 Guy Babineau
from .review_checklist import review_checklist
from ebird.api import get_visits
import sys
from time import sleep
import datetime
def review_visits(ebird_api_key, user, area, day, end_day, max_records, verbose):
    """Check up to max_records checklists per day from an area between day and end_day.

    According to eBird, this API should not be abused, so limit the size of
    the data retrieved. A built-in delay throttles the requests.

    Args:
        ebird_api_key: access key from eBird tied to an account
        user: eBird user display name (not id)
        area: eBird region, e.g. Country-State-County = US-VA-003 for
            Albemarle County, Virginia, USA
        day: first day to look at, e.g. 2018-04-08
        end_day: day to stop at (exclusive), e.g. 2018-04-15
        max_records: maximum records to request from eBird per day
        verbose: whether to print additional information (currently unused)

    Returns:
        None - prints to stdout

    Raises:
        SystemExit: via sys.exit() when a day returns exactly max_records
            results, suggesting a smaller area or a larger max_records.
    """
    while day < end_day:
        day_string = day.strftime("%Y-%m-%d")
        records = get_visits(ebird_api_key, area, day_string, max_records)
        # Hitting the cap means results were probably truncated; bail out
        # rather than silently reviewing an incomplete day.
        if len(records) == max_records:
            sys.exit("Error: got max_records(" + str(max_records) + ") in " + area + " on " + day_string
                     + " - use a smaller area or larger max_records")
        for record in records:
            # Anonymous checklists have no 'userDisplayName' key; .get()
            # folds the anonymity check and the user match into one test.
            if record.get('userDisplayName') == user:
                print("Reviewing Checklist from ", day_string, record['loc']['name'])
                review_checklist(ebird_api_key, record['subId'])
        # Limit get_visits calls to eBird to once every two seconds.
        sleep(2)
        day += datetime.timedelta(days=1)
| 36.785714 | 109 | 0.618932 |
from .review_checklist import review_checklist
from ebird.api import get_visits
import sys
from time import sleep
import datetime
def review_visits(ebird_api_key, user, area, day, end_day, max_records, verbose):
while day < end_day:
day_string = day.strftime("%Y-%m-%d")
records = get_visits(ebird_api_key, area, day_string, max_records)
if len(records) == max_records:
sys.exit("Error: got max_records(" + str(max_records) + ") in " + area + " on " + day_string
+ " - use a smaller area or larger max_records")
for record in records:
if 'userDisplayName' in record.keys():
if (record['userDisplayName'] == user):
print("Reviewing Checklist from ", day_string, record['loc']['name'])
review_checklist(ebird_api_key, record['subId'])
sleep(2)
day += datetime.timedelta(days=1)
| true | true |
f7ff9368d46a6ee205cf989e3f3614e7ddcfbd01 | 1,806 | py | Python | forms/widgets.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 206 | 2015-10-15T07:05:08.000Z | 2021-02-19T11:48:36.000Z | forms/widgets.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 8 | 2017-10-16T10:18:31.000Z | 2022-03-09T14:24:27.000Z | forms/widgets.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 61 | 2015-10-15T08:12:44.000Z | 2022-03-10T12:25:06.000Z | # Django Imports
from django import forms
from django.utils.html import format_html
from django.utils.safestring import mark_safe
# Deprecated in Django 1.11
# forms.widgets.RadioChoiceInput
# https://docs.djangoproject.com/en/2.0/releases/1.11/#changes-due-to-the-introduction-of-template-based-widget-rendering
#class StarRatingRadioChoiceInput(forms.widgets.RadioChoiceInput):
class StarRatingRadioChoiceInput(object):
    """Single radio choice rendered as an empty <label> followed by the input tag.

    NOTE(review): previously subclassed forms.widgets.RadioChoiceInput, which
    was removed in Django 1.11 (see the comment above); as written it still
    expects a base class to supply ``id_for_label``, ``attrs`` and ``tag()``
    -- confirm before use with template-based widget rendering.
    """
    def render(self, name=None, value=None, attrs=None, choices=()):
        # Associate the label with the input when an id is available.
        if self.id_for_label:
            label_for = format_html(' for="{}"', self.id_for_label)
        else:
            label_for = ''
        # Per-call attrs override the widget's default attrs.
        attrs = dict(self.attrs, **attrs) if attrs else self.attrs
        # TODO: some kind of double encoding is happening, somehow
        #result = format_html(
        #    '<label{}></label>{}', label_for, self.tag(attrs)
        #)
        result = mark_safe('<label%s></label>%s' % (label_for, self.tag(attrs),))
        return result
# Deprecated
#class StarRatingRadioChoiceFieldRenderer(forms.widgets.RadioFieldRenderer):
class StarRatingRadioChoiceFieldRenderer(object):
    """Wraps the radio group in a <span class="star-rating"> container.

    NOTE(review): previously subclassed forms.widgets.RadioFieldRenderer
    (deprecated; see the comment above) -- the outer_html/inner_html
    templates only take effect if a compatible renderer base consumes them.
    """
    choice_input_class = StarRatingRadioChoiceInput
    outer_html = '<span{id_attr} class="star-rating">{content}</span>'
    inner_html = '{choice_value}'
class StarRatingRadioSelect(forms.RadioSelect):
    """A RadioSelect whose choices render via the star-rating renderer."""

    renderer = StarRatingRadioChoiceFieldRenderer

    def __init__(self, *args, **kwargs):
        super(StarRatingRadioSelect, self).__init__(*args, **kwargs)

    def get_choices(self, min_value, max_value):
        """Return the choices: one blank entry, then one per rating value."""
        rating_pairs = [(rating, rating) for rating in range(min_value, max_value + 1)]
        return [('', '')] + rating_pairs
| 41.045455 | 121 | 0.699336 |
from django import forms
from django.utils.html import format_html
from django.utils.safestring import mark_safe
me=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
result = mark_safe('<label%s></label>%s' % (label_for, self.tag(attrs),))
return result
class StarRatingRadioChoiceFieldRenderer(object):
choice_input_class = StarRatingRadioChoiceInput
outer_html = '<span{id_attr} class="star-rating">{content}</span>'
inner_html = '{choice_value}'
class StarRatingRadioSelect(forms.RadioSelect):
renderer = StarRatingRadioChoiceFieldRenderer
def __init__(self, *args, **kwargs):
super(StarRatingRadioSelect, self).__init__(*args, **kwargs)
def get_choices(self, min_value, max_value):
choices = [('', '',),]
for rating in range(min_value, max_value + 1):
choices.append((rating, rating,))
return choices
| true | true |
f7ff93a889c163a27fc4941239fce6a14517aee6 | 315 | py | Python | 01-Defining_classes_exercises/01-Car.py | Beshkov/OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | 1 | 2021-05-24T17:51:53.000Z | 2021-05-24T17:51:53.000Z | 01-Defining_classes_exercises/01-Car.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | 01-Defining_classes_exercises/01-Car.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | class Car:
def __init__(self, name: str, model: str, engine: str):
self.name = name
self.model = model
self.engine = engine
def get_info(self):
return f"This is {self.name} {self.model} with engine {self.engine}"
# Demo: build a sample car and print its description.
car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
| 26.25 | 76 | 0.6 | class Car:
def __init__(self, name: str, model: str, engine: str):
self.name = name
self.model = model
self.engine = engine
def get_info(self):
return f"This is {self.name} {self.model} with engine {self.engine}"
car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
| true | true |
f7ff942579a4801b676132b04a313b199a535b79 | 1,462 | py | Python | makeCourse/theme.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | makeCourse/theme.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | makeCourse/theme.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | import logging
from distutils.dir_util import copy_tree
logger = logging.getLogger(__name__)
class Theme(object):
    """A course theme: its name, on-disk source directory and display settings."""
    def __init__(self, course, name, source, theme_data):
        """
        :param course: the course this theme belongs to
        :param name: the theme's identifier
        :param source: path to the theme's source directory (pathlib-style)
        :param theme_data: dict of settings; recognised keys: title, path, hidden
        """
        self.course = course
        self.hidden = False
        self.name = name
        self.source = source
        # Copy only the recognised settings onto the instance.  NOTE(review):
        # `title` and `path` have no defaults here, so __str__/get_context
        # assume theme_data supplies them -- confirm with the config loader.
        keys = ('title', 'path', 'hidden')
        for key in keys:
            if key in theme_data:
                setattr(self, key, theme_data[key])
    def __str__(self):
        return '{}'.format(self.title)
    def __repr__(self):
        return '<makeCourse.theme.Theme: {}>'.format(self.title)
    @property
    def template_path(self):
        """Directory holding this theme's templates."""
        return self.source / 'templates'
    def alt_themes_contexts(self):
        """Contexts of every visible theme other than this one."""
        return [t.get_context() for t in self.course.themes if not (t.hidden or t == self)]
    def get_context(self):
        """Return the template context describing this theme."""
        return {
            'title': self.title,
            'source': self.name,
            'path': self.path,
            'hidden': self.hidden,
        }
    def copy_static_files(self):
        """Copy the theme's static files into the build's static directory."""
        # shutil.copytree replaces the deprecated distutils.dir_util.copy_tree
        # (distutils was removed in Python 3.12); dirs_exist_ok=True (3.8+)
        # merges into an existing destination the same way copy_tree did.
        import shutil
        srcPath = self.source / 'static'
        dstPath = self.course.get_build_dir() / 'static'
        logger.debug("Copying theme's static directory to the build's static directory...")
        logger.debug("  {src} => {dest}".format(src=srcPath, dest=dstPath))
        try:
            shutil.copytree(str(srcPath), str(dstPath), dirs_exist_ok=True)
        except Exception:
            logger.warning("Warning: Problem copying the theme's static files")
| 28.666667 | 91 | 0.597127 | import logging
from distutils.dir_util import copy_tree
logger = logging.getLogger(__name__)
class Theme(object):
def __init__(self, course, name, source, theme_data):
self.course = course
self.hidden = False
self.name = name
self.source = source
keys = ('title','path','hidden')
for key in keys:
if key in theme_data:
setattr(self, key, theme_data[key])
def __str__(self):
return '{}'.format(self.title)
def __repr__(self):
return '<makeCourse.theme.Theme: {}>'.format(self.title)
@property
def template_path(self):
return self.source / 'templates'
def alt_themes_contexts(self):
return [t.get_context() for t in self.course.themes if not (t.hidden or t == self)]
def get_context(self):
return {
'title': self.title,
'source': self.name,
'path': self.path,
'hidden': self.hidden,
}
def copy_static_files(self):
srcPath = self.source / 'static'
dstPath = self.course.get_build_dir() / 'static'
logger.debug("Copying theme's static directory to the build's static directory...")
logger.debug(" {src} => {dest}".format(src=srcPath, dest=dstPath))
try:
copy_tree(str(srcPath), str(dstPath))
except Exception:
logger.warning("Warning: Problem copying the theme's static files")
| true | true |
f7ff9471450d2bad8c34351cb527a5d4e836e49d | 1,647 | py | Python | consumer_markethistory.py | brentnowak/spotmarket | b19982e6bbf595f43bb200802aa24da0802b447a | [
"Unlicense"
] | 20 | 2016-01-29T19:39:46.000Z | 2018-05-05T08:58:21.000Z | consumer_markethistory.py | brentnowak/spotmarket | b19982e6bbf595f43bb200802aa24da0802b447a | [
"Unlicense"
] | 2 | 2016-03-18T13:32:08.000Z | 2016-06-12T01:00:36.000Z | consumer_markethistory.py | brentnowak/spotmarket | b19982e6bbf595f43bb200802aa24da0802b447a | [
"Unlicense"
] | 11 | 2016-01-29T18:59:04.000Z | 2021-01-31T09:39:11.000Z | #-----------------------------------------------------------------------------
# consumer_markethistory.py -
# https://github.com/brentnowak/spotmarket
#-----------------------------------------------------------------------------
# Version: 0.1
# - Initial release
# Version: 0.2
# - Migration to market.history
# Version: 0.3
# - Migration to concurrent.futures
#-----------------------------------------------------------------------------
#
# Input: List of typeIDs from 'market.tracking' table that have 'enabled' set to 1.
# Output: Populate 'market.history' table.
#-----------------------------------------------------------------------------
import concurrent.futures
import multiprocessing
from time import sleep
from _market import *
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
# Suppress InsecurePlatformWarning messages
maxWorkers = multiprocessing.cpu_count() # Scale workers to machine size
def main():
    """Fetch CREST market history for every tracked typeID in every region.

    Uses a process pool sized to the machine's CPU count; results are
    discarded here, so market_getcrestdata presumably persists its data
    as a side effect (TODO confirm in _market).
    """
    for regionID in regionIDs:
        currentItems = 1  # NOTE(review): counted but never read afterwards
        with concurrent.futures.ProcessPoolExecutor(max_workers=maxWorkers) as executor:
            # One worker task per typeID for this region.
            future_to_typeid = {executor.submit(market_getcrestdata, regionID[0], typeID[0]): typeID[0] for typeID in typeIDs}
            for future in concurrent.futures.as_completed(future_to_typeid):
                currentItems += 1
        market_setimportresult(regionID[0], 1) # Set import to true so we can skip this region if we crash
if __name__ == "__main__":
    # Load the work lists once, then run a single import pass over all regions.
    typeIDs = market_typeids()
    regionIDs = market_regionids()
    main()
    # NOTE(review): despite the message there is no loop here -- the script
    # sleeps once and then exits; presumably an external scheduler re-runs it.
    # `sys` is not imported directly; it presumably arrives via
    # `from _market import *` -- verify.
    print("[Completed Run:Sleeping for 1 Hour]")
    sys.stdout.flush()
    sleep(3600)
| 33.612245 | 126 | 0.585307 |
import concurrent.futures
import multiprocessing
from time import sleep
from _market import *
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
maxWorkers = multiprocessing.cpu_count()
def main():
for regionID in regionIDs:
currentItems = 1
with concurrent.futures.ProcessPoolExecutor(max_workers=maxWorkers) as executor:
future_to_typeid = {executor.submit(market_getcrestdata, regionID[0], typeID[0]): typeID[0] for typeID in typeIDs}
for future in concurrent.futures.as_completed(future_to_typeid):
currentItems += 1
market_setimportresult(regionID[0], 1)
if __name__ == "__main__":
typeIDs = market_typeids()
regionIDs = market_regionids()
main()
print("[Completed Run:Sleeping for 1 Hour]")
sys.stdout.flush()
sleep(3600)
| true | true |
f7ff95485672645e78447f87866304c52504f253 | 202 | py | Python | Modules/regression/classes/termcol.py | williamclot/PrivacyPreservingRidgeRegression | 5e0379162bdf114d862982a5ac438f076aa3c81b | [
"X11"
] | 1 | 2019-08-19T13:38:51.000Z | 2019-08-19T13:38:51.000Z | Modules/regression/classes/termcol.py | williamclot/PrivacyPreservingRidgeRegression | 5e0379162bdf114d862982a5ac438f076aa3c81b | [
"X11"
] | null | null | null | Modules/regression/classes/termcol.py | williamclot/PrivacyPreservingRidgeRegression | 5e0379162bdf114d862982a5ac438f076aa3c81b | [
"X11"
] | 1 | 2021-03-05T06:46:01.000Z | 2021-03-05T06:46:01.000Z | class termcol:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | 22.444444 | 25 | 0.514851 | class termcol:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m' | true | true |
f7ff964837facfd0d3166dcc02e325121e8c1e97 | 1,588 | py | Python | blueapps/account/utils/sms.py | TencentBlueKing/bk-chatbot | da37fb2197142eae32158cdb5c2b658100133fff | [
"MIT"
] | 11 | 2021-05-27T11:45:02.000Z | 2022-03-29T15:03:28.000Z | blueapps/account/utils/sms.py | hchicken/bk-chatbot | da37fb2197142eae32158cdb5c2b658100133fff | [
"MIT"
] | 2 | 2021-08-16T03:59:19.000Z | 2021-09-29T09:31:39.000Z | blueapps/account/utils/sms.py | hchicken/bk-chatbot | da37fb2197142eae32158cdb5c2b658100133fff | [
"MIT"
] | 12 | 2021-05-27T11:59:18.000Z | 2022-03-17T07:21:53.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from blueapps.account.conf import ConfFixture
from blueapps.utils import client
from blueapps.utils.esbclient import CustomComponentAPI
"""
发送短信工具文件,开发者可以直接调用此处的send_sms函数,屏蔽环境之间的差异
"""
def send_sms(user_list, content):
    """Send an SMS message to the specified users.

    :param user_list: list of usernames to notify
    :param content: message content
    :return: result of the underlying SMS component call; raises on failure
    """
    # 1. Resolve the actual SMS-sending handle configured for this environment.
    sms_module = client.__getattr__(ConfFixture.SMS_CLIENT_MODULE)
    sms_func = sms_module.__getattr__(ConfFixture.SMS_CLIENT_FUNC)
    # 2. Build the request payload expected by the component.
    request_args = {
        ConfFixture.SMS_CLIENT_USER_ARGS_NAME: ','.join(user_list),
        ConfFixture.SMS_CLIENT_CONTENT_ARGS_NAME: content
    }
    # 3. Dispatch. Custom components are invoked via POST; built-in ESB
    # components are plain callables. isinstance() (instead of the original
    # `type(...) ==` comparison) also accepts CustomComponentAPI subclasses.
    if isinstance(sms_func, CustomComponentAPI):
        result = sms_func.post(request_args)
    else:
        result = sms_func(request_args)
    return result
| 31.76 | 115 | 0.745592 |
from blueapps.account.conf import ConfFixture
from blueapps.utils import client
from blueapps.utils.esbclient import CustomComponentAPI
def send_sms(user_list, content):
sms_module = client.__getattr__(ConfFixture.SMS_CLIENT_MODULE)
sms_func = sms_module.__getattr__(ConfFixture.SMS_CLIENT_FUNC)
request_args = {
ConfFixture.SMS_CLIENT_USER_ARGS_NAME: ','.join(user_list),
ConfFixture.SMS_CLIENT_CONTENT_ARGS_NAME: content
}
if type(sms_func) == CustomComponentAPI:
result = sms_func.post(request_args)
else:
result = sms_func(request_args)
return result
| true | true |
f7ff96fb054f634cdbe2acbb1c32a07b7bf004a6 | 801 | py | Python | examples/ServiceMonitor/monitor/filters/timeout.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | 1 | 2018-11-05T07:34:36.000Z | 2018-11-05T07:34:36.000Z | examples/ServiceMonitor/monitor/filters/timeout.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | null | null | null | examples/ServiceMonitor/monitor/filters/timeout.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | null | null | null | import datetime
from .base import BaseFilter
class Timeout(BaseFilter):
def get_default_options(self):
return {
"key": "timestamp",
"timestamp_format": "%Y-%m-%d %H:%M:%S",
}
def __call__(self, event):
key = self.options["key"]
fmt = self.options["timestamp_format"]
if key in event:
last_dt = datetime.datetime.strptime(event[key], fmt)
curr_dt = datetime.datetime.now()
delta = curr_dt - last_dt
print(delta)
if delta.total_seconds() > self.options["timeout"]:
yield {
"msg": "超时",
"last_timestamp": last_dt.strftime(fmt),
"current_timestamp": curr_dt.strftime(fmt),
}
| 27.62069 | 65 | 0.520599 | import datetime
from .base import BaseFilter
class Timeout(BaseFilter):
def get_default_options(self):
return {
"key": "timestamp",
"timestamp_format": "%Y-%m-%d %H:%M:%S",
}
def __call__(self, event):
key = self.options["key"]
fmt = self.options["timestamp_format"]
if key in event:
last_dt = datetime.datetime.strptime(event[key], fmt)
curr_dt = datetime.datetime.now()
delta = curr_dt - last_dt
print(delta)
if delta.total_seconds() > self.options["timeout"]:
yield {
"msg": "超时",
"last_timestamp": last_dt.strftime(fmt),
"current_timestamp": curr_dt.strftime(fmt),
}
| true | true |
f7ff97037fe05e4ebb3b0c42d212be0d1a25687e | 6,513 | py | Python | src/model.py | tadeoos/gpt-2 | 32e29f1acca9ade9913b5d0d2b71384c31357eed | [
"MIT"
] | null | null | null | src/model.py | tadeoos/gpt-2 | 32e29f1acca9ade9913b5d0d2b71384c31357eed | [
"MIT"
] | null | null | null | src/model.py | tadeoos/gpt-2 | 32e29f1acca9ade9913b5d0d2b71384c31357eed | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.contrib.training import HParams
def default_hparams():
    """Default model hyperparameters.

    n_vocab is 0 here and is presumably overwritten from the loaded
    checkpoint's hparams file -- it is not a usable vocabulary size.
    """
    return HParams(
        n_vocab=0,
        n_ctx=1024,
        n_embd=768,
        n_head=12,
        n_layer=12,
    )
def shape_list(x):
    """Deal with dynamic shape in tensorflow cleanly."""
    static = x.shape.as_list()
    dynamic = tf.shape(x)
    # Use the statically known size where available; otherwise fall back to
    # the corresponding component of the dynamic shape tensor.
    shape = []
    for idx, dim in enumerate(static):
        shape.append(dim if dim is not None else dynamic[idx])
    return shape
def softmax(x, axis=-1):
    """Numerically stable softmax along `axis`."""
    # Subtracting the max leaves the result unchanged (the factor cancels in
    # the ratio) but prevents overflow in exp for large logits.
    x = x - tf.reduce_max(x, axis=axis, keepdims=True)
    ex = tf.exp(x)
    return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
def gelu(x):
    """GELU activation via the tanh approximation (Hendrycks & Gimpel, 2016)."""
    return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))
def norm(x, scope, *, axis=-1, epsilon=1e-5):
    """Normalize to mean = 0, std = 1, then do a diagonal affine transform."""
    with tf.variable_scope(scope):
        n_state = x.shape[-1].value
        # Learned per-feature gain (g) and bias (b) for the affine transform.
        g = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1))
        b = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0))
        u = tf.reduce_mean(x, axis=axis, keepdims=True)
        s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
        # epsilon guards rsqrt against division by zero for near-constant input.
        x = (x - u) * tf.rsqrt(s + epsilon)
        x = x*g + b
        return x
def split_states(x, n):
    """Reshape the last dimension of x into [n, x.shape[-1]/n]."""
    *start, m = shape_list(x)
    # The last axis m is split into n groups of m//n (m must be divisible by n).
    return tf.reshape(x, start + [n, m//n])
def merge_states(x):
    """Smash the last two dimensions of x into a single dimension."""
    *start, a, b = shape_list(x)
    # Inverse of split_states: [..., a, b] -> [..., a*b].
    return tf.reshape(x, start + [a*b])
def conv1d(x, scope, nf, *, w_init_stdev=0.02):
    """Position-wise linear projection (a width-1 convolution) from nx to nf features."""
    with tf.variable_scope(scope):
        *start, nx = shape_list(x)
        w = tf.get_variable('w', [1, nx, nf], initializer=tf.random_normal_initializer(stddev=w_init_stdev))
        b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))
        # Flatten all leading axes, apply x @ w + b, then restore the shape.
        c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])
        return c
def attention_mask(nd, ns, *, dtype):
    """1's in the lower triangle, counting from the lower right corner.

    Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
    """
    i = tf.range(nd)[:,None]
    j = tf.range(ns)
    # Destination position i (offset by ns-nd) may attend to source j iff j <= i.
    m = i >= j - ns + nd
    return tf.cast(m, dtype)
def attn(x, scope, n_state, *, past, hparams):
    """Multi-head causal self-attention.

    Args:
      x: activations, [batch, sequence, features].
      past: cached keys/values from earlier steps, or None;
        [batch, 2, heads, sequence, features] where 2 is [k, v].

    Returns:
      (output, present): the attended output and this step's stacked
      k/v for the caller to cache.
    """
    assert x.shape.ndims == 3  # Should be [batch, sequence, features]
    assert n_state % hparams.n_head == 0
    if past is not None:
        assert past.shape.ndims == 5  # Should be [batch, 2, heads, sequence, features], where 2 is [k, v]
    def split_heads(x):
        # From [batch, sequence, features] to [batch, heads, sequence, features]
        return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])
    def merge_heads(x):
        # Reverse of split_heads
        return merge_states(tf.transpose(x, [0, 2, 1, 3]))
    def mask_attn_weights(w):
        # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
        _, _, nd, ns = shape_list(w)
        b = attention_mask(nd, ns, dtype=w.dtype)
        b = tf.reshape(b, [1, 1, nd, ns])
        # Masked positions get a large negative bias so softmax sends them to ~0.
        w = w*b - tf.cast(1e10, w.dtype)*(1-b)
        return w
    def multihead_attn(q, k, v):
        # q, k, v have shape [batch, heads, sequence, features]
        w = tf.matmul(q, k, transpose_b=True)
        # Scale by 1/sqrt(head_dim) before masking and softmax.
        w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))
        w = mask_attn_weights(w)
        w = softmax(w)
        a = tf.matmul(w, v)
        return a
    with tf.variable_scope(scope):
        # Single fused projection producing q, k and v side by side.
        c = conv1d(x, 'c_attn', n_state*3)
        q, k, v = map(split_heads, tf.split(c, 3, axis=2))
        present = tf.stack([k, v], axis=1)
        if past is not None:
            # Prepend cached keys/values so attention spans the full history.
            pk, pv = tf.unstack(past, axis=1)
            k = tf.concat([pk, k], axis=-2)
            v = tf.concat([pv, v], axis=-2)
        a = multihead_attn(q, k, v)
        a = merge_heads(a)
        a = conv1d(a, 'c_proj', n_state)
        return a, present
def mlp(x, scope, n_state, *, hparams):
    """Position-wise feed-forward block: expand to n_state with GELU, project back."""
    with tf.variable_scope(scope):
        nx = x.shape[-1].value
        hidden = gelu(conv1d(x, 'c_fc', n_state))
        return conv1d(hidden, 'c_proj', nx)
def block(x, scope, *, past, hparams):
    """One pre-norm transformer block: attention then MLP, each with a residual."""
    with tf.variable_scope(scope):
        nx = x.shape[-1].value
        a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)
        x = x + a
        # MLP hidden size is 4x the embedding width.
        m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)
        x = x + m
        return x, present
def past_shape(*, hparams, batch_size=None, sequence=None):
    """Shape of the k/v cache: [batch, layer, 2 (k/v), head, sequence, head_dim]."""
    return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]
def expand_tile(value, size):
    """Add a new axis of given size."""
    value = tf.convert_to_tensor(value, name='value')
    ndims = value.shape.ndims
    # Prepend a new leading axis and repeat the value `size` times along it.
    return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
def positions_for(tokens, past_length):
    """Absolute position indices for `tokens`, offset by the cached prefix length."""
    batch_size = tf.shape(tokens)[0]
    nsteps = tf.shape(tokens)[1]
    return expand_tile(past_length + tf.range(nsteps), batch_size)
def model(hparams, X, past=None, scope='model', reuse=False):
    """Build the full language-model forward graph.

    Args:
      hparams: model hyperparameters (see default_hparams).
      X: int token ids, [batch, sequence].
      past: optional cached k/v activations (see past_shape) for
        incremental decoding.

    Returns:
      dict with 'logits' [batch, sequence, n_vocab] and 'present'
      (per-layer k/v stacked along axis 1, for caching).
    """
    with tf.compat.v1.variable_scope(scope, reuse=reuse):
        results = {}
        batch, sequence = shape_list(X)
        # Learned position (wpe) and token (wte) embedding tables.
        wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],
                             initializer=tf.random_normal_initializer(stddev=0.01))
        wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],
                             initializer=tf.random_normal_initializer(stddev=0.02))
        past_length = 0 if past is None else tf.shape(past)[-2]
        h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))

        # Transformer
        presents = []
        pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer
        assert len(pasts) == hparams.n_layer
        for layer, past in enumerate(pasts):
            h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)
            presents.append(present)
        results['present'] = tf.stack(presents, axis=1)
        h = norm(h, 'ln_f')

        # Language model loss.  Do tokens <n predict token n?
        # Output projection reuses the token embedding matrix (weight tying).
        h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])
        logits = tf.matmul(h_flat, wte, transpose_b=True)
        logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
        results['logits'] = logits
        return results
| 37.217143 | 108 | 0.600952 | import numpy as np
import tensorflow as tf
from tensorflow.contrib.training import HParams
def default_hparams():
return HParams(
n_vocab=0,
n_ctx=1024,
n_embd=768,
n_head=12,
n_layer=12,
)
def shape_list(x):
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def softmax(x, axis=-1):
x = x - tf.reduce_max(x, axis=axis, keepdims=True)
ex = tf.exp(x)
return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
def gelu(x):
return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))
def norm(x, scope, *, axis=-1, epsilon=1e-5):
with tf.variable_scope(scope):
n_state = x.shape[-1].value
g = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0))
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + epsilon)
x = x*g + b
return x
def split_states(x, n):
*start, m = shape_list(x)
return tf.reshape(x, start + [n, m//n])
def merge_states(x):
*start, a, b = shape_list(x)
return tf.reshape(x, start + [a*b])
def conv1d(x, scope, nf, *, w_init_stdev=0.02):
with tf.variable_scope(scope):
*start, nx = shape_list(x)
w = tf.get_variable('w', [1, nx, nf], initializer=tf.random_normal_initializer(stddev=w_init_stdev))
b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])
return c
def attention_mask(nd, ns, *, dtype):
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def attn(x, scope, n_state, *, past, hparams):
assert x.shape.ndims == 3
assert n_state % hparams.n_head == 0
if past is not None:
assert past.shape.ndims == 5
def split_heads(x):
return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])
def merge_heads(x):
return merge_states(tf.transpose(x, [0, 2, 1, 3]))
def mask_attn_weights(w):
_, _, nd, ns = shape_list(w)
b = attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w*b - tf.cast(1e10, w.dtype)*(1-b)
return w
def multihead_attn(q, k, v):
w = tf.matmul(q, k, transpose_b=True)
w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))
w = mask_attn_weights(w)
w = softmax(w)
a = tf.matmul(w, v)
return a
with tf.variable_scope(scope):
c = conv1d(x, 'c_attn', n_state*3)
q, k, v = map(split_heads, tf.split(c, 3, axis=2))
present = tf.stack([k, v], axis=1)
if past is not None:
pk, pv = tf.unstack(past, axis=1)
k = tf.concat([pk, k], axis=-2)
v = tf.concat([pv, v], axis=-2)
a = multihead_attn(q, k, v)
a = merge_heads(a)
a = conv1d(a, 'c_proj', n_state)
return a, present
def mlp(x, scope, n_state, *, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
h = gelu(conv1d(x, 'c_fc', n_state))
h2 = conv1d(h, 'c_proj', nx)
return h2
def block(x, scope, *, past, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)
x = x + a
m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)
x = x + m
return x, present
def past_shape(*, hparams, batch_size=None, sequence=None):
return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]
def expand_tile(value, size):
value = tf.convert_to_tensor(value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
def positions_for(tokens, past_length):
batch_size = tf.shape(tokens)[0]
nsteps = tf.shape(tokens)[1]
return expand_tile(past_length + tf.range(nsteps), batch_size)
def model(hparams, X, past=None, scope='model', reuse=False):
with tf.compat.v1.variable_scope(scope, reuse=reuse):
results = {}
batch, sequence = shape_list(X)
wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.01))
wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.02))
past_length = 0 if past is None else tf.shape(past)[-2]
h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))
presents = []
pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer
assert len(pasts) == hparams.n_layer
for layer, past in enumerate(pasts):
h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)
presents.append(present)
results['present'] = tf.stack(presents, axis=1)
h = norm(h, 'ln_f')
h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])
logits = tf.matmul(h_flat, wte, transpose_b=True)
logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
results['logits'] = logits
return results
| true | true |
f7ff970541213aae1643b6660e4fc2a38f053040 | 8,660 | py | Python | AutomatedTesting/Gem/PythonTests/Atom/atom_utils/material_editor_utils.py | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 11 | 2021-07-08T09:58:26.000Z | 2022-03-17T17:59:26.000Z | AutomatedTesting/Gem/PythonTests/Atom/atom_utils/material_editor_utils.py | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 29 | 2021-07-06T19:33:52.000Z | 2022-03-22T10:27:49.000Z | AutomatedTesting/Gem/PythonTests/Atom/atom_utils/material_editor_utils.py | LB-JakubSkorupka/o3de | e224fc2ee5ec2a12e75a10acae268b7b38ae3a32 | [
"Apache-2.0",
"MIT"
] | 4 | 2021-07-06T19:24:43.000Z | 2022-03-31T12:42:27.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
import azlmbr.materialeditor will fail with a ModuleNotFound error when using this script with Editor.exe
This is because azlmbr.materialeditor only binds to MaterialEditor.exe and not Editor.exe
You need to launch this script with MaterialEditor.exe in order for azlmbr.materialeditor to appear.
"""
import os
import sys
import time
import azlmbr.atom
import azlmbr.atomtools as atomtools
import azlmbr.materialeditor as materialeditor
import azlmbr.bus as bus
def is_close(actual, expected, buffer=sys.float_info.min):
    """Return True when actual is within buffer of expected.

    :param actual: actual value
    :param expected: expected value
    :param buffer: acceptable variation from expected
    :return: bool
    """
    difference = abs(actual - expected)
    return difference < buffer
def compare_colors(color1, color2, buffer=0.00001):
    """Compare the red, green and blue channels of two colors within a tolerance.

    :param color1: first color to compare
    :param color2: second color
    :param buffer: allowed variance in individual color value
    :return: bool
    """
    channel_pairs = (
        (color1.r, color2.r),
        (color1.g, color2.g),
        (color1.b, color2.b),
    )
    return all(is_close(a, b, buffer) for a, b in channel_pairs)
def open_material(file_path):
    """Ask the document system to open the material document at file_path.

    :return: uuid of material document opened
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "OpenDocument", file_path)
def is_open(document_id):
    """Report whether the document with the given id is currently open.

    :return: bool
    """
    return azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "IsOpen", document_id)
def save_document(document_id):
    """Save the document with the given id.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "SaveDocument", document_id)
def save_document_as_copy(document_id, target_path):
    """Save the document to target_path as an independent copy.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(
        bus.Broadcast, "SaveDocumentAsCopy", document_id, target_path
    )
def save_document_as_child(document_id, target_path):
    """Save the document to target_path as a child (derived) document.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(
        bus.Broadcast, "SaveDocumentAsChild", document_id, target_path
    )
def save_all():
    """Save every open document.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "SaveAllDocuments")
def close_document(document_id):
    """Close the document with the given id.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseDocument", document_id)
def close_all_documents():
    """Close every open document.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseAllDocuments")
def close_all_except_selected(document_id):
    """Close every open document except the one with the given id.

    :return: bool success
    """
    return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseAllDocumentsExcept", document_id)
def get_property(document_id, property_name):
    """Read a property value from an open document.

    :return: property value or invalid value if the document is not open or the property_name can't be found
    """
    return azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "GetPropertyValue", document_id, property_name)
def set_property(document_id, property_name, value):
    """Set a property value on an open document (no return value)."""
    azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "SetPropertyValue", document_id, property_name, value)
def is_pane_visible(pane_name):
    """Report whether the named dock widget (pane) is visible.

    :return: bool
    """
    return atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "IsDockWidgetVisible", pane_name)
def set_pane_visibility(pane_name, value):
    """Show or hide the named dock widget (pane)."""
    atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "SetDockWidgetVisible", pane_name, value)
def select_lighting_config(config_name):
    """Select the named lighting preset in the material viewport."""
    azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SelectLightingPresetByName", config_name)
def set_grid_enable_disable(value):
    """Enable or disable the viewport grid."""
    azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SetGridEnabled", value)
def get_grid_enable_disable():
    """Report whether the viewport grid is enabled.

    :return: bool
    """
    return azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "GetGridEnabled")
def set_shadowcatcher_enable_disable(value):
    """Enable or disable the viewport shadow catcher."""
    azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SetShadowCatcherEnabled", value)
def get_shadowcatcher_enable_disable():
    """Report whether the viewport shadow catcher is enabled.

    :return: bool
    """
    return azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "GetShadowCatcherEnabled")
def select_model_config(configname):
    """Select the named model preset in the material viewport."""
    azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SelectModelPresetByName", configname)
def destroy_main_window():
    """
    Closes the Material Editor window
    """
    azlmbr.atomtools.AtomToolsMainWindowFactoryRequestBus(azlmbr.bus.Broadcast, "DestroyMainWindow")
def wait_for_condition(function, timeout_in_seconds=1.0):
    # type: (function, float) -> bool
    """
    Poll `function` once per rendered frame until it returns True or the
    timeout elapses.
    The function takes no parameters; frame waiting (idle_wait_*) is
    handled here, not inside the function.
    :param function: a function that returns a boolean indicating a desired condition is achieved
    :param timeout_in_seconds: when reached, function execution is abandoned and False is returned
    """
    with Timeout(timeout_in_seconds) as t:
        while True:
            try:
                azlmbr.atomtools.general.idle_wait_frames(1)
            except Exception:
                # Frame wait failed; still fall through to timeout/condition checks.
                print("WARNING: Couldn't wait for frame")
            if t.timed_out:
                return False
            ret = function()
            # Guard against conditions that accidentally return truthy non-bools.
            if not isinstance(ret, bool):
                raise TypeError("return value for wait_for_condition function must be a bool")
            if ret:
                return True
class Timeout:
    """Context manager for a wall-clock deadline.

    :param seconds: float seconds to allow before timed_out is True
    """
    def __init__(self, seconds):
        self.seconds = seconds
    def __enter__(self):
        # Record the absolute deadline at entry time.
        self.die_after = time.time() + self.seconds
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass
    @property
    def timed_out(self):
        # Expired once the wall clock has passed the recorded deadline.
        return time.time() > self.die_after
screenshotsFolder = os.path.join(azlmbr.paths.products, "Screenshots")
class ScreenshotHelper:
    """
    A helper to capture screenshots and wait for them.

    Drives the FrameCaptureRequestBus and blocks (by pumping idle frames
    through the supplied callback) until the capture notification arrives
    or a frame budget is exhausted.
    """
    def __init__(self, idle_wait_frames_callback):
        super().__init__()
        self.done = False
        self.capturedScreenshot = False
        # Frame budget while waiting for the capture notification.
        self.max_frames_to_wait = 60
        self.idle_wait_frames_callback = idle_wait_frames_callback
    def capture_screenshot_blocking(self, filename):
        """
        Capture a screenshot and block the execution until the screenshot has been written to the disk.
        """
        self.handler = azlmbr.atom.FrameCaptureNotificationBusHandler()
        self.handler.connect()
        self.handler.add_callback("OnCaptureFinished", self.on_screenshot_captured)
        self.done = False
        self.capturedScreenshot = False
        success = azlmbr.atom.FrameCaptureRequestBus(azlmbr.bus.Broadcast, "CaptureScreenshot", filename)
        if success:
            self.wait_until_screenshot()
            print("Screenshot taken.")
        else:
            print("screenshot failed")
        return self.capturedScreenshot
    def on_screenshot_captured(self, parameters):
        # the parameters come in as a tuple: (success flag, file path)
        if parameters[0]:
            print("screenshot saved: {}".format(parameters[1]))
            self.capturedScreenshot = True
        else:
            print("screenshot failed: {}".format(parameters[1]))
        self.done = True
        self.handler.disconnect()
    def wait_until_screenshot(self):
        """Pump idle frames until the capture callback fires or the frame budget runs out."""
        frames_waited = 0
        while not self.done:
            self.idle_wait_frames_callback(1)
            if frames_waited > self.max_frames_to_wait:
                print("timeout while waiting for the screenshot to be written")
                self.handler.disconnect()
                break
            else:
                frames_waited += 1
        print("(waited {} frames)".format(frames_waited))
def capture_screenshot(file_path):
    """Capture a screenshot to file_path, blocking until it is written.

    :return: bool, whether the screenshot was captured
    """
    # NOTE(review): os.path.join with a single argument is a no-op;
    # file_path is used as-is.
    return ScreenshotHelper(azlmbr.atomtools.general.idle_wait_frames).capture_screenshot_blocking(
        os.path.join(file_path)
    )
| 30.70922 | 117 | 0.703811 |
import os
import sys
import time
import azlmbr.atom
import azlmbr.atomtools as atomtools
import azlmbr.materialeditor as materialeditor
import azlmbr.bus as bus
def is_close(actual, expected, buffer=sys.float_info.min):
return abs(actual - expected) < buffer
def compare_colors(color1, color2, buffer=0.00001):
return (
is_close(color1.r, color2.r, buffer)
and is_close(color1.g, color2.g, buffer)
and is_close(color1.b, color2.b, buffer)
)
def open_material(file_path):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "OpenDocument", file_path)
def is_open(document_id):
return azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "IsOpen", document_id)
def save_document(document_id):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "SaveDocument", document_id)
def save_document_as_copy(document_id, target_path):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(
bus.Broadcast, "SaveDocumentAsCopy", document_id, target_path
)
def save_document_as_child(document_id, target_path):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(
bus.Broadcast, "SaveDocumentAsChild", document_id, target_path
)
def save_all():
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "SaveAllDocuments")
def close_document(document_id):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseDocument", document_id)
def close_all_documents():
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseAllDocuments")
def close_all_except_selected(document_id):
return azlmbr.atomtools.AtomToolsDocumentSystemRequestBus(bus.Broadcast, "CloseAllDocumentsExcept", document_id)
def get_property(document_id, property_name):
return azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "GetPropertyValue", document_id, property_name)
def set_property(document_id, property_name, value):
azlmbr.atomtools.AtomToolsDocumentRequestBus(bus.Event, "SetPropertyValue", document_id, property_name, value)
def is_pane_visible(pane_name):
return atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "IsDockWidgetVisible", pane_name)
def set_pane_visibility(pane_name, value):
atomtools.AtomToolsWindowRequestBus(bus.Broadcast, "SetDockWidgetVisible", pane_name, value)
def select_lighting_config(config_name):
azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SelectLightingPresetByName", config_name)
def set_grid_enable_disable(value):
azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SetGridEnabled", value)
def get_grid_enable_disable():
return azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "GetGridEnabled")
def set_shadowcatcher_enable_disable(value):
azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SetShadowCatcherEnabled", value)
def get_shadowcatcher_enable_disable():
return azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "GetShadowCatcherEnabled")
def select_model_config(configname):
azlmbr.materialeditor.MaterialViewportRequestBus(azlmbr.bus.Broadcast, "SelectModelPresetByName", configname)
def destroy_main_window():
azlmbr.atomtools.AtomToolsMainWindowFactoryRequestBus(azlmbr.bus.Broadcast, "DestroyMainWindow")
def wait_for_condition(function, timeout_in_seconds=1.0):
with Timeout(timeout_in_seconds) as t:
while True:
try:
azlmbr.atomtools.general.idle_wait_frames(1)
except Exception:
print("WARNING: Couldn't wait for frame")
if t.timed_out:
return False
ret = function()
if not isinstance(ret, bool):
raise TypeError("return value for wait_for_condition function must be a bool")
if ret:
return True
class Timeout:
# type: (float) -> None
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
self.die_after = time.time() + self.seconds
return self
def __exit__(self, type, value, traceback):
pass
@property
def timed_out(self):
return time.time() > self.die_after
screenshotsFolder = os.path.join(azlmbr.paths.products, "Screenshots")
class ScreenshotHelper:
def __init__(self, idle_wait_frames_callback):
super().__init__()
self.done = False
self.capturedScreenshot = False
self.max_frames_to_wait = 60
self.idle_wait_frames_callback = idle_wait_frames_callback
def capture_screenshot_blocking(self, filename):
self.handler = azlmbr.atom.FrameCaptureNotificationBusHandler()
self.handler.connect()
self.handler.add_callback("OnCaptureFinished", self.on_screenshot_captured)
self.done = False
self.capturedScreenshot = False
success = azlmbr.atom.FrameCaptureRequestBus(azlmbr.bus.Broadcast, "CaptureScreenshot", filename)
if success:
self.wait_until_screenshot()
print("Screenshot taken.")
else:
print("screenshot failed")
return self.capturedScreenshot
def on_screenshot_captured(self, parameters):
# the parameters come in as a tuple
if parameters[0]:
print("screenshot saved: {}".format(parameters[1]))
self.capturedScreenshot = True
else:
print("screenshot failed: {}".format(parameters[1]))
self.done = True
self.handler.disconnect()
def wait_until_screenshot(self):
frames_waited = 0
while self.done == False:
self.idle_wait_frames_callback(1)
if frames_waited > self.max_frames_to_wait:
print("timeout while waiting for the screenshot to be written")
self.handler.disconnect()
break
else:
frames_waited = frames_waited + 1
print("(waited {} frames)".format(frames_waited))
def capture_screenshot(file_path):
return ScreenshotHelper(azlmbr.atomtools.general.idle_wait_frames).capture_screenshot_blocking(
os.path.join(file_path)
)
| true | true |
f7ff97137f9a1ddaea3def32100e9bb4a61d73a5 | 8,225 | py | Python | appengine-mapreduce/python/test/mapreduce/shuffler_end_to_end_test.py | bslatkin/8-bits | 1608a53bdd5ff491519396212679dc79cc07fca4 | [
"Apache-2.0"
] | 2 | 2015-02-18T08:12:23.000Z | 2015-09-24T20:35:41.000Z | appengine-mapreduce/python/test/mapreduce/shuffler_end_to_end_test.py | bslatkin/8-bits | 1608a53bdd5ff491519396212679dc79cc07fca4 | [
"Apache-2.0"
] | null | null | null | appengine-mapreduce/python/test/mapreduce/shuffler_end_to_end_test.py | bslatkin/8-bits | 1608a53bdd5ff491519396212679dc79cc07fca4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
import unittest
from mapreduce.lib import pipeline
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import records
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import control
from mapreduce import mapreduce_pipeline
from mapreduce import model
from mapreduce import output_writers
from mapreduce import shuffler
from mapreduce import test_support
from testlib import testutil
import unittest
class SortFileEndToEndTest(testutil.HandlerTestBase):
    """End-to-end test for _SortFilePipeline."""

    def setUp(self):
        testutil.HandlerTestBase.setUp(self)
        # Capture pipeline notification mail in-memory instead of sending it.
        pipeline.Pipeline._send_mail = self._send_mail
        self.emails = []

    def _send_mail(self, sender, subject, body, html=None):
        """Callback function for sending mail."""
        self.emails.append((sender, subject, body, html))

    def testSortFile(self):
        """Test sorting a file of 100 KeyValue records."""
        input_file = files.blobstore.create()
        input_data = [
            (str(i), "_" + str(i)) for i in range(100)]

        # Write the (unsorted-by-construction) records to blobstore.
        with files.open(input_file, "a") as f:
            with records.RecordsWriter(f) as w:
                for (k, v) in input_data:
                    proto = file_service_pb.KeyValue()
                    proto.set_key(k)
                    proto.set_value(v)
                    w.write(proto.Encode())
        files.finalize(input_file)
        input_file = files.blobstore.get_file_name(
            files.blobstore.get_blob_key(input_file))

        # Run the sort pipeline to completion on the test taskqueue.
        p = shuffler._SortChunksPipeline("testjob", [input_file])
        p.start()
        test_support.execute_until_empty(self.taskqueue)
        p = shuffler._SortChunksPipeline.from_id(p.pipeline_id)

        input_data.sort()
        output_files = p.outputs.default.value[0]
        output_data = []
        for output_file in output_files:
            with files.open(output_file, "r") as f:
                for binary_record in records.RecordsReader(f):
                    proto = file_service_pb.KeyValue()
                    proto.ParseFromString(binary_record)
                    output_data.append((proto.key(), proto.value()))

        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(input_data, output_data)
def test_handler_yield_str(key, value, partial):
    """Test handler that yields its parameters converted to a single string."""
    triple = (key, value, partial)
    yield str(triple)
class TestMergePipeline(base_handler.PipelineBase):
    """A pipeline to merge-sort multiple sorted files.

    Args:
      filenames: list of input file names as string. Each file is of records
        format with file_service_pb.KeyValue protocol messages. All files
        should be sorted by key value.

    Returns:
      The list of filenames as string. Resulting files contain records with
      str((key, values)) obtained from MergingReader.
    """

    def run(self, filenames):
        """Yield a MapperPipeline that merge-reads *filenames* through
        shuffler._MergingReader and writes stringified triples to blobstore."""
        yield mapreduce_pipeline.MapperPipeline(
            "sort",
            __name__ + ".test_handler_yield_str",
            shuffler.__name__ + "._MergingReader",
            output_writers.__name__ + ".BlobstoreRecordsOutputWriter",
            params={
                # All inputs go to a single shard; the limits below control
                # when the reader emits partial (split) key/value groups.
                shuffler._MergingReader.FILES_PARAM: [filenames],
                shuffler._MergingReader.MAX_VALUES_COUNT_PARAM:
                    shuffler._MergePipeline._MAX_VALUES_COUNT,
                shuffler._MergingReader.MAX_VALUES_SIZE_PARAM:
                    shuffler._MergePipeline._MAX_VALUES_SIZE,
            },
        )
class MergingReaderEndToEndTest(testutil.HandlerTestBase):
    """End-to-end test for MergingReader."""

    def setUp(self):
        testutil.HandlerTestBase.setUp(self)
        # Capture pipeline notification mail in-memory instead of sending it.
        pipeline.Pipeline._send_mail = self._send_mail
        self.emails = []

    def _send_mail(self, sender, subject, body, html=None):
        """Callback function for sending mail."""
        self.emails.append((sender, subject, body, html))

    def testMergeFiles(self):
        """Test merging three identical sorted files groups values per key."""
        input_data = [(str(i), "_" + str(i)) for i in range(100)]
        input_data.sort()

        input_file = files.blobstore.create()

        with files.open(input_file, "a") as f:
            with records.RecordsWriter(f) as w:
                for (k, v) in input_data:
                    proto = file_service_pb.KeyValue()
                    proto.set_key(k)
                    proto.set_value(v)
                    w.write(proto.Encode())
        files.finalize(input_file)
        input_file = files.blobstore.get_file_name(
            files.blobstore.get_blob_key(input_file))

        p = TestMergePipeline([input_file, input_file, input_file])
        p.start()
        test_support.execute_until_empty(self.taskqueue)
        p = TestMergePipeline.from_id(p.pipeline_id)

        output_file = p.outputs.default.value[0]
        output_data = []
        with files.open(output_file, "r") as f:
            for record in records.RecordsReader(f):
                output_data.append(record)

        # Each key appears once with its value repeated per input file.
        expected_data = [
            str((k, [v, v, v], False)) for (k, v) in input_data]
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(expected_data, output_data)

    def testPartialRecords(self):
        """Test merging into partial key values."""
        try:
            self._prev_max_values_count = shuffler._MergePipeline._MAX_VALUES_COUNT
            # force max values count to extremely low value.
            shuffler._MergePipeline._MAX_VALUES_COUNT = 1

            input_data = [('1', 'a'), ('2', 'b'), ('3', 'c')]
            input_data.sort()

            input_file = files.blobstore.create()

            with files.open(input_file, "a") as f:
                with records.RecordsWriter(f) as w:
                    for (k, v) in input_data:
                        proto = file_service_pb.KeyValue()
                        proto.set_key(k)
                        proto.set_value(v)
                        w.write(proto.Encode())
            files.finalize(input_file)
            input_file = files.blobstore.get_file_name(
                files.blobstore.get_blob_key(input_file))

            p = TestMergePipeline([input_file, input_file, input_file])
            p.start()
            test_support.execute_until_empty(self.taskqueue)
            p = TestMergePipeline.from_id(p.pipeline_id)

            output_file = p.outputs.default.value[0]
            output_data = []
            with files.open(output_file, "r") as f:
                for record in records.RecordsReader(f):
                    output_data.append(record)

            # With MAX_VALUES_COUNT == 1, each key's group is split into
            # single-value chunks; only the last chunk has partial == False.
            expected_data = [
                ('1', ['a'], True),
                ('1', ['a'], True),
                ('1', ['a'], False),
                ('2', ['b'], True),
                ('2', ['b'], True),
                ('2', ['b'], False),
                ('3', ['c'], True),
                ('3', ['c'], True),
                ('3', ['c'], False),
            ]
            # assertEquals is a deprecated alias (removed in Python 3.12).
            self.assertEqual([str(e) for e in expected_data], output_data)
        finally:
            shuffler._MergePipeline._MAX_VALUES_COUNT = self._prev_max_values_count
class ShuffleEndToEndTest(testutil.HandlerTestBase):
    """End-to-end test for ShufflePipeline."""

    def setUp(self):
        testutil.HandlerTestBase.setUp(self)
        # Capture pipeline notification mail in-memory instead of sending it.
        pipeline.Pipeline._send_mail = self._send_mail
        self.emails = []

    def _send_mail(self, sender, subject, body, html=None):
        """Callback function for sending mail."""
        self.emails.append((sender, subject, body, html))

    def testShuffleFiles(self):
        """Test shuffling three copies of one file yields grouped KeyValues."""
        input_data = [(str(i), str(i)) for i in range(100)]
        input_data.sort()

        input_file = files.blobstore.create()

        with files.open(input_file, "a") as f:
            with records.RecordsWriter(f) as w:
                for (k, v) in input_data:
                    proto = file_service_pb.KeyValue()
                    proto.set_key(k)
                    proto.set_value(v)
                    w.write(proto.Encode())
        files.finalize(input_file)
        input_file = files.blobstore.get_file_name(
            files.blobstore.get_blob_key(input_file))

        p = shuffler.ShufflePipeline(
            "testjob", [input_file, input_file, input_file])
        p.start()
        test_support.execute_until_empty(self.taskqueue)
        p = shuffler.ShufflePipeline.from_id(p.pipeline_id)

        output_files = p.outputs.default.value
        output_data = []
        for output_file in output_files:
            with files.open(output_file, "r") as f:
                for record in records.RecordsReader(f):
                    proto = file_service_pb.KeyValues()
                    proto.ParseFromString(record)
                    output_data.append((proto.key(), proto.value_list()))
        output_data.sort()

        expected_data = sorted([
            (str(k), [str(v), str(v), str(v)]) for (k, v) in input_data])
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(expected_data, output_data)
if __name__ == "__main__":
unittest.main()
| 32.003891 | 77 | 0.665897 |
import unittest
from mapreduce.lib import pipeline
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import records
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import control
from mapreduce import mapreduce_pipeline
from mapreduce import model
from mapreduce import output_writers
from mapreduce import shuffler
from mapreduce import test_support
from testlib import testutil
import unittest
class SortFileEndToEndTest(testutil.HandlerTestBase):
def setUp(self):
testutil.HandlerTestBase.setUp(self)
pipeline.Pipeline._send_mail = self._send_mail
self.emails = []
def _send_mail(self, sender, subject, body, html=None):
self.emails.append((sender, subject, body, html))
def testSortFile(self):
input_file = files.blobstore.create()
input_data = [
(str(i), "_" + str(i)) for i in range(100)]
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for (k, v) in input_data:
proto = file_service_pb.KeyValue()
proto.set_key(k)
proto.set_value(v)
w.write(proto.Encode())
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
p = shuffler._SortChunksPipeline("testjob", [input_file])
p.start()
test_support.execute_until_empty(self.taskqueue)
p = shuffler._SortChunksPipeline.from_id(p.pipeline_id)
input_data.sort()
output_files = p.outputs.default.value[0]
output_data = []
for output_file in output_files:
with files.open(output_file, "r") as f:
for binary_record in records.RecordsReader(f):
proto = file_service_pb.KeyValue()
proto.ParseFromString(binary_record)
output_data.append((proto.key(), proto.value()))
self.assertEquals(input_data, output_data)
def test_handler_yield_str(key, value, partial):
yield str((key, value, partial))
class TestMergePipeline(base_handler.PipelineBase):
def run(self, filenames):
yield mapreduce_pipeline.MapperPipeline(
"sort",
__name__ + ".test_handler_yield_str",
shuffler.__name__ + "._MergingReader",
output_writers.__name__ + ".BlobstoreRecordsOutputWriter",
params={
shuffler._MergingReader.FILES_PARAM:[filenames],
shuffler._MergingReader.MAX_VALUES_COUNT_PARAM:
shuffler._MergePipeline._MAX_VALUES_COUNT,
shuffler._MergingReader.MAX_VALUES_SIZE_PARAM:
shuffler._MergePipeline._MAX_VALUES_SIZE,
},
)
class MergingReaderEndToEndTest(testutil.HandlerTestBase):
def setUp(self):
testutil.HandlerTestBase.setUp(self)
pipeline.Pipeline._send_mail = self._send_mail
self.emails = []
def _send_mail(self, sender, subject, body, html=None):
self.emails.append((sender, subject, body, html))
def testMergeFiles(self):
input_data = [(str(i), "_" + str(i)) for i in range(100)]
input_data.sort()
input_file = files.blobstore.create()
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for (k, v) in input_data:
proto = file_service_pb.KeyValue()
proto.set_key(k)
proto.set_value(v)
w.write(proto.Encode())
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
p = TestMergePipeline([input_file, input_file, input_file])
p.start()
test_support.execute_until_empty(self.taskqueue)
p = TestMergePipeline.from_id(p.pipeline_id)
output_file = p.outputs.default.value[0]
output_data = []
with files.open(output_file, "r") as f:
for record in records.RecordsReader(f):
output_data.append(record)
expected_data = [
str((k, [v, v, v], False)) for (k, v) in input_data]
self.assertEquals(expected_data, output_data)
def testPartialRecords(self):
try:
self._prev_max_values_count = shuffler._MergePipeline._MAX_VALUES_COUNT
shuffler._MergePipeline._MAX_VALUES_COUNT = 1
input_data = [('1', 'a'), ('2', 'b'), ('3', 'c')]
input_data.sort()
input_file = files.blobstore.create()
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for (k, v) in input_data:
proto = file_service_pb.KeyValue()
proto.set_key(k)
proto.set_value(v)
w.write(proto.Encode())
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
p = TestMergePipeline([input_file, input_file, input_file])
p.start()
test_support.execute_until_empty(self.taskqueue)
p = TestMergePipeline.from_id(p.pipeline_id)
output_file = p.outputs.default.value[0]
output_data = []
with files.open(output_file, "r") as f:
for record in records.RecordsReader(f):
output_data.append(record)
expected_data = [
('1', ['a'], True),
('1', ['a'], True),
('1', ['a'], False),
('2', ['b'], True),
('2', ['b'], True),
('2', ['b'], False),
('3', ['c'], True),
('3', ['c'], True),
('3', ['c'], False),
]
self.assertEquals([str(e) for e in expected_data], output_data)
finally:
shuffler._MergePipeline._MAX_VALUES_COUNT = self._prev_max_values_count
class ShuffleEndToEndTest(testutil.HandlerTestBase):
def setUp(self):
testutil.HandlerTestBase.setUp(self)
pipeline.Pipeline._send_mail = self._send_mail
self.emails = []
def _send_mail(self, sender, subject, body, html=None):
self.emails.append((sender, subject, body, html))
def testShuffleFiles(self):
input_data = [(str(i), str(i)) for i in range(100)]
input_data.sort()
input_file = files.blobstore.create()
with files.open(input_file, "a") as f:
with records.RecordsWriter(f) as w:
for (k, v) in input_data:
proto = file_service_pb.KeyValue()
proto.set_key(k)
proto.set_value(v)
w.write(proto.Encode())
files.finalize(input_file)
input_file = files.blobstore.get_file_name(
files.blobstore.get_blob_key(input_file))
p = shuffler.ShufflePipeline(
"testjob", [input_file, input_file, input_file])
p.start()
test_support.execute_until_empty(self.taskqueue)
p = shuffler.ShufflePipeline.from_id(p.pipeline_id)
output_files = p.outputs.default.value
output_data = []
for output_file in output_files:
with files.open(output_file, "r") as f:
for record in records.RecordsReader(f):
proto = file_service_pb.KeyValues()
proto.ParseFromString(record)
output_data.append((proto.key(), proto.value_list()))
output_data.sort()
expected_data = sorted([
(str(k), [str(v), str(v), str(v)]) for (k, v) in input_data])
self.assertEquals(expected_data, output_data)
if __name__ == "__main__":
unittest.main()
| true | true |
f7ff9714c765e0542e3601d9a4160a715926b8f5 | 1,191 | py | Python | zun/tests/unit/scheduler/fakes.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 83 | 2016-09-14T22:06:26.000Z | 2022-01-27T03:49:52.000Z | zun/tests/unit/scheduler/fakes.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | zun/tests/unit/scheduler/fakes.py | wanghuiict/zun | 2f4a3a2ba06d7ca83002418d4003ee5dece70952 | [
"Apache-2.0"
] | 54 | 2016-09-29T10:16:02.000Z | 2022-01-28T19:12:49.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zun.scheduler import driver
from zun.scheduler import host_state
class FakeScheduler(driver.Scheduler):
    """Scheduler test double that never selects any destination host."""

    def select_destinations(self, context, containers):
        # Always report no suitable hosts.
        return []
class FakeHostState(host_state.HostState):
    """HostState test double allowing arbitrary attributes to be injected."""

    def __init__(self, host, attribute_dict=None):
        super(FakeHostState, self).__init__(host)
        if attribute_dict:
            # Overlay caller-provided attributes (e.g. resource counts) onto
            # the real HostState so filters/weighers can read them.
            for (key, val) in attribute_dict.items():
                setattr(self, key, val)
class FakeService(object):
    """Minimal stand-in for a service record (name, host, disabled flag)."""

    def __init__(self, name, host, disabled=False):
        self.name = name  # service name, e.g. "zun-compute"
        self.host = host  # host the service runs on
        self.disabled = disabled  # True when the service is administratively down
| 32.189189 | 78 | 0.697733 |
from zun.scheduler import driver
from zun.scheduler import host_state
class FakeScheduler(driver.Scheduler):
def select_destinations(self, context, containers):
return []
class FakeHostState(host_state.HostState):
def __init__(self, host, attribute_dict=None):
super(FakeHostState, self).__init__(host)
if attribute_dict:
for (key, val) in attribute_dict.items():
setattr(self, key, val)
class FakeService(object):
def __init__(self, name, host, disabled=False):
self.name = name
self.host = host
self.disabled = disabled
| true | true |
f7ff98278ed3aea357fb128d2c1891cb33488220 | 46,629 | py | Python | tests/unit/commands/local/lib/test_cfn_api_provider.py | G4brym/aws-sam-cli | 0601140f031f5b325b1861b298a6a589cf9c072b | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/unit/commands/local/lib/test_cfn_api_provider.py | G4brym/aws-sam-cli | 0601140f031f5b325b1861b298a6a589cf9c072b | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/unit/commands/local/lib/test_cfn_api_provider.py | G4brym/aws-sam-cli | 0601140f031f5b325b1861b298a6a589cf9c072b | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import json
import tempfile
from collections import OrderedDict
from unittest import TestCase
from unittest.mock import patch
from samcli.lib.providers.api_provider import ApiProvider
from samcli.lib.providers.cfn_api_provider import CfnApiProvider
from samcli.local.apigw.local_apigw_service import Route
from tests.unit.commands.local.lib.test_sam_api_provider import make_swagger
from samcli.lib.providers.provider import Cors
class TestApiProviderWithApiGatewayRestRoute(TestCase):
    """Route extraction from AWS::ApiGateway::RestApi resources, covering
    inline swagger bodies, BodyS3Location files, and binary media types."""

    def setUp(self):
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]

    def test_with_no_apis(self):
        """A RestApi with an empty Properties block yields no routes."""
        template = {"Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {}}}}

        provider = ApiProvider(template)

        self.assertEqual(provider.routes, [])

    def test_with_inline_swagger_apis(self):
        """Routes are parsed out of an inline swagger Body."""
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": make_swagger(self.input_routes)}}
            }
        }

        provider = ApiProvider(template)
        self.assertCountEqual(self.input_routes, provider.routes)

    def test_with_swagger_as_local_file(self):
        """Routes are parsed from a swagger document referenced by BodyS3Location."""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            # delete=False keeps the file readable after the handle closes
            # (required on Windows); remove it ourselves when the test ends
            # so repeated runs do not leak temp files.
            self.addCleanup(os.remove, filename)

            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()

            template = {
                "Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"BodyS3Location": filename}}}
            }

            provider = ApiProvider(template)
            self.assertCountEqual(self.input_routes, provider.routes)

    def test_body_with_swagger_as_local_file_expect_fail(self):
        """Passing a file path in Body (instead of BodyS3Location) must raise."""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            # See test_with_swagger_as_local_file for the delete=False rationale.
            self.addCleanup(os.remove, filename)

            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()

            template = {"Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": filename}}}}
            self.assertRaises(Exception, ApiProvider, template)

    @patch("samcli.lib.providers.cfn_base_api_provider.SwaggerReader")
    def test_with_swagger_as_both_body_and_uri_called(self, SwaggerReaderMock):
        """When both Body and BodyS3Location are set, both reach SwaggerReader."""
        body = {"some": "body"}
        filename = "somefile.txt"
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"BodyS3Location": filename, "Body": body}}
            }
        }

        SwaggerReaderMock.return_value.read.return_value = make_swagger(self.input_routes)

        cwd = "foo"
        provider = ApiProvider(template, cwd=cwd)
        self.assertCountEqual(self.input_routes, provider.routes)
        SwaggerReaderMock.assert_called_with(definition_body=body, definition_uri=filename, working_dir=cwd)

    def test_swagger_with_any_method(self):
        """An "any" method in swagger expands to all seven HTTP verbs."""
        routes = [Route(path="/path", methods=["any"], function_name="SamFunc1")]

        expected_routes = [
            Route(
                path="/path",
                methods=["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"],
                function_name="SamFunc1",
            )
        ]

        template = {
            "Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": make_swagger(routes)}}}
        }

        provider = ApiProvider(template)
        self.assertCountEqual(expected_routes, provider.routes)

    def test_with_binary_media_types(self):
        """Binary media types declared inside the swagger body surface on the API."""
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {"Body": make_swagger(self.input_routes, binary_media_types=self.binary_types)},
                }
            }
        }

        expected_binary_types = sorted(self.binary_types)
        expected_apis = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]

        provider = ApiProvider(template)
        self.assertCountEqual(expected_apis, provider.routes)
        self.assertCountEqual(provider.api.binary_media_types, expected_binary_types)

    def test_with_binary_media_types_in_swagger_and_on_resource(self):
        """Binary media types from the swagger body and the BinaryMediaTypes
        resource property are merged together."""
        input_routes = [Route(path="/path", methods=["OPTIONS"], function_name="SamFunc1")]
        extra_binary_types = ["text/html"]

        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "BinaryMediaTypes": extra_binary_types,
                        "Body": make_swagger(input_routes, binary_media_types=self.binary_types),
                    },
                }
            }
        }

        expected_binary_types = sorted(self.binary_types + extra_binary_types)
        expected_routes = [Route(path="/path", methods=["OPTIONS"], function_name="SamFunc1")]

        provider = ApiProvider(template)
        self.assertCountEqual(expected_routes, provider.routes)
        self.assertCountEqual(provider.api.binary_media_types, expected_binary_types)
class TestCloudFormationStageValues(TestCase):
    """Tests that ApiProvider reads stage name and stage variables from
    AWS::ApiGateway::Stage resources attached to a RestApi."""

    def setUp(self):
        # Shared fixture defaults. NOTE(review): the tests below build their
        # own inline swagger bodies; these attributes are not referenced by
        # the visible tests.
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]

    def test_provider_parse_stage_name(self):
        """A Stage referencing the RestApi sets api.stage_name; with no
        Variables property, stage_variables stays None."""
        template = {
            "Resources": {
                "Stage": {"Type": "AWS::ApiGateway::Stage", "Properties": {"StageName": "dev", "RestApiId": "TestApi"}},
                "TestApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")

        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, None)

    def test_provider_stage_variables(self):
        """A Stage's Variables property is surfaced as api.stage_variables."""
        template = {
            "Resources": {
                "Stage": {
                    "Type": "AWS::ApiGateway::Stage",
                    "Properties": {
                        "StageName": "dev",
                        "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                        "RestApiId": "TestApi",
                    },
                },
                "TestApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")

        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, {"vis": "data", "random": "test", "foo": "bar"})

    def test_multi_stage_get_all(self):
        """With two Stage resources on one RestApi, the provider keeps the
        last-processed stage's name and variables (asserted below as
        "Production"). OrderedDict fixes the resource iteration order."""
        resources = OrderedDict(
            {
                "ProductionApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                                "/anotherpath": {
                                    "post": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                            }
                        }
                    },
                }
            }
        )
        resources["StageDev"] = {
            "Type": "AWS::ApiGateway::Stage",
            "Properties": {
                "StageName": "dev",
                "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                "RestApiId": "ProductionApi",
            },
        }
        resources["StageProd"] = {
            "Type": "AWS::ApiGateway::Stage",
            "Properties": {
                "StageName": "Production",
                "Variables": {"vis": "prod data", "random": "test", "foo": "bar"},
                "RestApiId": "ProductionApi",
            },
        }
        template = {"Resources": resources}
        provider = ApiProvider(template)

        result = [f for f in provider.get_all()]
        routes = result[0].routes

        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        route2 = Route(path="/anotherpath", methods=["POST"], function_name="NoApiEventFunction")
        self.assertEqual(len(routes), 2)
        self.assertIn(route1, routes)
        self.assertIn(route2, routes)

        self.assertEqual(provider.api.stage_name, "Production")
        self.assertEqual(provider.api.stage_variables, {"vis": "prod data", "random": "test", "foo": "bar"})
class TestCloudFormationResourceMethod(TestCase):
    def setUp(self):
        # Shared fixture defaults, mirroring the other provider test classes.
        # NOTE(review): the visible tests in this class build their own
        # templates inline and do not reference these attributes.
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
    def test_basic_rest_api_resource_method(self):
        """An ApiGateway::Method on a greedy {proxy+} resource becomes one route.

        The function name is None because the method has no Integration Uri.
        NOTE(review): "ApiResource" has no "Type" key — the provider evidently
        resolves it via the method's ResourceId, but consider adding
        "Type": "AWS::ApiGateway::Resource" for a realistic fixture.
        """
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"StageName": "Prod"}},
                "ApiResource": {"Properties": {"PathPart": "{proxy+}", "RestApiId": "TestApi"}},
                "ApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "POST", "RestApiId": "TestApi", "ResourceId": "ApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [Route(function_name=None, path="/{proxy+}", methods=["POST"])])
def test_resolve_correct_resource_path(self):
resources = {
"RootApiResource": {
"Tyoe": "AWS::ApiGateway::Resource",
"Properties": {"PathPart": "root", "ResourceId": "TestApi"},
}
}
beta_resource = {
"Tyoe": "AWS::ApiGateway::Resource",
"Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
}
resources["BetaApiResource"] = beta_resource
provider = CfnApiProvider()
full_path = provider.resolve_resource_path(resources, beta_resource, "/test")
self.assertEqual(full_path, "/root/beta/test")
    def test_resolve_correct_multi_parent_resource_path(self):
        """A chain of nested resources (root -> v1 -> alpha/beta) produces
        fully-resolved route paths for each method.

        NOTE(review): these fixtures use "ResourceId" inside Resource
        Properties where real CloudFormation uses "RestApiId"; the provider
        appears not to read that field here — confirm before reusing.
        """
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "GET", "RestApiId": "TestApi", "ResourceId": "AlphaApiResource"},
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "POST", "RestApiId": "TestApi", "ResourceId": "BetaApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(path="/root/v1/beta", methods=["POST"], function_name=None),
                Route(path="/root/v1/alpha", methods=["GET"], function_name=None),
            ],
        )
    def test_resource_with_method_correct_routes(self):
        """HttpMethod "ANY" on a method expands to all seven HTTP verbs."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi"},
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "ANY", "RestApiId": "TestApi", "ResourceId": "BetaApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(
                    path="/beta",
                    methods=["POST", "GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "PUT"],
                    function_name=None,
                )
            ],
        )
    def test_method_integration_uri(self):
        """The lambda function name is extracted from each method's
        Integration Uri (the ${...Arn} inside the Fn::Sub string).

        NOTE(review): the fixture wiring looks intentionally mismatched —
        AlphaApiMethod's Uri references AWSBetaLambdaFunction, and
        BetaAlphaApiMethod references AWSLambdaFunction (which is not even
        declared as a resource). The assertions mirror the Uris exactly,
        so what is being tested is Uri string parsing, not resource lookup.
        The stray "} " / "}" endings in the Fn::Sub strings are likewise
        tolerated by the parser — confirm before "fixing" them.
        """
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "GET",
                        "RestApiId": "TestApi",
                        "ResourceId": "AlphaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSBetaLambdaFunction.Arn}/invocations} "
                            }
                        },
                    },
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "POST",
                        "RestApiId": "TestApi",
                        "ResourceId": "BetaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSLambdaFunction.Arn}/invocations}"
                            }
                        },
                    },
                },
                "AWSAlphaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
                "AWSBetaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(path="/root/v1/beta", methods=["POST"], function_name="AWSLambdaFunction"),
                Route(path="/root/v1/alpha", methods=["GET"], function_name="AWSBetaLambdaFunction"),
            ],
        )
    def test_binary_media_types_method(self):
        """ContentType values on method Integrations (with '~1' escaping for
        '/', e.g. "image~1png" -> "image/png") are collected into the API's
        binary media types."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "GET",
                        "RestApiId": "TestApi",
                        "ResourceId": "AlphaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSBetaLambdaFunction.Arn}/invocations} "
                            },
                            "ContentHandling": "CONVERT_TO_BINARY",
                            "ContentType": "image~1jpg",
                        },
                    },
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "POST",
                        "RestApiId": "TestApi",
                        "ResourceId": "BetaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSLambdaFunction.Arn}/invocations}"
                            },
                            "ContentHandling": "CONVERT_TO_BINARY",
                            "ContentType": "image~1png",
                        },
                    },
                },
                "AWSAlphaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
                "AWSBetaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(provider.api.binary_media_types, ["image/png", "image/jpg"])
    def test_cdk(self):
        """A CDK-synthesized template (Ref/Fn::GetAtt/Fn::Join wiring instead of
        literal IDs) resolves to the proxy route and the root route, both backed
        by the HelloHandler2E4FBA4D function with ANY-method expansion.
        """
        template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                "HelloHandler2E4FBA4D": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.handler", "Runtime": "python3.6"},
                    "DependsOn": ["HelloHandlerServiceRole11EF7C63"],
                },
                "EndpointEEF1FD8F": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Name": "Endpoint"}},
                "EndpointDeploymentStageprodB78BEEA0": {
                    "Type": "AWS::ApiGateway::Stage",
                    "Properties": {
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "DeploymentId": {"Ref": "EndpointDeployment318525DA37c0e38727e25b4317827bf43e918fbf"},
                        "StageName": "prod",
                    },
                },
                # Greedy {proxy+} child of the REST API root resource
                "Endpointproxy39E2174E": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {
                        "ParentId": {"Fn::GetAtt": ["EndpointEEF1FD8F", "RootResourceId"]},
                        "PathPart": "{proxy+}",
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                    },
                },
                "EndpointproxyANYC09721C5": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "ANY",
                        "ResourceId": {"Ref": "Endpointproxy39E2174E"},
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "AuthorizationType": "NONE",
                        "Integration": {
                            "IntegrationHttpMethod": "POST",
                            "Type": "AWS_PROXY",
                            "Uri": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:",
                                        {"Ref": "AWS::Partition"},
                                        ":apigateway:",
                                        {"Ref": "AWS::Region"},
                                        "lambda:path/2015-03-31/functions/",
                                        {"Fn::GetAtt": ["HelloHandler2E4FBA4D", "Arn"]},
                                        "/invocations",
                                    ],
                                ]
                            },
                        },
                    },
                },
                # ANY method directly on the API root ("/")
                "EndpointANY485C938B": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "ANY",
                        "ResourceId": {"Fn::GetAtt": ["EndpointEEF1FD8F", "RootResourceId"]},
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "AuthorizationType": "NONE",
                        "Integration": {
                            "IntegrationHttpMethod": "POST",
                            "Type": "AWS_PROXY",
                            "Uri": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:",
                                        {"Ref": "AWS::Partition"},
                                        ":apigateway:",
                                        {"Ref": "AWS::Region"},
                                        "lambda:path/2015-03-31/functions/",
                                        {"Fn::GetAtt": ["HelloHandler2E4FBA4D", "Arn"]},
                                        "/invocations",
                                    ],
                                ]
                            },
                        },
                    },
                },
            },
            "Parameters": {
                "HelloHandlerCodeS3Bucket4359A483": {
                    "Type": "String",
                    "Description": 'S3 bucket for asset "CdkWorkshopStack/HelloHandler/Code"',
                },
                "HelloHandlerCodeS3VersionKey07D12610": {
                    "Type": "String",
                    "Description": 'S3 key for asset version "CdkWorkshopStack/HelloHandler/Code"',
                },
            },
            "Outputs": {
                "Endpoint8024A810": {
                    "Value": {
                        "Fn::Join": [
                            "",
                            [
                                "https://",
                                {"Ref": "EndpointEEF1FD8F"},
                                ".execute-api.",
                                {"Ref": "AWS::Region"},
                                ".",
                                {"Ref": "AWS::URLSuffix"},
                                "/",
                                {"Ref": "EndpointDeploymentStageprodB78BEEA0"},
                                "/",
                            ],
                        ]
                    },
                    "Export": {"Name": "CdkWorkshopStack:Endpoint8024A810"},
                }
            },
        }
        provider = ApiProvider(template)
        proxy_paths = [Route(path="/{proxy+}", methods=Route.ANY_HTTP_METHODS, function_name="HelloHandler2E4FBA4D")]
        root_paths = [Route(path="/", methods=Route.ANY_HTTP_METHODS, function_name="HelloHandler2E4FBA4D")]
        self.assertCountEqual(provider.routes, proxy_paths + root_paths)
class TestCloudFormationProviderWithApiGatewayV2(TestCase):
    """Route extraction from AWS::ApiGatewayV2::Api resources: inline swagger,
    swagger files, mocked SwaggerReader, ANY-method expansion, HTTP API
    quick-create (Target/RouteKey) and CORS configuration.
    """
    def setUp(self):
        # Routes used by the swagger-based tests below.
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
    def test_with_no_apis(self):
        """An Api resource with no Body/Target yields no routes."""
        template = {"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {}}}}
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [])
    def test_with_inline_swagger_apis(self):
        """Routes embedded inline via the "Body" property are extracted."""
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": make_swagger(self.input_routes)}}
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(self.input_routes, provider.routes)
    def test_with_swagger_as_local_file(self):
        """Routes are read from a swagger file referenced via "BodyS3Location"."""
        # NOTE(review): delete=False and no later os.remove -- the temp file is
        # leaked in the temp directory after the test run.
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()
            template = {
                "Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"BodyS3Location": filename}}}
            }
            provider = ApiProvider(template)
            self.assertCountEqual(self.input_routes, provider.routes)
    def test_body_with_swagger_as_local_file_expect_fail(self):
        """A file path passed as "Body" (instead of "BodyS3Location") must raise."""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()
            template = {"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": filename}}}}
            self.assertRaises(Exception, ApiProvider, template)
    @patch("samcli.lib.providers.cfn_base_api_provider.SwaggerReader")
    def test_with_swagger_as_both_body_and_uri_called(self, SwaggerReaderMock):
        """Both Body and BodyS3Location are forwarded to SwaggerReader along with cwd."""
        body = {"some": "body"}
        filename = "somefile.txt"
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"BodyS3Location": filename, "Body": body}}
            }
        }
        SwaggerReaderMock.return_value.read.return_value = make_swagger(self.input_routes)
        cwd = "foo"
        provider = ApiProvider(template, cwd=cwd)
        self.assertCountEqual(self.input_routes, provider.routes)
        SwaggerReaderMock.assert_called_with(definition_body=body, definition_uri=filename, working_dir=cwd)
    def test_swagger_with_any_method(self):
        """A swagger "any" method expands to the full set of HTTP methods."""
        routes = [Route(path="$default", methods=["any"], function_name="SamFunc1")]
        expected_routes = [
            Route(
                path="$default",
                methods=["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"],
                function_name="SamFunc1",
            )
        ]
        template = {
            "Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": make_swagger(routes)}}}
        }
        provider = ApiProvider(template)
        self.assertEqual(expected_routes, provider.routes)
    def test_with_quick_create_default_route(self):
        """HTTP API quick-create (Target, no RouteKey) produces the $default catch-all."""
        expected_routes = [
            Route(
                path="$default",
                methods=["X-AMAZON-APIGATEWAY-ANY-METHOD"],
                function_name="SamFunc1",
            )
        ]
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Target": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1"
                        ":123456789012:function:SamFunc1/invocations",
                        "ProtocolType": "HTTP",
                    },
                }
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(expected_routes, provider.routes)
    def test_with_quick_create_defined_route(self):
        """HTTP API quick-create with an explicit RouteKey yields that single route."""
        expected_routes = [
            Route(
                path="/path1",
                methods=["GET"],
                function_name="SamFunc1",
            )
        ]
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Target": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1"
                        ":123456789012:function:SamFunc1/invocations",
                        "RouteKey": "GET /path1",
                        "ProtocolType": "HTTP",
                    },
                }
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(expected_routes, provider.routes)
    def test_with_cors(self):
        """CorsConfiguration maps onto the provider's Cors object.

        The expected allow_methods is "GET,OPTIONS" although only GET is
        configured -- presumably the provider appends OPTIONS for preflight
        (behavior of the code under test, not asserted elsewhere here).
        """
        expected_cors = Cors(
            allow_origin="https://example.com",
            allow_methods="GET,OPTIONS",
            allow_headers="x-apigateway-header",
            max_age=600,
        )
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "CorsConfiguration": {
                            "AllowHeaders": ["x-apigateway-header"],
                            "AllowMethods": ["GET"],
                            "AllowOrigins": ["https://example.com"],
                            "MaxAge": 600,
                        }
                    },
                }
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(expected_cors, provider.api.cors)
class TestCloudFormationProviderWithApiGatewayV2Route(TestCase):
    """Route extraction from explicit AWS::ApiGatewayV2::Route +
    AWS::ApiGatewayV2::Integration resources (as opposed to inline swagger).
    """
    def test_basic_http_api_routes(self):
        """A Route whose Target points at a Lambda-proxy Integration resolves
        to a single route bound to that Lambda function."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "POST /{proxy+}",
                        # "integrations/<logical id>" is how V2 routes reference integrations
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [Route(function_name="SamFunc1", path="/{proxy+}", methods=["POST"])])
    def test_http_api_routes_no_integration(self):
        """A Route without a Target/Integration produces no routes."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {"ApiId": "TestHttpApi", "RouteKey": "POST /{proxy+}"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [])
    def test_http_api_default_route(self):
        """The "$default" RouteKey maps to the catch-all ANY-method route."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "$default",
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(
            provider.routes,
            [Route(function_name="SamFunc1", path="$default", methods=["X-AMAZON-APIGATEWAY-ANY-METHOD"])],
        )
    def test_http_api_wrong_route(self):
        """A malformed RouteKey (method with no path) must raise."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "GET",
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        self.assertRaises(Exception, ApiProvider, template)
class TestCloudFormationWithApiGatewayV2Stage(TestCase):
    """AWS::ApiGatewayV2::Stage handling: stage name, stage variables, and
    last-stage-wins behavior when multiple stages target the same Api.
    """
    def test_provider_parse_stage_name(self):
        """StageName from a Stage resource is attached to the parsed API."""
        template = {
            "Resources": {
                "Stage": {"Type": "AWS::ApiGatewayV2::Stage", "Properties": {"StageName": "dev", "ApiId": "TestApi"}},
                "TestApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, None)
    def test_provider_stage_variables(self):
        """Stage Variables from the Stage resource are attached to the parsed API."""
        template = {
            "Resources": {
                "Stage": {
                    "Type": "AWS::ApiGatewayV2::Stage",
                    "Properties": {
                        "StageName": "dev",
                        "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                        "ApiId": "TestApi",
                    },
                },
                "TestApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, {"vis": "data", "random": "test", "foo": "bar"})
    def test_multi_stage_get_all(self):
        """With two stages on one Api, all routes are returned and the API keeps
        the values of the stage processed last (OrderedDict guarantees order:
        "StageProd" is appended after "StageDev", so Production wins)."""
        resources = OrderedDict(
            {
                "ProductionApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                                "/anotherpath": {
                                    "post": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                            }
                        }
                    },
                }
            }
        )
        resources["StageDev"] = {
            "Type": "AWS::ApiGatewayV2::Stage",
            "Properties": {
                "StageName": "dev",
                "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                "ApiId": "ProductionApi",
            },
        }
        resources["StageProd"] = {
            "Type": "AWS::ApiGatewayV2::Stage",
            "Properties": {
                "StageName": "Production",
                "Variables": {"vis": "prod data", "random": "test", "foo": "bar"},
                "ApiId": "ProductionApi",
            },
        }
        template = {"Resources": resources}
        provider = ApiProvider(template)
        result = [f for f in provider.get_all()]
        routes = result[0].routes
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        route2 = Route(path="/anotherpath", methods=["POST"], function_name="NoApiEventFunction")
        self.assertEqual(len(routes), 2)
        self.assertIn(route1, routes)
        self.assertIn(route2, routes)
        self.assertEqual(provider.api.stage_name, "Production")
        self.assertEqual(provider.api.stage_variables, {"vis": "prod data", "random": "test", "foo": "bar"})
import json
import tempfile
from collections import OrderedDict
from unittest import TestCase
from unittest.mock import patch
from samcli.lib.providers.api_provider import ApiProvider
from samcli.lib.providers.cfn_api_provider import CfnApiProvider
from samcli.local.apigw.local_apigw_service import Route
from tests.unit.commands.local.lib.test_sam_api_provider import make_swagger
from samcli.lib.providers.provider import Cors
class TestApiProviderWithApiGatewayRestRoute(TestCase):
    """Route and binary-media-type extraction from AWS::ApiGateway::RestApi
    resources with inline or file-based swagger definitions.
    """
    def setUp(self):
        # Shared fixtures: binary media types and routes fed into make_swagger.
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
    def test_with_no_apis(self):
        """A RestApi with no Body yields no routes."""
        template = {"Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {}}}}
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [])
    def test_with_inline_swagger_apis(self):
        """Routes embedded inline via the "Body" property are extracted."""
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": make_swagger(self.input_routes)}}
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(self.input_routes, provider.routes)
    def test_with_swagger_as_local_file(self):
        """Routes are read from a swagger file referenced via "BodyS3Location"."""
        # NOTE(review): delete=False and no later os.remove -- the temp file is
        # leaked in the temp directory after the test run.
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()
            template = {
                "Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"BodyS3Location": filename}}}
            }
            provider = ApiProvider(template)
            self.assertCountEqual(self.input_routes, provider.routes)
    def test_body_with_swagger_as_local_file_expect_fail(self):
        """A file path passed as "Body" (instead of "BodyS3Location") must raise."""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
            filename = fp.name
            swagger = make_swagger(self.input_routes)
            json.dump(swagger, fp)
            fp.flush()
            template = {"Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": filename}}}}
            self.assertRaises(Exception, ApiProvider, template)
    @patch("samcli.lib.providers.cfn_base_api_provider.SwaggerReader")
    def test_with_swagger_as_both_body_and_uri_called(self, SwaggerReaderMock):
        """Both Body and BodyS3Location are forwarded to SwaggerReader along with cwd."""
        body = {"some": "body"}
        filename = "somefile.txt"
        template = {
            "Resources": {
                "Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"BodyS3Location": filename, "Body": body}}
            }
        }
        SwaggerReaderMock.return_value.read.return_value = make_swagger(self.input_routes)
        cwd = "foo"
        provider = ApiProvider(template, cwd=cwd)
        self.assertCountEqual(self.input_routes, provider.routes)
        SwaggerReaderMock.assert_called_with(definition_body=body, definition_uri=filename, working_dir=cwd)
    def test_swagger_with_any_method(self):
        """A swagger "any" method expands to the full set of HTTP methods."""
        routes = [Route(path="/path", methods=["any"], function_name="SamFunc1")]
        expected_routes = [
            Route(
                path="/path",
                methods=["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"],
                function_name="SamFunc1",
            )
        ]
        template = {
            "Resources": {"Api1": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Body": make_swagger(routes)}}}
        }
        provider = ApiProvider(template)
        self.assertCountEqual(expected_routes, provider.routes)
    def test_with_binary_media_types(self):
        """Binary media types declared in the swagger body surface on the API."""
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {"Body": make_swagger(self.input_routes, binary_media_types=self.binary_types)},
                }
            }
        }
        expected_binary_types = sorted(self.binary_types)
        expected_apis = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
        provider = ApiProvider(template)
        self.assertCountEqual(expected_apis, provider.routes)
        self.assertCountEqual(provider.api.binary_media_types, expected_binary_types)
    def test_with_binary_media_types_in_swagger_and_on_resource(self):
        """BinaryMediaTypes on the resource are merged with those in the swagger body."""
        input_routes = [Route(path="/path", methods=["OPTIONS"], function_name="SamFunc1")]
        extra_binary_types = ["text/html"]
        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "BinaryMediaTypes": extra_binary_types,
                        "Body": make_swagger(input_routes, binary_media_types=self.binary_types),
                    },
                }
            }
        }
        expected_binary_types = sorted(self.binary_types + extra_binary_types)
        expected_routes = [Route(path="/path", methods=["OPTIONS"], function_name="SamFunc1")]
        provider = ApiProvider(template)
        self.assertCountEqual(expected_routes, provider.routes)
        self.assertCountEqual(provider.api.binary_media_types, expected_binary_types)
class TestCloudFormationStageValues(TestCase):
    """AWS::ApiGateway::Stage handling for REST APIs: stage name, stage
    variables, and last-stage-wins behavior with multiple stages.
    """
    def setUp(self):
        # binary_types is declared for parity with the sibling test class;
        # only input_routes-style fixtures are used indirectly here.
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
    def test_provider_parse_stage_name(self):
        """StageName from a Stage resource is attached to the parsed API."""
        template = {
            "Resources": {
                "Stage": {"Type": "AWS::ApiGateway::Stage", "Properties": {"StageName": "dev", "RestApiId": "TestApi"}},
                "TestApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, None)
    def test_provider_stage_variables(self):
        """Stage Variables from the Stage resource are attached to the parsed API."""
        template = {
            "Resources": {
                "Stage": {
                    "Type": "AWS::ApiGateway::Stage",
                    "Properties": {
                        "StageName": "dev",
                        "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                        "RestApiId": "TestApi",
                    },
                },
                "TestApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, {"vis": "data", "random": "test", "foo": "bar"})
    def test_multi_stage_get_all(self):
        """With two stages on one RestApi, all routes are returned and the API
        keeps the values of the stage processed last (OrderedDict order:
        "StageProd" appended after "StageDev", so Production wins)."""
        resources = OrderedDict(
            {
                "ProductionApi": {
                    "Type": "AWS::ApiGateway::RestApi",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                                "/anotherpath": {
                                    "post": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                            }
                        }
                    },
                }
            }
        )
        resources["StageDev"] = {
            "Type": "AWS::ApiGateway::Stage",
            "Properties": {
                "StageName": "dev",
                "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                "RestApiId": "ProductionApi",
            },
        }
        resources["StageProd"] = {
            "Type": "AWS::ApiGateway::Stage",
            "Properties": {
                "StageName": "Production",
                "Variables": {"vis": "prod data", "random": "test", "foo": "bar"},
                "RestApiId": "ProductionApi",
            },
        }
        template = {"Resources": resources}
        provider = ApiProvider(template)
        result = [f for f in provider.get_all()]
        routes = result[0].routes
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        route2 = Route(path="/anotherpath", methods=["POST"], function_name="NoApiEventFunction")
        self.assertEqual(len(routes), 2)
        self.assertIn(route1, routes)
        self.assertIn(route2, routes)
        self.assertEqual(provider.api.stage_name, "Production")
        self.assertEqual(provider.api.stage_variables, {"vis": "prod data", "random": "test", "foo": "bar"})
class TestCloudFormationResourceMethod(TestCase):
    """Path resolution and route extraction from explicit
    AWS::ApiGateway::Resource / AWS::ApiGateway::Method resources,
    including CDK-generated templates.
    """
    def setUp(self):
        self.binary_types = ["image/png", "image/jpg"]
        self.input_routes = [
            Route(path="/path1", methods=["GET", "POST"], function_name="SamFunc1"),
            Route(path="/path2", methods=["PUT", "GET"], function_name="SamFunc1"),
            Route(path="/path3", methods=["DELETE"], function_name="SamFunc1"),
        ]
    def test_basic_rest_api_resource_method(self):
        """A Method without an Integration yields a route with no function bound."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"StageName": "Prod"}},
                # NOTE(review): "ApiResource" has no "Type" key -- intentional
                # minimal fixture; the provider resolves it via ResourceId refs.
                "ApiResource": {"Properties": {"PathPart": "{proxy+}", "RestApiId": "TestApi"}},
                "ApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "POST", "RestApiId": "TestApi", "ResourceId": "ApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [Route(function_name=None, path="/{proxy+}", methods=["POST"])])
    def test_resolve_correct_resource_path(self):
        """resolve_resource_path walks ParentId links to build the full path."""
        # NOTE(review): "Tyoe" is a pre-existing typo for "Type" in these
        # fixtures; resolve_resource_path does not inspect the Type key, so the
        # test still exercises the intended path resolution.
        resources = {
            "RootApiResource": {
                "Tyoe": "AWS::ApiGateway::Resource",
                "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
            }
        }
        beta_resource = {
            "Tyoe": "AWS::ApiGateway::Resource",
            "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
        }
        resources["BetaApiResource"] = beta_resource
        provider = CfnApiProvider()
        full_path = provider.resolve_resource_path(resources, beta_resource, "/test")
        self.assertEqual(full_path, "/root/beta/test")
    def test_resolve_correct_multi_parent_resource_path(self):
        """Paths are resolved through multiple ParentId levels (root/v1/{leaf})."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "GET", "RestApiId": "TestApi", "ResourceId": "AlphaApiResource"},
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "POST", "RestApiId": "TestApi", "ResourceId": "BetaApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(path="/root/v1/beta", methods=["POST"], function_name=None),
                Route(path="/root/v1/alpha", methods=["GET"], function_name=None),
            ],
        )
    def test_resource_with_method_correct_routes(self):
        """An ANY HttpMethod on a Method expands to all seven HTTP methods."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi"},
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {"HttpMethod": "ANY", "RestApiId": "TestApi", "ResourceId": "BetaApiResource"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(
                    path="/beta",
                    methods=["POST", "GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "PUT"],
                    function_name=None,
                )
            ],
        )
    def test_method_integration_uri(self):
        """The Lambda function name is parsed out of each Method's Integration Uri."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "GET",
                        "RestApiId": "TestApi",
                        "ResourceId": "AlphaApiResource",
                        "Integration": {
                            "Uri": {
                                # NOTE(review): stray "}" and trailing space are in the
                                # original fixture; function-name extraction still works.
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSBetaLambdaFunction.Arn}/invocations} "
                            }
                        },
                    },
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "POST",
                        "RestApiId": "TestApi",
                        "ResourceId": "BetaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSLambdaFunction.Arn}/invocations}"
                            }
                        },
                    },
                },
                "AWSAlphaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
                "AWSBetaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(
            provider.routes,
            [
                Route(path="/root/v1/beta", methods=["POST"], function_name="AWSLambdaFunction"),
                Route(path="/root/v1/alpha", methods=["GET"], function_name="AWSBetaLambdaFunction"),
            ],
        )
    def test_binary_media_types_method(self):
        """ContentType values on CONVERT_TO_BINARY integrations (with "~1"
        decoded to "/") are collected onto the provider's API."""
        template = {
            "Resources": {
                "TestApi": {"Type": "AWS::ApiGateway::Resource", "Properties": {"StageName": "Prod"}},
                "RootApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "root", "ResourceId": "TestApi"},
                },
                "V1ApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "v1", "ResourceId": "TestApi", "ParentId": "RootApiResource"},
                },
                "AlphaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "alpha", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "BetaApiResource": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {"PathPart": "beta", "ResourceId": "TestApi", "ParentId": "V1ApiResource"},
                },
                "AlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "GET",
                        "RestApiId": "TestApi",
                        "ResourceId": "AlphaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSBetaLambdaFunction.Arn}/invocations} "
                            },
                            "ContentHandling": "CONVERT_TO_BINARY",
                            "ContentType": "image~1jpg",
                        },
                    },
                },
                "BetaAlphaApiMethod": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "POST",
                        "RestApiId": "TestApi",
                        "ResourceId": "BetaApiResource",
                        "Integration": {
                            "Uri": {
                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/"
                                "functions"
                                "/${AWSLambdaFunction.Arn}/invocations}"
                            },
                            "ContentHandling": "CONVERT_TO_BINARY",
                            "ContentType": "image~1png",
                        },
                    },
                },
                "AWSAlphaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
                "AWSBetaLambdaFunction": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.run_test", "Runtime": "Python3.6"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertCountEqual(provider.api.binary_media_types, ["image/png", "image/jpg"])
    def test_cdk(self):
        """A CDK-synthesized template (Ref/Fn::GetAtt/Fn::Join wiring) resolves
        to the proxy and root routes, both backed by HelloHandler2E4FBA4D."""
        template = {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                "HelloHandler2E4FBA4D": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": ".", "Handler": "main.handler", "Runtime": "python3.6"},
                    "DependsOn": ["HelloHandlerServiceRole11EF7C63"],
                },
                "EndpointEEF1FD8F": {"Type": "AWS::ApiGateway::RestApi", "Properties": {"Name": "Endpoint"}},
                "EndpointDeploymentStageprodB78BEEA0": {
                    "Type": "AWS::ApiGateway::Stage",
                    "Properties": {
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "DeploymentId": {"Ref": "EndpointDeployment318525DA37c0e38727e25b4317827bf43e918fbf"},
                        "StageName": "prod",
                    },
                },
                "Endpointproxy39E2174E": {
                    "Type": "AWS::ApiGateway::Resource",
                    "Properties": {
                        "ParentId": {"Fn::GetAtt": ["EndpointEEF1FD8F", "RootResourceId"]},
                        "PathPart": "{proxy+}",
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                    },
                },
                "EndpointproxyANYC09721C5": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "ANY",
                        "ResourceId": {"Ref": "Endpointproxy39E2174E"},
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "AuthorizationType": "NONE",
                        "Integration": {
                            "IntegrationHttpMethod": "POST",
                            "Type": "AWS_PROXY",
                            "Uri": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:",
                                        {"Ref": "AWS::Partition"},
                                        ":apigateway:",
                                        {"Ref": "AWS::Region"},
                                        "lambda:path/2015-03-31/functions/",
                                        {"Fn::GetAtt": ["HelloHandler2E4FBA4D", "Arn"]},
                                        "/invocations",
                                    ],
                                ]
                            },
                        },
                    },
                },
                "EndpointANY485C938B": {
                    "Type": "AWS::ApiGateway::Method",
                    "Properties": {
                        "HttpMethod": "ANY",
                        "ResourceId": {"Fn::GetAtt": ["EndpointEEF1FD8F", "RootResourceId"]},
                        "RestApiId": {"Ref": "EndpointEEF1FD8F"},
                        "AuthorizationType": "NONE",
                        "Integration": {
                            "IntegrationHttpMethod": "POST",
                            "Type": "AWS_PROXY",
                            "Uri": {
                                "Fn::Join": [
                                    "",
                                    [
                                        "arn:",
                                        {"Ref": "AWS::Partition"},
                                        ":apigateway:",
                                        {"Ref": "AWS::Region"},
                                        "lambda:path/2015-03-31/functions/",
                                        {"Fn::GetAtt": ["HelloHandler2E4FBA4D", "Arn"]},
                                        "/invocations",
                                    ],
                                ]
                            },
                        },
                    },
                },
            },
            "Parameters": {
                "HelloHandlerCodeS3Bucket4359A483": {
                    "Type": "String",
                    "Description": 'S3 bucket for asset "CdkWorkshopStack/HelloHandler/Code"',
                },
                "HelloHandlerCodeS3VersionKey07D12610": {
                    "Type": "String",
                    "Description": 'S3 key for asset version "CdkWorkshopStack/HelloHandler/Code"',
                },
            },
            "Outputs": {
                "Endpoint8024A810": {
                    "Value": {
                        "Fn::Join": [
                            "",
                            [
                                "https://",
                                {"Ref": "EndpointEEF1FD8F"},
                                ".execute-api.",
                                {"Ref": "AWS::Region"},
                                ".",
                                {"Ref": "AWS::URLSuffix"},
                                "/",
                                {"Ref": "EndpointDeploymentStageprodB78BEEA0"},
                                "/",
                            ],
                        ]
                    },
                    "Export": {"Name": "CdkWorkshopStack:Endpoint8024A810"},
                }
            },
        }
        provider = ApiProvider(template)
        proxy_paths = [Route(path="/{proxy+}", methods=Route.ANY_HTTP_METHODS, function_name="HelloHandler2E4FBA4D")]
        root_paths = [Route(path="/", methods=Route.ANY_HTTP_METHODS, function_name="HelloHandler2E4FBA4D")]
        self.assertCountEqual(provider.routes, proxy_paths + root_paths)
class TestCloudFormationProviderWithApiGatewayV2(TestCase):
def setUp(self):
        """Shared fixture: the routes that the swagger-based tests below feed in."""
        route_specs = [
            ("/path1", ["GET", "POST"]),
            ("/path2", ["PUT", "GET"]),
            ("/path3", ["DELETE"]),
        ]
        self.input_routes = [
            Route(path=path, methods=methods, function_name="SamFunc1") for path, methods in route_specs
        ]
def test_with_no_apis(self):
        """An ApiGatewayV2 Api with empty Properties contributes no routes."""
        empty_api_template = {"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {}}}}
        self.assertEqual(ApiProvider(empty_api_template).routes, [])
def test_with_inline_swagger_apis(self):
        """Routes defined inline via the Api "Body" swagger are extracted."""
        swagger_body = make_swagger(self.input_routes)
        template = {
            "Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": swagger_body}}}
        }
        provider = ApiProvider(template)
        self.assertCountEqual(self.input_routes, provider.routes)
def test_with_swagger_as_local_file(self):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
filename = fp.name
swagger = make_swagger(self.input_routes)
json.dump(swagger, fp)
fp.flush()
template = {
"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"BodyS3Location": filename}}}
}
provider = ApiProvider(template)
self.assertCountEqual(self.input_routes, provider.routes)
def test_body_with_swagger_as_local_file_expect_fail(self):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp:
filename = fp.name
swagger = make_swagger(self.input_routes)
json.dump(swagger, fp)
fp.flush()
template = {"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": filename}}}}
self.assertRaises(Exception, ApiProvider, template)
@patch("samcli.lib.providers.cfn_base_api_provider.SwaggerReader")
def test_with_swagger_as_both_body_and_uri_called(self, SwaggerReaderMock):
body = {"some": "body"}
filename = "somefile.txt"
template = {
"Resources": {
"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"BodyS3Location": filename, "Body": body}}
}
}
SwaggerReaderMock.return_value.read.return_value = make_swagger(self.input_routes)
cwd = "foo"
provider = ApiProvider(template, cwd=cwd)
self.assertCountEqual(self.input_routes, provider.routes)
SwaggerReaderMock.assert_called_with(definition_body=body, definition_uri=filename, working_dir=cwd)
def test_swagger_with_any_method(self):
routes = [Route(path="$default", methods=["any"], function_name="SamFunc1")]
expected_routes = [
Route(
path="$default",
methods=["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"],
function_name="SamFunc1",
)
]
template = {
"Resources": {"Api1": {"Type": "AWS::ApiGatewayV2::Api", "Properties": {"Body": make_swagger(routes)}}}
}
provider = ApiProvider(template)
self.assertEqual(expected_routes, provider.routes)
def test_with_quick_create_default_route(self):
expected_routes = [
Route(
path="$default",
methods=["X-AMAZON-APIGATEWAY-ANY-METHOD"],
function_name="SamFunc1",
)
]
template = {
"Resources": {
"Api1": {
"Type": "AWS::ApiGatewayV2::Api",
"Properties": {
"Target": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1"
":123456789012:function:SamFunc1/invocations",
"ProtocolType": "HTTP",
},
}
}
}
provider = ApiProvider(template)
self.assertEqual(expected_routes, provider.routes)
def test_with_quick_create_defined_route(self):
expected_routes = [
Route(
path="/path1",
methods=["GET"],
function_name="SamFunc1",
)
]
template = {
"Resources": {
"Api1": {
"Type": "AWS::ApiGatewayV2::Api",
"Properties": {
"Target": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1"
":123456789012:function:SamFunc1/invocations",
"RouteKey": "GET /path1",
"ProtocolType": "HTTP",
},
}
}
}
provider = ApiProvider(template)
self.assertEqual(expected_routes, provider.routes)
def test_with_cors(self):
expected_cors = Cors(
allow_origin="https://example.com",
allow_methods="GET,OPTIONS",
allow_headers="x-apigateway-header",
max_age=600,
)
template = {
"Resources": {
"Api1": {
"Type": "AWS::ApiGatewayV2::Api",
"Properties": {
"CorsConfiguration": {
"AllowHeaders": ["x-apigateway-header"],
"AllowMethods": ["GET"],
"AllowOrigins": ["https://example.com"],
"MaxAge": 600,
}
},
}
}
}
provider = ApiProvider(template)
self.assertEqual(expected_cors, provider.api.cors)
class TestCloudFormationProviderWithApiGatewayV2Route(TestCase):
    """Tests for ApiProvider parsing of explicit AWS::ApiGatewayV2::Route
    resources wired to AWS::ApiGatewayV2::Integration resources."""
    def test_basic_http_api_routes(self):
        """A Route with a 'POST /{proxy+}' key and a Lambda proxy integration yields one route."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "POST /{proxy+}",
                        # Target references the Integration resource below.
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [Route(function_name="SamFunc1", path="/{proxy+}", methods=["POST"])])
    def test_http_api_routes_no_integration(self):
        """A Route without a Target/Integration produces no routes."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {"ApiId": "TestHttpApi", "RouteKey": "POST /{proxy+}"},
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(provider.routes, [])
    def test_http_api_default_route(self):
        """The '$default' RouteKey maps to the catch-all ANY-method route."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "$default",
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        provider = ApiProvider(template)
        self.assertEqual(
            provider.routes,
            [Route(function_name="SamFunc1", path="$default", methods=["X-AMAZON-APIGATEWAY-ANY-METHOD"])],
        )
    def test_http_api_wrong_route(self):
        """A malformed RouteKey (method with no path) must raise."""
        template = {
            "Resources": {
                "TestHttpApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {"name": "testHttpApi", "ProtocolType": "HTTP"},
                },
                "HttpApiRoute": {
                    "Type": "AWS::ApiGatewayV2::Route",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "RouteKey": "GET",
                        "Target": "integrations/HttpApiIntegration",
                    },
                },
                "HttpApiIntegration": {
                    "Type": "AWS::ApiGatewayV2::Integration",
                    "Properties": {
                        "ApiId": "TestHttpApi",
                        "PayloadFormatVersion": "2.0",
                        "IntegrationUri": "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
                        "arn:aws:lambda:us-east-1:123456789012:function:SamFunc1/invocations",
                    },
                },
            }
        }
        self.assertRaises(Exception, ApiProvider, template)
class TestCloudFormationWithApiGatewayV2Stage(TestCase):
    """Tests for ApiProvider handling of AWS::ApiGatewayV2::Stage resources
    (stage name and stage variables) attached to an HTTP API."""
    def test_provider_parse_stage_name(self):
        """The stage name from the Stage resource is exposed on provider.api."""
        template = {
            "Resources": {
                "Stage": {"Type": "AWS::ApiGatewayV2::Stage", "Properties": {"StageName": "dev", "ApiId": "TestApi"}},
                "TestApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        # No Variables were configured on the stage.
        self.assertEqual(provider.api.stage_variables, None)
    def test_provider_stage_variables(self):
        """Stage Variables are exposed on provider.api.stage_variables."""
        template = {
            "Resources": {
                "Stage": {
                    "Type": "AWS::ApiGatewayV2::Stage",
                    "Properties": {
                        "StageName": "dev",
                        "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                        "ApiId": "TestApi",
                    },
                },
                "TestApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                }
                            }
                        }
                    },
                },
            }
        }
        provider = ApiProvider(template)
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        self.assertIn(route1, provider.routes)
        self.assertEqual(provider.api.stage_name, "dev")
        self.assertEqual(provider.api.stage_variables, {"vis": "data", "random": "test", "foo": "bar"})
    def test_multi_stage_get_all(self):
        """With two stages on one API, get_all() merges routes and — per the
        asserts below — the provider ends up exposing the last stage added."""
        resources = OrderedDict(
            {
                "ProductionApi": {
                    "Type": "AWS::ApiGatewayV2::Api",
                    "Properties": {
                        "Body": {
                            "paths": {
                                "/path": {
                                    "get": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                                "/anotherpath": {
                                    "post": {
                                        "x-amazon-apigateway-integration": {
                                            "httpMethod": "POST",
                                            "type": "aws_proxy",
                                            "uri": {
                                                "Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31"
                                                "/functions/${NoApiEventFunction.Arn}/invocations"
                                            },
                                            "responses": {},
                                        }
                                    }
                                },
                            }
                        }
                    },
                }
            }
        )
        # Stages are appended after the API; insertion order matters for which
        # stage the provider reports (StageProd is added last).
        resources["StageDev"] = {
            "Type": "AWS::ApiGatewayV2::Stage",
            "Properties": {
                "StageName": "dev",
                "Variables": {"vis": "data", "random": "test", "foo": "bar"},
                "ApiId": "ProductionApi",
            },
        }
        resources["StageProd"] = {
            "Type": "AWS::ApiGatewayV2::Stage",
            "Properties": {
                "StageName": "Production",
                "Variables": {"vis": "prod data", "random": "test", "foo": "bar"},
                "ApiId": "ProductionApi",
            },
        }
        template = {"Resources": resources}
        provider = ApiProvider(template)
        result = [f for f in provider.get_all()]
        routes = result[0].routes
        route1 = Route(path="/path", methods=["GET"], function_name="NoApiEventFunction")
        route2 = Route(path="/anotherpath", methods=["POST"], function_name="NoApiEventFunction")
        self.assertEqual(len(routes), 2)
        self.assertIn(route1, routes)
        self.assertIn(route2, routes)
        self.assertEqual(provider.api.stage_name, "Production")
        self.assertEqual(provider.api.stage_variables, {"vis": "prod data", "random": "test", "foo": "bar"})
| true | true |
f7ff9829419919ac745e63240c388ce1ca72f8c0 | 4,385 | py | Python | mdn_cell/mdn_train_K4_fullnet.py | fregu856/ebms_proposals | e3e1cc35d5419ca61e25decb243a0b8bebd0d700 | [
"MIT"
] | 2 | 2021-11-07T23:00:38.000Z | 2021-12-14T16:17:50.000Z | mdn_cell/mdn_train_K4_fullnet.py | fregu856/ebms_proposals | e3e1cc35d5419ca61e25decb243a0b8bebd0d700 | [
"MIT"
] | null | null | null | mdn_cell/mdn_train_K4_fullnet.py | fregu856/ebms_proposals | e3e1cc35d5419ca61e25decb243a0b8bebd0d700 | [
"MIT"
] | null | null | null | # camera-ready
from datasets import DatasetTrain # (this needs to be imported before torch, because cv2 needs to be imported before torch for some reason)
from mdn_model_K4 import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributions
import math
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
# NOTE! change this to not overwrite all log data when you train the model:
model_id = "mdn_train_K4_fullnet"
num_epochs = 75
batch_size = 32
learning_rate = 0.001
train_dataset = DatasetTrain()
num_train_batches = int(len(train_dataset)/batch_size)
print ("num_train_batches:", num_train_batches)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
# Train an ensemble of num_models independently initialized mixture density networks.
num_models = 20
for i in range(num_models):
    network = ToyNet(model_id + "_%d" % i, project_dir="/root/ebms_proposals/mdn_cell").cuda()
    K = network.noise_net.K # (number of mixture components)
    print (K)
    optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)
    epoch_losses_train = []
    for epoch in range(num_epochs):
        print ("###########################")
        print ("######## NEW EPOCH ########")
        print ("###########################")
        print ("model: %d/%d | epoch: %d/%d" % (i+1, num_models, epoch+1, num_epochs))
        network.train() # (set in training mode, this affects BatchNorm and dropout)
        batch_losses = []
        for step, (xs, ys) in enumerate(train_loader):
            xs = xs.cuda() # (shape: (batch_size, 3, img_size, img_size))
            ys = ys.cuda().unsqueeze(1) # (shape: (batch_size, 1))
            x_features = network.feature_net(xs) # (shape: (batch_size, hidden_dim))
            if epoch < 20:
                ####################################################################
                # make sure we do NOT train the resnet feature extractor
                # (for the first 20 epochs only the mixture head is trained):
                ####################################################################
                x_features = x_features.detach()
                ####################################################################
            # Evaluate the K-component Gaussian mixture density q(y|x) at the targets.
            means, log_sigma2s, weights = network.noise_net(x_features) # (all have shape: (batch_size, K))
            sigmas = torch.exp(log_sigma2s/2.0) # (shape: (batch_size, K))
            q_distr = torch.distributions.normal.Normal(loc=means, scale=sigmas)
            q_ys_K = torch.exp(q_distr.log_prob(torch.transpose(ys, 1, 0).unsqueeze(2))) # (shape: (1, batch_size, K))
            q_ys = torch.sum(weights.unsqueeze(0)*q_ys_K, dim=2) # (shape: (1, batch_size))
            q_ys = q_ys.squeeze(0) # (shape: (batch_size))
            ########################################################################
            # compute loss (negative log-likelihood of the targets under q):
            ########################################################################
            loss = torch.mean(-torch.log(q_ys))
            loss_value = loss.data.cpu().numpy()
            batch_losses.append(loss_value)
            ########################################################################
            # optimization step:
            ########################################################################
            optimizer.zero_grad() # (reset gradients)
            loss.backward() # (compute gradients)
            optimizer.step() # (perform optimization step)
            # print ("model: %d/%d | epoch: %d/%d | step: %d/%d | loss: %g" % (i, num_models-1, epoch+1, num_epochs, step+1, num_train_batches, loss_value))
        epoch_loss = np.mean(batch_losses)
        epoch_losses_train.append(epoch_loss)
        # Persist the per-epoch loss curve (pickle + plot) for later inspection.
        with open("%s/epoch_losses_train.pkl" % network.model_dir, "wb") as file:
            pickle.dump(epoch_losses_train, file)
        print ("train loss: %g" % epoch_loss)
        plt.figure(1)
        plt.plot(epoch_losses_train, "k^")
        plt.plot(epoch_losses_train, "k")
        plt.ylabel("loss")
        plt.xlabel("epoch")
        plt.title("train loss per epoch")
        plt.savefig("%s/epoch_losses_train.png" % network.model_dir)
        plt.close(1)
        # save the model weights to disk (one checkpoint per epoch):
        checkpoint_path = network.checkpoints_dir + "/model_" + model_id +"_epoch_" + str(epoch+1) + ".pth"
        torch.save(network.state_dict(), checkpoint_path)
| 40.981308 | 162 | 0.549373 |
from datasets import DatasetTrain
from mdn_model_K4 import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributions
import math
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import cv2
model_id = "mdn_train_K4_fullnet"
num_epochs = 75
batch_size = 32
learning_rate = 0.001
train_dataset = DatasetTrain()
num_train_batches = int(len(train_dataset)/batch_size)
print ("num_train_batches:", num_train_batches)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
num_models = 20
for i in range(num_models):
network = ToyNet(model_id + "_%d" % i, project_dir="/root/ebms_proposals/mdn_cell").cuda()
K = network.noise_net.K
print (K)
optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)
epoch_losses_train = []
for epoch in range(num_epochs):
print ("###########################")
print ("######## NEW EPOCH ########")
print ("###########################")
print ("model: %d/%d | epoch: %d/%d" % (i+1, num_models, epoch+1, num_epochs))
network.train()
batch_losses = []
for step, (xs, ys) in enumerate(train_loader):
xs = xs.cuda()
ys = ys.cuda().unsqueeze(1)
x_features = network.feature_net(xs)
if epoch < 20:
| true | true |
f7ff9b6c52541d9375bf8ae450b329c17fa442d9 | 3,545 | py | Python | 16-ratios/test.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 16-ratios/test.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | 16-ratios/test.py | Yu-Nie/YOLOV3 | 09db1d551d293dcfa7a638fd6693920840d28a74 | [
"MIT"
] | null | null | null | from torch.utils.data import DataLoader
import utils.gpu as gpu
from model.yolov3 import Yolov3
from tqdm import tqdm
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import os
import config.yolov3_config_voc as cfg
from utils.visualize import *
# import os
# os.environ["CUDA_VISIBLE_DEVICES"]='0'
class Tester(object):
    """Run a trained YOLOv3 on a folder of images (visualization) and/or on
    the VOC test set (mAP evaluation).

    Configuration is read from ``config.yolov3_config_voc``; the public
    surface (constructor arguments and ``test()``) is unchanged.
    """
    def __init__(self,
                 weight_path=None,
                 gpu_id=0,
                 img_size=544,
                 visiual=None,
                 eval=False
                 ):
        self.img_size = img_size
        # Test-time settings pulled from the VOC config module.
        self.__num_class = cfg.DATA["NUM"]
        self.__conf_threshold = cfg.TEST["CONF_THRESH"]
        self.__nms_threshold = cfg.TEST["NMS_THRESH"]
        self.__device = gpu.select_device(gpu_id)
        self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
        self.__flip_test = cfg.TEST["FLIP_TEST"]
        # 'visiual' (sic) is a directory of images to detect on; 'eval'
        # toggles the VOC mAP computation.
        self.__visiual = visiual
        self.__eval = eval
        self.__classes = cfg.DATA["CLASSES"]
        # Build the network on the selected device, then restore its weights.
        self.__model = Yolov3().to(self.__device)
        self.__load_model_weights(weight_path)
        self.__evalter = Evaluator(self.__model, visiual=False)
    def __load_model_weights(self, weight_path):
        """Restore model parameters from the checkpoint at *weight_path*."""
        print("loading weight file from : {}".format(weight_path))
        weight = os.path.join(weight_path)
        chkpt = torch.load(weight, map_location=self.__device)
        self.__model.load_state_dict(chkpt)
        print("loading weight file is done")
        del chkpt
    def test(self):
        """Entry point: visualize detections and/or report VOC mAP."""
        if self.__visiual:
            self.__visualize_directory()
        if self.__eval:
            self.__report_map()
    def __visualize_directory(self):
        """Detect on every image under the visual directory and save annotated copies."""
        for image_name in os.listdir(self.__visiual):
            path = os.path.join(self.__visiual, image_name)
            print("test images : {}".format(path))
            img = cv2.imread(path)
            assert img is not None
            bboxes_prd = self.__evalter.get_bbox(img)
            if bboxes_prd.shape[0] == 0:
                continue
            # Prediction columns: [x1, y1, x2, y2, score, class_id, ratios...]
            boxes = bboxes_prd[..., :4]
            class_inds = bboxes_prd[..., 5].astype(np.int32)
            scores = bboxes_prd[..., 4]
            ratios = bboxes_prd[..., 6:]
            visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes, ratios=ratios)
            path = os.path.join(cfg.PROJECT_PATH, "data/predictions/{}".format(image_name))
            cv2.imwrite(path, img)
            print("saved images : {}".format(path))
    def __report_map(self):
        """Compute per-class AP on the VOC test set and print the mean AP."""
        print('*' * 20 + "Validate" + '*' * 20)
        mAP = 0
        with torch.no_grad():
            APs = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)
            for cls_name in APs:
                print("{} --> mAP : {}".format(cls_name, APs[cls_name]))
                mAP += APs[cls_name]
            mAP = mAP / self.__num_class
            print('mAP:%g' % (mAP))
if __name__ == "__main__":
    # Command-line configuration; defaults mirror the author's local setup.
    cli = argparse.ArgumentParser()
    cli.add_argument('--weight_path', type=str, default=r"C:\Users\mrcry\Documents\weight\best_all.pt", help='weight file path')
    cli.add_argument('--visiual', type=str, default='./data/test', help='test data path or None')
    cli.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
    cli.add_argument('--gpu_id', type=int, default=0, help='gpu id')
    args = cli.parse_args()
    tester = Tester(
        weight_path=args.weight_path,
        gpu_id=args.gpu_id,
        eval=args.eval,
        visiual=args.visiual,
    )
    tester.test()
| 34.086538 | 136 | 0.5811 | from torch.utils.data import DataLoader
import utils.gpu as gpu
from model.yolov3 import Yolov3
from tqdm import tqdm
from utils.tools import *
from eval.evaluator import Evaluator
import argparse
import os
import config.yolov3_config_voc as cfg
from utils.visualize import *
class Tester(object):
def __init__(self,
weight_path=None,
gpu_id=0,
img_size=544,
visiual=None,
eval=False
):
self.img_size = img_size
self.__num_class = cfg.DATA["NUM"]
self.__conf_threshold = cfg.TEST["CONF_THRESH"]
self.__nms_threshold = cfg.TEST["NMS_THRESH"]
self.__device = gpu.select_device(gpu_id)
self.__multi_scale_test = cfg.TEST["MULTI_SCALE_TEST"]
self.__flip_test = cfg.TEST["FLIP_TEST"]
self.__visiual = visiual
self.__eval = eval
self.__classes = cfg.DATA["CLASSES"]
self.__model = Yolov3().to(self.__device)
self.__load_model_weights(weight_path)
self.__evalter = Evaluator(self.__model, visiual=False)
def __load_model_weights(self, weight_path):
print("loading weight file from : {}".format(weight_path))
weight = os.path.join(weight_path)
chkpt = torch.load(weight, map_location=self.__device)
self.__model.load_state_dict(chkpt)
print("loading weight file is done")
del chkpt
def test(self):
if self.__visiual:
imgs = os.listdir(self.__visiual)
for v in imgs:
path = os.path.join(self.__visiual, v)
print("test images : {}".format(path))
img = cv2.imread(path)
assert img is not None
bboxes_prd = self.__evalter.get_bbox(img)
if bboxes_prd.shape[0] != 0:
boxes = bboxes_prd[..., :4]
class_inds = bboxes_prd[..., 5].astype(np.int32)
scores = bboxes_prd[..., 4]
ratios = bboxes_prd[..., 6:]
visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.__classes, ratios=ratios)
path = os.path.join(cfg.PROJECT_PATH, "data/predictions/{}".format(v))
cv2.imwrite(path, img)
print("saved images : {}".format(path))
if self.__eval:
mAP = 0
print('*' * 20 + "Validate" + '*' * 20)
with torch.no_grad():
APs = Evaluator(self.__model).APs_voc(self.__multi_scale_test, self.__flip_test)
for i in APs:
print("{} --> mAP : {}".format(i, APs[i]))
mAP += APs[i]
mAP = mAP / self.__num_class
print('mAP:%g' % (mAP))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--weight_path', type=str, default=r"C:\Users\mrcry\Documents\weight\best_all.pt", help='weight file path')
parser.add_argument('--visiual', type=str, default='./data/test', help='test data path or None')
parser.add_argument('--eval', action='store_true', default=True, help='eval the mAP or not')
parser.add_argument('--gpu_id', type=int, default=0, help='gpu id')
opt = parser.parse_args()
Tester( weight_path=opt.weight_path,
gpu_id=opt.gpu_id,
eval=opt.eval,
visiual=opt.visiual).test()
| true | true |
f7ff9e691e95a169021cf669c7b683fe01f402df | 7,834 | py | Python | cub_attr1.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | cub_attr1.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | cub_attr1.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2017/12/4 15:42
@Author : Elvis
cub.py
watch --color -n1 gpustat -cpu
CUDA_VISIBLE_DEVICES=3 python cub_attr1.py
zsl_resnet18_fc00 : Sigmoid + dropout 0.5 74.789% (1329/1777) ZSL_Acc: 53.354% (1583/2967) 200 epoch
zsl_resnet18_fc01 : Sigmoid with fc pretrain Acc: 73.044% (1298/1777) ZSL_Acc: 24.537% (728/2967)
zsl_resnet18_fc02 : Sigmoid with fc pretrain + dropout 0.5 full 150 60 epoch: Acc: 50.792% (1507/2967)
zsl_resnet18_fc03 : Sigmoid + dropout 0.5 weight_decay=0.005 full 150 60 epoch: Acc: 50.792% (1507/2967)
100 epoch: Acc: 53.758% (1595/2967) 192 epoch: Acc: 54.803% (1626/2967)
zsl_resnet50_fc00 : Sigmoid + dropout 0.5 weight_decay=0.005 full 150 44epoch Acc: 57.162% (1696/2967)
Acc: 75.842% (6690/8821) | Test Acc: 95.948% (1705/1777)
zsl_resnet50_fc01 : Sigmoid + dropout 0.5 weight_decay=0.005 half100
zsl_resnet50_fc02 : Sigmoid + dropout 0.5 weight_decay=0.005 full dropout 0.4 Acc: 58.140% (1725/2967) 24
zsl_resnet50_fc03 : Sigmoid + dropout 0.5 weight_decay=0.005 full dropout 0.25 Acc: 56.421% (1674/2967)
zsl_resnet50_fc04 : BN + Sigmoid + norm weight
"""
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.autograd import Variable
import os
import argparse
from data.data_loader import DataLoader
from models.zsl_resnet import attrCNN
from utils.logger import progress_bar
# from utils.param_count import torch_summarize, lr_scheduler
# import pickle
# Learning rate parameters and dataset/model configuration.
BASE_LR = 0.01
NUM_CLASSES = 150  # set the number of classes in your dataset (150 seen ZSL classes)
NUM_ATTR = 312
DATA_DIR = "/home/elvis/data/attribute/CUB_200_2011/zsl/trainval0"
BATCH_SIZE = 64
IMAGE_SIZE = 224
# MODEL_NAME = "zsl_resnet18_fc1"
# MODEL_NAME = "zsl_resnet18_fc1_end"
MODEL_NAME = "zsl_resnet50_fc04"
USE_GPU = torch.cuda.is_available()
MODEL_SAVE_FILE = MODEL_NAME + '.pth'
# Command-line arguments; --resume restarts from the saved checkpoint.
parser = argparse.ArgumentParser(description='PyTorch zsl_resnet18_attr1 Training')
parser.add_argument('--lr', default=BASE_LR, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', default=False, help='resume from checkpoint')
parser.add_argument('--data', default=DATA_DIR, type=str, help='file path of the dataset')
args = parser.parse_args()
best_acc = 0.
start_epoch = 0
print("Model: " + MODEL_NAME)
if args.resume:
    # Restore network, best accuracy, epoch counter, and optimizer state.
    print("==> Resuming from checkpoint...")
    checkpoint = torch.load("./checkpoints/" + MODEL_SAVE_FILE)
    net = checkpoint["net"]
    best_acc = checkpoint["acc"]
    start_epoch = checkpoint["epoch"]
    optimizer = checkpoint["optimizer"]
else:
    print("==> Building model...")
    net = attrCNN(num_attr=312, num_classes=150)
    # optimizer = optim.Adam(net.parameters())
    # optimizer = optim.SGD(net.get_config_optim(BASE_LR / 10.),
    #                       lr=BASE_LR,
    #                       momentum=0.9,
    #                       weight_decay=0.0005)
# print(torch_summarize(net))
# print(net)
if USE_GPU:
    net.cuda()
    # net = torch.nn.DataParallel(net.module, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True
# Shared training log; train()/test() append to it, closed at end of script.
log = open("./log/" + MODEL_NAME + '_cub.txt', 'a')
print("==> Preparing data...")
data_loader = DataLoader(data_dir=args.data, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)
inputs, classes = next(iter(data_loader.load_data()))
# out = torchvision.utils.make_grid(inputs)
# data_loader.show_image(out, title=[data_loader.data_classes[c] for c in classes])
train_loader = data_loader.load_data(data_set='train')
test_loader = data_loader.load_data(data_set='val')
criterion = nn.CrossEntropyLoss()
# def one_hot_emb(batch, depth=NUM_CLASSES):
# emb = nn.Embedding(depth, depth)
# emb.weight.data = torch.eye(depth)
# return emb(batch).data
def one_hot_emb(y, depth=NUM_CLASSES):
    """Convert a tensor of class indices into a (N, depth) one-hot float tensor."""
    idx = y.view((-1, 1))
    encoded = torch.zeros(idx.size(0), depth)
    # Write a 1 at each row's class column.
    encoded.scatter_(1, idx, 1)
    return encoded
def train(epoch, net, optimizer):
    """Run one training epoch over train_loader; append epoch accuracy to the log file."""
    print("\nEpoch: %d" % epoch)
    net.train()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    for batch_idx, (images, labels) in enumerate(train_loader):
        if USE_GPU:
            images, labels = images.cuda(), labels.cuda()
        images, labels = Variable(images), Variable(labels)
        # Forward / backward / parameter update.
        optimizer.zero_grad()
        logits, attr = net(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()
        # Bookkeeping for the running loss and accuracy shown in the progress bar.
        running_loss += batch_loss.data[0]
        _, pred = torch.max(logits.data, 1)
        n_seen += labels.size(0)
        n_correct += pred.eq(labels.data).cpu().sum()
        progress_bar(batch_idx, len(train_loader), "Loss: %.3f | Acc: %.3f%% (%d/%d)"
                     % (running_loss / (batch_idx + 1), 100. * n_correct / n_seen, n_correct, n_seen))
    log.write(str(epoch) + ' ' + str(n_correct / n_seen) + ' ')
def test(epoch, net):
    """Evaluate on test_loader; checkpoint the model when accuracy improves.

    Writes '<val_acc> <mean_val_loss>\\n' to the shared log file and, after a
    10-epoch warm-up, saves net/epoch/optimizer state whenever accuracy beats
    the running best (module-level best_acc).
    """
    global best_acc
    net.eval()
    test_loss, correct, total = 0, 0, 0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        if USE_GPU:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        out, attr = net(inputs)
        loss = criterion(out, targets)
        # BUG FIX: accumulate the per-batch losses. The original assigned
        # (test_loss = loss.data[0]), so test_loss / (batch_idx + 1) below
        # reported the LAST batch's loss divided by the batch count instead
        # of the running mean (train() accumulates with += as intended).
        test_loss += loss.data[0]
        _, predicted = torch.max(out.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        acc = 100. * correct / total
        progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss / (batch_idx + 1), acc, correct, total))
    # Log the mean validation loss (not the raw sum) alongside accuracy.
    log.write(str(correct / total) + ' ' + str(test_loss / len(test_loader)) + '\n')
    log.flush()
    acc = 100. * correct / total
    if epoch > 9 and acc > best_acc:
        print("Saving checkpoint")
        state = {
            'net': net,
            'acc': acc,
            'epoch': epoch,
            'optimizer': optimizer
        }
        if not os.path.isdir("checkpoints"):
            os.mkdir('checkpoints')
        torch.save(state, "./checkpoints/" + MODEL_SAVE_FILE)
        best_acc = acc
# Phase 1: freeze the whole network and train only the new fc head for the
# first epoch1 epochs.
for param in net.parameters():
    param.requires_grad = False
optim_params = list(net.cnn.fc.parameters())
for param in optim_params:
    param.requires_grad = True
epoch1 = 15
# optimizer = optim.Adagrad(optim_params, lr=0.001, weight_decay=0.005)
optimizer = optim.Adam(optim_params, weight_decay=0.005)
if start_epoch < epoch1:
    for epoch in range(start_epoch, epoch1):
        train(epoch, net, optimizer)
        test(epoch, net)
    start_epoch = epoch1
# Phase 2: unfreeze the backbone and fine-tune everything with Adagrad.
for param in net.cnn.parameters():
    param.requires_grad = True
# fc_params = list(map(id, net.cnn.fc.parameters()))
# base_params = list(filter(lambda p: id(p) not in fc_params, net.cnn.parameters()))
# optimizer = optim.Adagrad([{'params': base_params},
#                            {'params': net.cnn.fc.parameters(), 'lr': 0.005}
#                            ], lr=0.0005, weight_decay=0.005)
# start_epoch = 0
# optimizer = optim.Adam(net.cnn.fc.parameters(), weight_decay=0.0005)
# optimizer = torch.optim.SGD([
#     {'params': base_params},
#     {'params': net.cnn.fc.parameters(), 'lr': 1}
# ], lr=1e-4, momentum=0.9, weight_decay=0.0005)
from zeroshot.cub_test import zsl_test, gzsl_test
import copy
optimizer = optim.Adagrad(net.cnn.parameters(), lr=0.001, weight_decay=0.005)
for epoch in range(start_epoch, 500):
    train(epoch, net, optimizer)
    test(epoch, net)
    if epoch > 10:
        # Run zero-shot evaluation on a deep copy so evaluation cannot
        # disturb the training network's state.
        net1 = copy.deepcopy(net)
        zsl_test(epoch, net1, optimizer)
        del net1
    # net2 = copy.deepcopy(net)
    # gzsl_test(epoch, net2, optimizer)
    # del net2
log.close()
| 36.268519 | 107 | 0.654583 |
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.autograd import Variable
import os
import argparse
from data.data_loader import DataLoader
from models.zsl_resnet import attrCNN
from utils.logger import progress_bar
BASE_LR = 0.01
NUM_CLASSES = 150
NUM_ATTR = 312
DATA_DIR = "/home/elvis/data/attribute/CUB_200_2011/zsl/trainval0"
BATCH_SIZE = 64
IMAGE_SIZE = 224
MODEL_NAME = "zsl_resnet50_fc04"
USE_GPU = torch.cuda.is_available()
MODEL_SAVE_FILE = MODEL_NAME + '.pth'
parser = argparse.ArgumentParser(description='PyTorch zsl_resnet18_attr1 Training')
parser.add_argument('--lr', default=BASE_LR, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', default=False, help='resume from checkpoint')
parser.add_argument('--data', default=DATA_DIR, type=str, help='file path of the dataset')
args = parser.parse_args()
best_acc = 0.
start_epoch = 0
print("Model: " + MODEL_NAME)
if args.resume:
print("==> Resuming from checkpoint...")
checkpoint = torch.load("./checkpoints/" + MODEL_SAVE_FILE)
net = checkpoint["net"]
best_acc = checkpoint["acc"]
start_epoch = checkpoint["epoch"]
optimizer = checkpoint["optimizer"]
else:
print("==> Building model...")
net = attrCNN(num_attr=312, num_classes=150)
if USE_GPU:
net.cuda()
cudnn.benchmark = True
log = open("./log/" + MODEL_NAME + '_cub.txt', 'a')
print("==> Preparing data...")
data_loader = DataLoader(data_dir=args.data, image_size=IMAGE_SIZE, batch_size=BATCH_SIZE)
inputs, classes = next(iter(data_loader.load_data()))
train_loader = data_loader.load_data(data_set='train')
test_loader = data_loader.load_data(data_set='val')
criterion = nn.CrossEntropyLoss()
def one_hot_emb(y, depth=NUM_CLASSES):
y = y.view((-1, 1))
one_hot = torch.FloatTensor(y.size(0), depth).zero_()
one_hot.scatter_(1, y, 1)
return one_hot
def train(epoch, net, optimizer):
print("\nEpoch: %d" % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(train_loader):
if USE_GPU:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
optimizer.zero_grad()
out, attr = net(inputs)
loss = criterion(out, targets)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(out.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(train_loader), "Loss: %.3f | Acc: %.3f%% (%d/%d)"
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
log.write(str(epoch) + ' ' + str(correct / total) + ' ')
def test(epoch, net):
    """Evaluate ``net`` on ``test_loader``; checkpoint when accuracy improves.

    Writes ``accuracy total_loss`` to the log and, after epoch 9, saves a
    checkpoint whenever validation accuracy exceeds the best seen so far.
    """
    global best_acc
    net.eval()
    test_loss, correct, total = 0, 0, 0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        if USE_GPU:
            inputs, targets = inputs.cuda(), targets.cuda()
        # volatile=True: inference only, no autograd graph (pre-0.4 PyTorch idiom).
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        out, attr = net(inputs)
        loss = criterion(out, targets)
        # Bug fix: accumulate the loss (it was overwritten each batch), so the
        # "test_loss / (batch_idx + 1)" below is a true running average and
        # matches how train() tracks its loss.
        test_loss += loss.data[0]
        _, predicted = torch.max(out.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        acc = 100. * correct / total
        progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss / (batch_idx + 1), acc, correct, total))
    log.write(str(correct / total) + ' ' + str(test_loss) + '\n')
    log.flush()
    acc = 100. * correct / total
    # Only start checkpointing after a short warm-up (epoch > 9).
    if epoch > 9 and acc > best_acc:
        print("Saving checkpoint")
        state = {
            'net': net,
            'acc': acc,
            'epoch': epoch,
            'optimizer': optimizer
        }
        if not os.path.isdir("checkpoints"):
            os.mkdir('checkpoints')
        torch.save(state, "./checkpoints/" + MODEL_SAVE_FILE)
        best_acc = acc
# Stage 1: train only the final fully-connected layer for the first `epoch1`
# epochs while the rest of the network stays frozen.
for param in net.parameters():
    param.requires_grad = False
optim_params = list(net.cnn.fc.parameters())
for param in optim_params:
    param.requires_grad = True
epoch1 = 15
optimizer = optim.Adam(optim_params, weight_decay=0.005)
if start_epoch < epoch1:
    for epoch in range(start_epoch, epoch1):
        train(epoch, net, optimizer)
        test(epoch, net)
    start_epoch = epoch1
# Stage 2: unfreeze the CNN and fine-tune everything with a small learning rate.
for param in net.cnn.parameters():
    param.requires_grad = True
from zeroshot.cub_test import zsl_test, gzsl_test
import copy
optimizer = optim.Adagrad(net.cnn.parameters(), lr=0.001, weight_decay=0.005)
for epoch in range(start_epoch, 500):
    train(epoch, net, optimizer)
    test(epoch, net)
    if epoch > 10:
        # Run the zero-shot evaluation on a deep copy so it cannot mutate
        # the model being trained.
        net1 = copy.deepcopy(net)
        zsl_test(epoch, net1, optimizer)
        del net1
log.close()
| true | true |
f7ff9e7a1ee17cc4704a27a27776b87141cc4a26 | 1,102 | py | Python | prody/chromatin/__init__.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 210 | 2015-01-26T08:17:56.000Z | 2022-03-30T01:40:34.000Z | prody/chromatin/__init__.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 555 | 2015-01-05T21:51:54.000Z | 2022-03-31T16:51:41.000Z | prody/chromatin/__init__.py | grandevelia/ProDy | 7c725640a94c16543423c0756388998cb86a97ae | [
"MIT"
] | 99 | 2015-02-09T18:00:39.000Z | 2022-03-07T12:52:51.000Z | # -*- coding: utf-8 -*-
"""This module defines classes and functions to parse and write Hi-C data files,
visualize, and perform elastic network analysis on Hi-C data.
Parse/write Hi-C files
=====================
Following ProDy functions are for parsing and writing Hi-C files:
* :func:`.parseHiC` - parse Hi-C data file
* :func:`.parseHiCStream` - parse Hi-C data stream
* :func:`.writeMap` - write Hi-C data to text file
Visualize Hi-C data
=====================
Following ProDy functions are for visualizing Hi-C data:
* :func:`.showMap` - show Hi-C contact map
* :func:`.showDomains` - show Hi-C structural domains
Save/load HiC class
-------------------
* :func:`.saveHiC`
* :func:`.loadHiC`
"""
import prody
# Re-use the package-wide logger and settings singletons.
LOGGER = prody.LOGGER
SETTINGS = prody.SETTINGS
# Aggregate the public API of every submodule into this package's namespace.
__all__ = []
from . import hic
from .hic import *
__all__.extend(hic.__all__)
from . import norm
from .norm import *
__all__.extend(norm.__all__)
from . import cluster
from .cluster import *
__all__.extend(cluster.__all__)
from . import functions
from .functions import *
__all__.extend(functions.__all__)
| 21.607843 | 81 | 0.676951 |
import prody
LOGGER = prody.LOGGER
SETTINGS = prody.SETTINGS
__all__ = []
from . import hic
from .hic import *
__all__.extend(hic.__all__)
from . import norm
from .norm import *
__all__.extend(norm.__all__)
from . import cluster
from .cluster import *
__all__.extend(cluster.__all__)
from . import functions
from .functions import *
__all__.extend(functions.__all__)
| true | true |
f7ff9ef7751a1e2fa272c6a3b3e1c6487709744c | 1,976 | py | Python | sequence_search/db/tests/test_base.py | RNAcentral/sequence_search | e0319e384cc9dea017f165e2c4c5143ee232f9fd | [
"Apache-2.0"
] | 2 | 2019-02-13T16:33:46.000Z | 2019-10-22T16:27:00.000Z | sequence_search/db/tests/test_base.py | RNAcentral/sequence_search | e0319e384cc9dea017f165e2c4c5143ee232f9fd | [
"Apache-2.0"
] | 110 | 2019-02-15T15:06:05.000Z | 2022-03-04T16:03:38.000Z | sequence_search/db/tests/test_base.py | RNAcentral/sequence_search | e0319e384cc9dea017f165e2c4c5143ee232f9fd | [
"Apache-2.0"
] | 1 | 2021-06-30T21:39:35.000Z | 2021-06-30T21:39:35.000Z | """
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from aiohttp import web, web_middlewares
from aiohttp.test_utils import AioHTTPTestCase
from sequence_search.db.models import init_pg
from sequence_search.db.settings import get_postgres_credentials
class DBTestCase(AioHTTPTestCase):
    """Base unit-test for all the tests in db.

    Run all tests with:
        ENVIRONMENT=TEST python -m unittest sequence_search.db.tests
    """
    async def get_application(self):
        # Keep asyncio's selector chatter out of the test output.
        logging.basicConfig(level=logging.ERROR)
        middlewares = [web_middlewares.normalize_path_middleware(append_slash=True)]
        app = web.Application(middlewares=middlewares)
        app.update(name='test', settings=get_postgres_credentials(ENVIRONMENT='TEST'))
        app.on_startup.append(init_pg)
        return app
    async def tearDownAsync(self):
        # Wipe every table; child tables first so foreign keys stay satisfied.
        tables = (
            'job_chunk_results',
            'job_chunks',
            'infernal_result',
            'infernal_job',
            'jobs',
            'consumer',
        )
        async with self.app['engine'].acquire() as connection:
            for table in tables:
                await connection.execute('DELETE FROM ' + table)
        await super().tearDownAsync()
| 42.042553 | 119 | 0.742915 |
import logging
from aiohttp import web, web_middlewares
from aiohttp.test_utils import AioHTTPTestCase
from sequence_search.db.models import init_pg
from sequence_search.db.settings import get_postgres_credentials
class DBTestCase(AioHTTPTestCase):
async def get_application(self):
logging.basicConfig(level=logging.ERROR)
app = web.Application(middlewares=[web_middlewares.normalize_path_middleware(append_slash=True)])
settings = get_postgres_credentials(ENVIRONMENT='TEST')
app.update(name='test', settings=settings)
app.on_startup.append(init_pg)
return app
async def tearDownAsync(self):
async with self.app['engine'].acquire() as connection:
await connection.execute('DELETE FROM job_chunk_results')
await connection.execute('DELETE FROM job_chunks')
await connection.execute('DELETE FROM infernal_result')
await connection.execute('DELETE FROM infernal_job')
await connection.execute('DELETE FROM jobs')
await connection.execute('DELETE FROM consumer')
await super().tearDownAsync()
| true | true |
f7ff9f287cc5eace3114bff34cc4d5ea8716e13d | 4,876 | py | Python | fdk_client/application/models/FeedbackValidator.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/application/models/FeedbackValidator.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/application/models/FeedbackValidator.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Class Validators."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class FeedbackValidator:
    """Marshmallow request validators for the Feedback service.

    Each nested class mirrors one API operation; an empty (``pass``) schema
    means that operation accepts no query parameters. ``page_id``/``page_no``
    and ``page_size`` fields implement cursor- and offset-style pagination.
    """
    class createAbuseReport(BaseSchema):
        pass
    class updateAbuseReport(BaseSchema):
        pass
    class getAbuseReports(BaseSchema):
        entity_id = fields.Str(required=False)
        entity_type = fields.Str(required=False)
        id = fields.Str(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class getAttributes(BaseSchema):
        page_no = fields.Int(required=False)
        page_size = fields.Int(required=False)
    class createAttribute(BaseSchema):
        pass
    class getAttribute(BaseSchema):
        slug = fields.Str(required=False)
    class updateAttribute(BaseSchema):
        slug = fields.Str(required=False)
    class createComment(BaseSchema):
        pass
    class updateComment(BaseSchema):
        pass
    class getComments(BaseSchema):
        entity_type = fields.Str(required=False)
        id = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        user_id = fields.Str(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class checkEligibility(BaseSchema):
        entity_type = fields.Str(required=False)
        entity_id = fields.Str(required=False)
    class deleteMedia(BaseSchema):
        ids = fields.List(fields.Str(required=False), required=False)
    class createMedia(BaseSchema):
        pass
    class updateMedia(BaseSchema):
        pass
    class getMedias(BaseSchema):
        entity_type = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        id = fields.Str(required=False)
        type = fields.Str(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class getReviewSummaries(BaseSchema):
        entity_type = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        id = fields.Str(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class createReview(BaseSchema):
        pass
    class updateReview(BaseSchema):
        pass
    # getReviews supports the richest filtering: by rating, attributes,
    # moderation state and sort order.
    class getReviews(BaseSchema):
        entity_type = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        id = fields.Str(required=False)
        user_id = fields.Str(required=False)
        media = fields.Str(required=False)
        rating = fields.List(fields.Float(required=False), required=False)
        attribute_rating = fields.List(fields.Str(required=False), required=False)
        facets = fields.Boolean(required=False)
        sort = fields.Str(required=False)
        active = fields.Boolean(required=False)
        approve = fields.Boolean(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class getTemplates(BaseSchema):
        template_id = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        entity_type = fields.Str(required=False)
    class createQuestion(BaseSchema):
        pass
    class updateQuestion(BaseSchema):
        pass
    class getQuestionAndAnswers(BaseSchema):
        entity_type = fields.Str(required=False)
        entity_id = fields.Str(required=False)
        id = fields.Str(required=False)
        user_id = fields.Str(required=False)
        show_answer = fields.Boolean(required=False)
        page_id = fields.Str(required=False)
        page_size = fields.Int(required=False)
    class getVotes(BaseSchema):
        id = fields.Str(required=False)
        ref_type = fields.Str(required=False)
        page_no = fields.Int(required=False)
        page_size = fields.Int(required=False)
    class createVote(BaseSchema):
        pass
    class updateVote(BaseSchema):
        pass
| 23 | 82 | 0.5484 |
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class FeedbackValidator:
class createAbuseReport(BaseSchema):
pass
class updateAbuseReport(BaseSchema):
pass
class getAbuseReports(BaseSchema):
entity_id = fields.Str(required=False)
entity_type = fields.Str(required=False)
id = fields.Str(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class getAttributes(BaseSchema):
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
class createAttribute(BaseSchema):
pass
class getAttribute(BaseSchema):
slug = fields.Str(required=False)
class updateAttribute(BaseSchema):
slug = fields.Str(required=False)
class createComment(BaseSchema):
pass
class updateComment(BaseSchema):
pass
class getComments(BaseSchema):
entity_type = fields.Str(required=False)
id = fields.Str(required=False)
entity_id = fields.Str(required=False)
user_id = fields.Str(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class checkEligibility(BaseSchema):
entity_type = fields.Str(required=False)
entity_id = fields.Str(required=False)
class deleteMedia(BaseSchema):
ids = fields.List(fields.Str(required=False), required=False)
class createMedia(BaseSchema):
pass
class updateMedia(BaseSchema):
pass
class getMedias(BaseSchema):
entity_type = fields.Str(required=False)
entity_id = fields.Str(required=False)
id = fields.Str(required=False)
type = fields.Str(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class getReviewSummaries(BaseSchema):
entity_type = fields.Str(required=False)
entity_id = fields.Str(required=False)
id = fields.Str(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class createReview(BaseSchema):
pass
class updateReview(BaseSchema):
pass
class getReviews(BaseSchema):
entity_type = fields.Str(required=False)
entity_id = fields.Str(required=False)
id = fields.Str(required=False)
user_id = fields.Str(required=False)
media = fields.Str(required=False)
rating = fields.List(fields.Float(required=False), required=False)
attribute_rating = fields.List(fields.Str(required=False), required=False)
facets = fields.Boolean(required=False)
sort = fields.Str(required=False)
active = fields.Boolean(required=False)
approve = fields.Boolean(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class getTemplates(BaseSchema):
template_id = fields.Str(required=False)
entity_id = fields.Str(required=False)
entity_type = fields.Str(required=False)
class createQuestion(BaseSchema):
pass
class updateQuestion(BaseSchema):
pass
class getQuestionAndAnswers(BaseSchema):
entity_type = fields.Str(required=False)
entity_id = fields.Str(required=False)
id = fields.Str(required=False)
user_id = fields.Str(required=False)
show_answer = fields.Boolean(required=False)
page_id = fields.Str(required=False)
page_size = fields.Int(required=False)
class getVotes(BaseSchema):
id = fields.Str(required=False)
ref_type = fields.Str(required=False)
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
class createVote(BaseSchema):
pass
class updateVote(BaseSchema):
pass
| true | true |
f7ff9f6b5756f215cb956437f8d7e710b371945c | 8,270 | py | Python | cbf_clf_qp.py | kimukook/CLF-CBF-python | 28a46a4f9abf095e1f1b92e6cc056956caab5374 | [
"MIT"
] | null | null | null | cbf_clf_qp.py | kimukook/CLF-CBF-python | 28a46a4f9abf095e1f1b92e6cc056956caab5374 | [
"MIT"
] | null | null | null | cbf_clf_qp.py | kimukook/CLF-CBF-python | 28a46a4f9abf095e1f1b92e6cc056956caab5374 | [
"MIT"
] | null | null | null | '''
=====================================
Author : Muhan Zhao
Date : Feb. 16, 2020
Location: UC San Diego, La Jolla, CA
=====================================
'''
import numpy as np
import cvxpy as cp
class OptionsClass:
    """Keyword-option container.

    Subclasses populate ``self.options`` as a mapping
    ``{key: [current_value, default_value, expected_type]}``.
    """
    def __init__(self):
        # Populated by a subclass's setup(); None until then.
        self.options = None
        self.solverName = 'None'

    def set_option(self, key, value):
        """Set the current value of *key*; reject values of the wrong type.

        :raises ValueError: if *key* is unknown or the option table is unset.
        """
        try:
            if type(value) is self.options[key][2]:
                self.options[key][0] = value
            else:
                print(f"The type of value for the keyword '{key}' should be '{self.options[key][2]}'.")
        except (KeyError, TypeError):
            # KeyError: unknown keyword; TypeError: self.options still None.
            raise ValueError('Incorrect option keyword or type: ' + key)

    def get_option(self, key):
        """Return the current value stored for *key*.

        :raises ValueError: if *key* is unknown or the option table is unset.
        """
        try:
            return self.options[key][0]
        except (KeyError, TypeError):
            raise ValueError('Incorrect option keyword: ' + key)

    def reset_options(self, key):
        """Reset the current value of *key* back to its default.

        Bug fix: assign into the current-value slot instead of replacing the
        whole ``[current, default, type]`` record with the bare default value
        (which corrupted the table and broke later get/set calls).
        """
        try:
            self.options[key][0] = self.options[key][1]
        except (KeyError, TypeError):
            raise ValueError('Incorrect option keyword: ' + key)
class CbfClfQpOptions(OptionsClass):
    """Option set for the CBF-CLF quadratic program solver."""

    def __init__(self):
        OptionsClass.__init__(self)
        self.setup()
        self.solver_name = 'CBF-CLF'

    def setup(self):
        """Register every keyword as ``[current value, default value, type]``."""
        options = {}
        options['u_max'] = [None, None, np.ndarray]
        options['u_min'] = [None, None, np.ndarray]
        options['clf_lambda'] = [None, 5, float]
        options['cbf_gamma'] = [None, 5, float]
        options['weight_input'] = [None, None, np.ndarray]
        options['weight_slack'] = [None, 2e-2, float]
        self.options = options
class CbfClfQp:
    """
    This is the implementation of the vanilla CBF-CLF-QP method. The optimization problem is:
            min  (u-u_ref).T * H * (u-u_ref) + p * delta**2
            s.t. L_f V(x) + L_g V(x) * u + lambda * V(x) <= delta  ---> CLF constraint
                 L_f B(x) + L_g B(x) * u + gamma * B(x) >= 0       ---> CBF constraint

    Input:
    :param  system       : The dynamic system of interest, containing CBF, CLF, and their Lie derivatives
    :param  option_class : A CbfClfQpOptions instance supplying weights, bounds and the
                           lambda/gamma hyperparameters
    """
    def __init__(self, system, option_class):
        if hasattr(system, 'udim'):
            self.udim = system.udim
        else:
            raise KeyError('udim is not given in the system dynamic!')

        # CBF/CLF callables and their Lie derivatives, taken from the system model.
        self.cbf = system.cbf
        self.lf_cbf = system.lf_cbf
        self.lg_cbf = system.lg_cbf

        self.clf = system.clf
        self.lf_clf = system.lf_clf
        self.lg_clf = system.lg_clf

        # Cost weights: H on the control effort, p on the slack variable.
        self.weight_input = np.atleast_2d(option_class.get_option('weight_input'))
        self.weight_slack = np.atleast_2d(option_class.get_option('weight_slack'))
        self.H = None
        self.slack_H = None

        # Constraint matrices A u <= b, rebuilt on every QP solve.
        self.A = None
        self.b = None

        # Hyperparameters: CLF <- Lambda & CBF <- Gamma
        self.clf_lambda = option_class.get_option('clf_lambda')
        self.cbf_gamma = option_class.get_option('cbf_gamma')

        self.u_max = option_class.get_option('u_max')
        if self.u_max.shape != (self.udim,):
            raise ValueError('The size of u_max should be udim-by-, a one dimensional vector in python.')
        self.u_min = option_class.get_option('u_min')
        if self.u_min.shape != (self.udim,):
            raise ValueError('The size of u_min should be udim-by-, a one dimensional vector in python.')
        self.with_slack = None

    def cbf_clf_qp(self, x, u_ref=None, with_slack=1, verbose=0):
        """Solve the CBF-CLF QP at state ``x``.

        :param x         : The current state
        :param u_ref     : A real number of 1D vector with shape (udim,)
        :param with_slack: Indicator if there is slack variable
        :param verbose   : Indicator if QP info is displayed
        :return: (u, slack, B, V, feas) — feas is 1 on success, -1 if infeasible
        """
        inf = np.inf

        self.with_slack = with_slack

        slack = None
        if u_ref is None:
            u_ref = np.zeros(self.udim)
        else:
            if u_ref.shape != (self.udim,):
                raise ValueError(f'u_ref should have the shape size (u_dim,), now it is {u_ref.shape}')

        # Read the weight input and build up the matrix H in the cost function
        if self.weight_input.shape == (1, 1):
            # Weight input is a scalar
            self.H = self.weight_input * np.eye(self.udim)

        elif self.weight_input.shape == (self.udim, 1):
            # Weight_input is a column vector: use it as the diagonal of H.
            # Bug fix: np.diag on a 2-D (udim, 1) array *extracts* its
            # (single-element) diagonal; flatten first so a udim x udim
            # diagonal matrix is actually built.
            self.H = np.diag(self.weight_input.flatten())

        elif self.weight_input.shape == (self.udim, self.udim):
            # Weight_input is a udim * udim matrix
            self.H = np.copy(self.weight_input)

        else:
            self.H = np.eye(self.udim)

        # Evaluate CLF, CBF and their Lie derivatives at the current state.
        V = self.clf(x)
        lf_V = self.lf_clf(x)
        lg_V = self.lg_clf(x)

        B = self.cbf(x)
        lf_B = self.lf_cbf(x)
        lg_B = self.lg_cbf(x)

        if self.with_slack:
            # slack variable is activated
            # Constraints: A [u; slack] <= b
            #              LfV + LgV * u + lambda * V <= slack
            #              LfB + LgB * u + gamma * B  >= 0
            lg_V = np.hstack((lg_V, -np.ones((1, 1))))
            lg_B = np.hstack((-lg_B, np.zeros((1, 1))))

            self.A = np.vstack((lg_V, lg_B))
            self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))
            # make sure that b is just a 1D vector with the shape (udim+1,)
            self.b = np.atleast_2d(self.b)[0]

            # Slack -> unconstrained
            u_min = np.hstack((self.u_min, -inf * np.ones(1)))
            u_max = np.hstack((self.u_max, inf * np.ones(1)))

            u = cp.Variable(self.udim + 1)

            # H_new = [H, 0; 0, p]
            # NOTE(review): the (1, 1) zero blocks assume udim == 1 — confirm
            # before using this class with multi-input systems.
            self.slack_H = np.hstack((self.H, np.zeros((1, 1))))
            self.slack_H = np.vstack((self.slack_H, np.hstack((np.zeros((1, 1)), self.weight_slack * np.ones((1, 1))))))

            # Cost -> (u-u_ref)' * H_new * (u-u_ref) + p * delta**2
            #      -> (1/2) * [u slack]' * H_new * [u slack] - [u slack]' * H_new * [u_ref 0]
            u_ref = np.hstack((u_ref, np.zeros(1)))
            objective = cp.Minimize((1/2) * cp.quad_form(u, self.slack_H) - (self.slack_H @ u_ref).T @ u)

            # Constraints: A * u <= b and u_min, u_max
            constraints = [u_min <= u, u <= u_max, self.A @ u <= self.b]

            problem = cp.Problem(objective, constraints)
            problem.solve()

            if problem.status != 'infeasible':
                slack = u.value[-1]
                u = u.value[:self.udim]
                feas = 1
            else:
                u = None
                slack = None
                feas = -1
        else:
            # Slack variable is not activated:
            # Constraints: A u <= b
            #              LfV + LgV * u + lambda * V <= 0
            #              LfB + LgB * u + gamma * B  >= 0
            self.A = np.vstack((lg_V, -lg_B))
            # b -> one dimensional vector
            self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))
            self.b = np.atleast_2d(self.b)[0]

            u = cp.Variable(self.udim)

            # Cost -> (u-u_ref)' * H * (u-u_ref) -> (1/2) * u'*H*u - u'*H*u_ref
            objective = cp.Minimize((1/2)*cp.quad_form(u, self.H) - (self.H @ u_ref).T @ u)

            # Constraints: A * u <= b and u_min, u_max.
            # Bug fix: the linear constraint must act on the decision variable
            # u, not on the state x (original read ``self.A @ x``).
            constraints = [self.u_min <= u, u <= self.u_max, self.A @ u <= self.b]

            problem = cp.Problem(objective, constraints)
            problem.solve()

            if problem.status != 'infeasible':
                u = u.value
                feas = 1
            else:
                u = None
                feas = -1

        return u, slack, B, V, feas
| 34.173554 | 120 | 0.525998 |
import numpy as np
import cvxpy as cp
class OptionsClass:
def __init__(self):
self.options = None
self.solverName = 'None'
def set_option(self, key, value):
try:
if type(value) is self.options[key][2]:
self.options[key][0] = value
else:
print(f"The type of value for the keyword '{key}' should be '{self.options[key][2]}'.")
except:
raise ValueError('Incorrect option keyword or type: ' + key)
def get_option(self, key):
try:
value = self.options[key][0]
return value
except:
raise ValueError('Incorrect option keyword: ' + key)
def reset_options(self, key):
try:
self.options[key] = self.options[key][1]
except:
raise ValueError('Incorrect option keyword: ' + key)
class CbfClfQpOptions(OptionsClass):
def __init__(self):
OptionsClass.__init__(self)
self.setup()
self.solver_name = 'CBF-CLF'
def setup(self):
self.options = {
'u_max': [None, None, np.ndarray],
'u_min': [None, None, np.ndarray],
'clf_lambda': [None, 5, float],
'cbf_gamma': [None, 5, float],
'weight_input': [None, None, np.ndarray],
'weight_slack': [None, 2e-2, float],
}
class CbfClfQp:
def __init__(self, system, option_class):
if hasattr(system, 'udim'):
self.udim = system.udim
else:
raise KeyError('udim is not given in the system dynamic!')
self.cbf = system.cbf
self.lf_cbf = system.lf_cbf
self.lg_cbf = system.lg_cbf
self.clf = system.clf
self.lf_clf = system.lf_clf
self.lg_clf = system.lg_clf
self.weight_input = np.atleast_2d(option_class.get_option('weight_input'))
self.weight_slack = np.atleast_2d(option_class.get_option('weight_slack'))
self.H = None
self.slack_H = None
self.A = None
self.b = None
self.clf_lambda = option_class.get_option('clf_lambda')
self.cbf_gamma = option_class.get_option('cbf_gamma')
self.u_max = option_class.get_option('u_max')
if self.u_max.shape != (self.udim,):
raise ValueError('The size of u_max should be udim-by-, a one dimensional vector in python.')
self.u_min = option_class.get_option('u_min')
if self.u_min.shape != (self.udim,):
raise ValueError('The size of u_min should be udim-by-, a one dimensional vector in python.')
self.with_slack = None
def cbf_clf_qp(self, x, u_ref=None, with_slack=1, verbose=0):
inf = np.inf
self.with_slack = with_slack
slack = None
if u_ref is None:
u_ref = np.zeros(self.udim)
else:
if u_ref.shape != (self.udim,):
raise ValueError(f'u_ref should have the shape size (u_dim,), now it is {u_ref.shape}')
if self.weight_input.shape == (1, 1):
self.H = self.weight_input * np.eye(self.udim)
elif self.weight_input.shape == (self.udim, 1):
self.H = np.diag(self.weight_input)
elif self.weight_input.shape == (self.udim, self.udim):
self.H = np.copy(self.weight_input)
else:
self.H = np.eye(self.udim)
V = self.clf(x)
lf_V = self.lf_clf(x)
lg_V = self.lg_clf(x)
B = self.cbf(x)
lf_B = self.lf_cbf(x)
lg_B = self.lg_cbf(x)
if self.with_slack:
lg_V = np.hstack((lg_V, -np.ones((1, 1))))
lg_B = np.hstack((-lg_B, np.zeros((1, 1))))
self.A = np.vstack((lg_V, lg_B))
self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))
self.b = np.atleast_2d(self.b)[0]
u_min = np.hstack((self.u_min, -inf * np.ones(1)))
u_max = np.hstack((self.u_max, inf * np.ones(1)))
u = cp.Variable(self.udim + 1)
self.slack_H = np.hstack((self.H, np.zeros((1, 1))))
self.slack_H = np.vstack((self.slack_H, np.hstack((np.zeros((1, 1)), self.weight_slack * np.ones((1, 1))))))
# -> (1/2) * [u slack]' * H_new * [u slack] - [u slack]' * H_new * [u_ref 0]
u_ref = np.hstack((u_ref, np.zeros(1)))
objective = cp.Minimize((1/2) * cp.quad_form(u, self.slack_H) - (self.slack_H @ u_ref).T @ u)
# Constraints: A * u <= b and u_min, u_max
constraints = [u_min <= u, u <= u_max, self.A @ u <= self.b]
# constraints = [self.u_min <= u, u <= self.u_max, np.eye(2) @ u <= np.zeros(2)]
problem = cp.Problem(objective, constraints)
problem.solve()
# what if infeasible?
if problem.status != 'infeasible':
slack = u.value[-1]
u = u.value[:self.udim]
feas = 1
else:
u = None
slack = None
feas = -1
else:
# Slack variable is not activated:
# Constraints: A u <= b
# LfV + LgV * u + lambda * V <= 0
# LfB + LgB * u + gamma * B >= 0
self.A = np.vstack((lg_V, -lg_B))
# b -> one dimensional vector
self.b = np.hstack((-lf_V - self.clf_lambda * V, lf_B + self.cbf_gamma * B))
self.b = np.atleast_2d(self.b)[0]
u = cp.Variable(self.udim)
# Cost -> (u-u_ref)' * H * (u-u_ref) -> (1/2) * u'*H*u - u'*H*u_ref
objective = cp.Minimize((1/2)*cp.quad_form(u, self.H) - (self.H @ u_ref).T @ u)
constraints = [self.u_min <= u, u <= self.u_max, self.A @ x <= self.b]
problem = cp.Problem(objective, constraints)
problem.solve()
if problem.status != 'infeasible':
u = u.value
feas = 1
else:
u = None
feas = -1
return u, slack, B, V, feas
| true | true |
f7ffa092811cfb98f8aabe5c4c7fe9bafccaa4a9 | 857 | py | Python | imodels/tree/iterative_random_forest/iterative_random_forest.py | csinva/interpretability-workshop | db201be7723179a1f5ab20b242419a604edc9186 | [
"MIT"
] | 1 | 2019-07-09T14:19:16.000Z | 2019-07-09T14:19:16.000Z | imodels/tree/iterative_random_forest/iterative_random_forest.py | csinva/interpretability-workshop | db201be7723179a1f5ab20b242419a604edc9186 | [
"MIT"
] | null | null | null | imodels/tree/iterative_random_forest/iterative_random_forest.py | csinva/interpretability-workshop | db201be7723179a1f5ab20b242419a604edc9186 | [
"MIT"
] | null | null | null | import numpy as np
from irf.ensemble import wrf
from sklearn.base import BaseEstimator
class IRFClassifier(BaseEstimator):
    """Scikit-learn style wrapper around the iterative random forest (irf) classifier."""

    def __init__(self):
        self.model = wrf()
        # Delegate prediction directly to the underlying weighted random forest.
        self.predict = self.model.predict
        self.predict_proba = self.model.predict_proba

    def fit(self, X, y, lambda_reg=0.1, sample_weight=None):
        '''Fit the iterative random forest classifier.

        The previous docstring ("fit a linear model with integer coefficient
        and L1 regularization") was a copy-paste error and described a
        different estimator.

        Params
        ------
        X: array_like (n, d)
            training samples (pandas objects are converted to ndarrays)
        y: array_like (n,)
            training labels
        lambda_reg: float
            kept for interface compatibility; not used by the underlying model
        sample_weight: np.ndarray (n,)
            weight for each individual sample (currently unused)

        Returns
        -------
        self
        '''
        if 'pandas' in str(type(X)):
            X = X.values
        if 'pandas' in str(type(y)):
            y = y.values
        assert type(X) == np.ndarray, 'inputs should be ndarrays'
        assert type(y) == np.ndarray, 'inputs should be ndarrays'
        self.model.fit(X, y, keep_record=False)
        # Return self so calls can be chained, per the scikit-learn convention.
        return self
| 26.78125 | 76 | 0.605601 | import numpy as np
from irf.ensemble import wrf
from sklearn.base import BaseEstimator
class IRFClassifier(BaseEstimator):
def __init__(self):
self.model = wrf()
self.predict = self.model.predict
self.predict_proba = self.model.predict_proba
def fit(self, X, y, lambda_reg=0.1, sample_weight=None):
if 'pandas' in str(type(X)):
X = X.values
if 'pandas' in str(type(y)):
y = y.values
assert type(X) == np.ndarray, 'inputs should be ndarrays'
assert type(y) == np.ndarray, 'inputs should be ndarrays'
self.model.fit(X, y, keep_record=False)
| true | true |
f7ffa1e10c8902c4cf1150a9280e121b87615fc9 | 8,522 | py | Python | tests/test_connect.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
] | 1 | 2020-03-08T10:10:48.000Z | 2020-03-08T10:10:48.000Z | tests/test_connect.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
] | null | null | null | tests/test_connect.py | risicle/clickhouse-driver | d36569f52d3e62ad2e275b1d63ad79b75a06402d | [
"MIT"
] | null | null | null | # coding: utf-8
import socket
from mock import patch
from io import BytesIO
from clickhouse_driver import errors
from clickhouse_driver.client import Client
from clickhouse_driver.protocol import ClientPacketTypes, ServerPacketTypes
from clickhouse_driver.bufferedreader import BufferedReader
from clickhouse_driver.writer import write_binary_str
from tests.testcase import BaseTestCase
from unittest import TestCase
class PacketsTestCase(BaseTestCase):
    def test_packets_to_str(self):
        # Known packet ids map to their names; anything else is "Unknown packet".
        for packet_type, expected in ((2, 'Data'),
                                      (6, 'Unknown packet'),
                                      (42, 'Unknown packet')):
            self.assertEqual(ClientPacketTypes.to_str(packet_type), expected)

        for packet_type, expected in ((4, 'Pong'),
                                      (12, 'Unknown packet'),
                                      (42, 'Unknown packet')):
            self.assertEqual(ServerPacketTypes.to_str(packet_type), expected)
class ConnectTestCase(BaseTestCase):
    # Connection-level behaviour: error propagation, reconnection and failover.
    def test_exception_on_hello_packet(self):
        # A bad user makes the server answer the Hello packet with an Exception.
        with self.created_client(user='wrong_user') as client:
            with self.assertRaises(errors.ServerException) as e:
                client.execute('SHOW TABLES')
        # Simple exception formatting checks
        exc = e.exception
        self.assertIn('Code:', str(exc))
        self.assertIn('Stack trace:', str(exc))
    def test_network_error(self):
        client = Client('bad-address')
        with patch('socket.getaddrinfo') as mocked_getaddrinfo:
            # DNS resolution failure must surface as NetworkError.
            mocked_getaddrinfo.side_effect = socket.error(
                -2, 'Name or service not known'
            )
            with self.assertRaises(errors.NetworkError):
                client.execute('SHOW TABLES')
    def test_timeout_error(self):
        # Both bare socket.timeout and one carrying errno/message must produce
        # SocketTimeoutError with the host:port in the text.
        with patch('socket.socket') as ms:
            ms.return_value.connect.side_effect = socket.timeout
            with self.assertRaises(errors.SocketTimeoutError) as e:
                self.client.execute('SHOW TABLES')
            self.assertEqual(
                str(e.exception),
                'Code: 209. ({}:{})'.format(self.host, self.port)
            )
            ms.return_value.connect.side_effect = socket.timeout(42, 'Test')
            with self.assertRaises(errors.SocketTimeoutError) as e:
                self.client.execute('SHOW TABLES')
            self.assertEqual(
                str(e.exception),
                'Code: 209. Test ({}:{})'.format(self.host, self.port)
            )
    def test_transport_not_connection_on_disconnect(self):
        # Create connection.
        self.client.execute('SELECT 1')
        connection = self.client.connection
        # Failed ping + shutdown error on the dead socket must trigger a
        # transparent reconnect rather than an exception.
        with patch.object(connection, 'ping') as mocked_ping:
            mocked_ping.return_value = False
            with patch.object(connection, 'socket') as mocked_socket:
                mocked_socket.shutdown.side_effect = socket.error(
                    107, 'Transport endpoint is not connected'
                )
                # New socket should be created.
                rv = self.client.execute('SELECT 1')
                self.assertEqual(rv, [(1, )])
        # Close newly created socket.
        connection.socket.close()
    def test_socket_error_on_ping(self):
        self.client.execute('SELECT 1')
        # Broken pipe while flushing the ping must be recovered by reconnecting.
        with patch.object(self.client.connection, 'fout') as mocked_fout:
            mocked_fout.flush.side_effect = socket.error(32, 'Broken pipe')
            rv = self.client.execute('SELECT 1')
            self.assertEqual(rv, [(1, )])
    def test_ping_got_unexpected_package(self):
        self.client.execute('SELECT 1')
        with patch.object(self.client.connection, 'fin') as mocked_fin:
            # Emulate Exception packet on ping.
            mocked_fin.read_one.return_value = 2
            with self.assertRaises(errors.UnexpectedPacketFromServerError):
                self.client.execute('SELECT 1')
    def test_eof_on_receive_packet(self):
        self.client.execute('SELECT 1')
        with patch.object(self.client.connection, 'fin') as mocked_fin:
            # Pong on ping, then EOF while receiving the query response.
            mocked_fin.read_one.side_effect = [4, EOFError]
            with self.assertRaises(EOFError):
                self.client.execute('SELECT 1')
    def test_eof_error_on_ping(self):
        self.client.execute('SELECT 1')
        self.raised = False
        read_one = self.client.connection.fin.read_one
        # Raise EOF exactly once, then fall through to the real read: the
        # client should reconnect and succeed.
        def side_effect(*args, **kwargs):
            if not self.raised:
                self.raised = True
                raise EOFError('Unexpected EOF while reading bytes')
            else:
                return read_one(*args, **kwargs)
        with patch.object(self.client.connection, 'fin') as mocked_fin:
            mocked_fin.read_one.side_effect = side_effect
            rv = self.client.execute('SELECT 1')
            self.assertEqual(rv, [(1, )])
    def test_alt_hosts(self):
        # Primary host is unreachable; the client must fail over to alt_hosts
        # and remember the working host across reconnects.
        client = Client(
            'wrong_host', 1234, self.database, self.user, self.password,
            alt_hosts='{}:{}'.format(self.host, self.port)
        )
        self.n_calls = 0
        getaddrinfo = socket.getaddrinfo
        def side_getaddrinfo(host, *args, **kwargs):
            if host == 'wrong_host':
                self.n_calls += 1
                raise socket.error(-2, 'Name or service not known')
            return getaddrinfo(host, *args, **kwargs)
        with patch('socket.getaddrinfo') as mocked_getaddrinfo:
            mocked_getaddrinfo.side_effect = side_getaddrinfo
            rv = client.execute('SELECT 1')
            self.assertEqual(rv, [(1,)])
            client.disconnect()
            rv = client.execute('SELECT 1')
            self.assertEqual(rv, [(1,)])
            # Last host must be remembered and getaddrinfo must call exactly
            # once with host == 'wrong_host'.
            self.assertEqual(self.n_calls, 1)
        client.disconnect()
class FakeBufferedReader(BufferedReader):
def __init__(self, inputs, bufsize=128):
super(FakeBufferedReader, self).__init__(bufsize)
self._inputs = inputs
self._counter = 0
def read_into_buffer(self):
try:
value = self._inputs[self._counter]
except IndexError:
value = b''
else:
self._counter += 1
self.current_buffer_size = len(value)
self.buffer[:len(value)] = value
if self.current_buffer_size == 0:
raise EOFError('Unexpected EOF while reading bytes')
class TestBufferedReader(TestCase):
def test_corner_case_read(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
])
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(10), b'\x00' * 5 + b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertRaises(EOFError, rdr.read, 10)
def test_corner_case_read_to_end_of_buffer(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
])
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertRaises(EOFError, rdr.read, 10)
def test_corner_case_exact_buffer(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
], bufsize=10)
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(10), b'\x00' * 5 + b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
def test_read_strings(self):
strings = [
u'Yoyodat' * 10,
u'Peter Maffay' * 10,
]
buf = BytesIO()
for name in strings:
write_binary_str(name, buf)
buf = buf.getvalue()
ref_values = tuple(x.encode('utf-8') for x in strings)
for split in range(1, len(buf) - 1):
for split_2 in range(split + 1, len(buf) - 2):
self.assertEqual(
buf[:split] + buf[split:split_2] + buf[split_2:], buf
)
bufs = [
buf[:split],
buf[split:split_2],
buf[split_2:],
]
rdr = FakeBufferedReader(bufs, bufsize=4096)
read_values = rdr.read_strings(2)
self.assertEqual(repr(ref_values), repr(read_values))
| 33.419608 | 76 | 0.591997 |
import socket
from mock import patch
from io import BytesIO
from clickhouse_driver import errors
from clickhouse_driver.client import Client
from clickhouse_driver.protocol import ClientPacketTypes, ServerPacketTypes
from clickhouse_driver.bufferedreader import BufferedReader
from clickhouse_driver.writer import write_binary_str
from tests.testcase import BaseTestCase
from unittest import TestCase
class PacketsTestCase(BaseTestCase):
def test_packets_to_str(self):
self.assertEqual(ClientPacketTypes.to_str(2), 'Data')
self.assertEqual(ClientPacketTypes.to_str(6), 'Unknown packet')
self.assertEqual(ClientPacketTypes.to_str(42), 'Unknown packet')
self.assertEqual(ServerPacketTypes.to_str(4), 'Pong')
self.assertEqual(ServerPacketTypes.to_str(12), 'Unknown packet')
self.assertEqual(ServerPacketTypes.to_str(42), 'Unknown packet')
class ConnectTestCase(BaseTestCase):
def test_exception_on_hello_packet(self):
with self.created_client(user='wrong_user') as client:
with self.assertRaises(errors.ServerException) as e:
client.execute('SHOW TABLES')
exc = e.exception
self.assertIn('Code:', str(exc))
self.assertIn('Stack trace:', str(exc))
def test_network_error(self):
client = Client('bad-address')
with patch('socket.getaddrinfo') as mocked_getaddrinfo:
mocked_getaddrinfo.side_effect = socket.error(
-2, 'Name or service not known'
)
with self.assertRaises(errors.NetworkError):
client.execute('SHOW TABLES')
def test_timeout_error(self):
with patch('socket.socket') as ms:
ms.return_value.connect.side_effect = socket.timeout
with self.assertRaises(errors.SocketTimeoutError) as e:
self.client.execute('SHOW TABLES')
self.assertEqual(
str(e.exception),
'Code: 209. ({}:{})'.format(self.host, self.port)
)
ms.return_value.connect.side_effect = socket.timeout(42, 'Test')
with self.assertRaises(errors.SocketTimeoutError) as e:
self.client.execute('SHOW TABLES')
self.assertEqual(
str(e.exception),
'Code: 209. Test ({}:{})'.format(self.host, self.port)
)
def test_transport_not_connection_on_disconnect(self):
self.client.execute('SELECT 1')
connection = self.client.connection
with patch.object(connection, 'ping') as mocked_ping:
mocked_ping.return_value = False
with patch.object(connection, 'socket') as mocked_socket:
mocked_socket.shutdown.side_effect = socket.error(
107, 'Transport endpoint is not connected'
)
rv = self.client.execute('SELECT 1')
self.assertEqual(rv, [(1, )])
connection.socket.close()
def test_socket_error_on_ping(self):
self.client.execute('SELECT 1')
with patch.object(self.client.connection, 'fout') as mocked_fout:
mocked_fout.flush.side_effect = socket.error(32, 'Broken pipe')
rv = self.client.execute('SELECT 1')
self.assertEqual(rv, [(1, )])
def test_ping_got_unexpected_package(self):
self.client.execute('SELECT 1')
with patch.object(self.client.connection, 'fin') as mocked_fin:
mocked_fin.read_one.return_value = 2
with self.assertRaises(errors.UnexpectedPacketFromServerError):
self.client.execute('SELECT 1')
def test_eof_on_receive_packet(self):
self.client.execute('SELECT 1')
with patch.object(self.client.connection, 'fin') as mocked_fin:
mocked_fin.read_one.side_effect = [4, EOFError]
with self.assertRaises(EOFError):
self.client.execute('SELECT 1')
def test_eof_error_on_ping(self):
self.client.execute('SELECT 1')
self.raised = False
read_one = self.client.connection.fin.read_one
def side_effect(*args, **kwargs):
if not self.raised:
self.raised = True
raise EOFError('Unexpected EOF while reading bytes')
else:
return read_one(*args, **kwargs)
with patch.object(self.client.connection, 'fin') as mocked_fin:
mocked_fin.read_one.side_effect = side_effect
rv = self.client.execute('SELECT 1')
self.assertEqual(rv, [(1, )])
def test_alt_hosts(self):
client = Client(
'wrong_host', 1234, self.database, self.user, self.password,
alt_hosts='{}:{}'.format(self.host, self.port)
)
self.n_calls = 0
getaddrinfo = socket.getaddrinfo
def side_getaddrinfo(host, *args, **kwargs):
if host == 'wrong_host':
self.n_calls += 1
raise socket.error(-2, 'Name or service not known')
return getaddrinfo(host, *args, **kwargs)
with patch('socket.getaddrinfo') as mocked_getaddrinfo:
mocked_getaddrinfo.side_effect = side_getaddrinfo
rv = client.execute('SELECT 1')
self.assertEqual(rv, [(1,)])
client.disconnect()
rv = client.execute('SELECT 1')
self.assertEqual(rv, [(1,)])
self.assertEqual(self.n_calls, 1)
client.disconnect()
class FakeBufferedReader(BufferedReader):
def __init__(self, inputs, bufsize=128):
super(FakeBufferedReader, self).__init__(bufsize)
self._inputs = inputs
self._counter = 0
def read_into_buffer(self):
try:
value = self._inputs[self._counter]
except IndexError:
value = b''
else:
self._counter += 1
self.current_buffer_size = len(value)
self.buffer[:len(value)] = value
if self.current_buffer_size == 0:
raise EOFError('Unexpected EOF while reading bytes')
class TestBufferedReader(TestCase):
def test_corner_case_read(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
])
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(10), b'\x00' * 5 + b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertRaises(EOFError, rdr.read, 10)
def test_corner_case_read_to_end_of_buffer(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
])
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
self.assertRaises(EOFError, rdr.read, 10)
def test_corner_case_exact_buffer(self):
rdr = FakeBufferedReader([
b'\x00' * 10,
b'\xff' * 10,
], bufsize=10)
self.assertEqual(rdr.read(5), b'\x00' * 5)
self.assertEqual(rdr.read(10), b'\x00' * 5 + b'\xff' * 5)
self.assertEqual(rdr.read(5), b'\xff' * 5)
def test_read_strings(self):
strings = [
u'Yoyodat' * 10,
u'Peter Maffay' * 10,
]
buf = BytesIO()
for name in strings:
write_binary_str(name, buf)
buf = buf.getvalue()
ref_values = tuple(x.encode('utf-8') for x in strings)
for split in range(1, len(buf) - 1):
for split_2 in range(split + 1, len(buf) - 2):
self.assertEqual(
buf[:split] + buf[split:split_2] + buf[split_2:], buf
)
bufs = [
buf[:split],
buf[split:split_2],
buf[split_2:],
]
rdr = FakeBufferedReader(bufs, bufsize=4096)
read_values = rdr.read_strings(2)
self.assertEqual(repr(ref_values), repr(read_values))
| true | true |
f7ffa21c257ba1e17cc8c6621957e39dccd105d8 | 14,480 | py | Python | softlearning/algorithms/sac.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 2 | 2020-01-07T04:12:42.000Z | 2021-12-21T22:25:31.000Z | softlearning/algorithms/sac.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 11 | 2019-11-29T02:59:34.000Z | 2022-03-12T00:07:28.000Z | softlearning/algorithms/sac.py | JiazhengChai/synergy_DRL | c08e78e5fe39d9d46213e1bf07b8dafc2195b05a | [
"MIT"
] | 1 | 2020-04-28T12:06:40.000Z | 2020-04-28T12:06:40.000Z | from collections import OrderedDict
from numbers import Number
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from .rl_algorithm import RLAlgorithm
def td_target(reward, discount, next_value):
return reward + discount * next_value
class SAC(RLAlgorithm):
"""Soft Actor-Critic (SAC)
References
----------
[1] Tuomas Haarnoja*, Aurick Zhou*, Kristian Hartikainen*, George Tucker,
Sehoon Ha, Jie Tan, Vikash Kumar, Henry Zhu, Abhishek Gupta, Pieter
Abbeel, and Sergey Levine. Soft Actor-Critic Algorithms and
Applications. arXiv preprint arXiv:1812.05905. 2018.
"""
def __init__(
self,
env,
policy,
Qs,
pool,
plotter=None,
tf_summaries=False,
lr=3e-4,
reward_scale=1.0,
target_entropy='auto',
discount=0.99,
tau=5e-3,
target_update_interval=1,
action_prior='uniform',
reparameterize=False,
store_extra_policy_info=False,
save_full_state=False,
**kwargs,
):
"""
Args:
env (`SoftlearningEnv`): Environment used for training.
policy: A policy function approximator.
initial_exploration_policy: ('Policy'): A policy that we use
for initial exploration which is not trained by the algorithm.
Qs: Q-function approximators. The min of these
approximators will be used. Usage of at least two Q-functions
improves performance by reducing overestimation bias.
pool (`PoolBase`): Replay pool to add gathered samples to.
plotter (`QFPolicyPlotter`): Plotter instance to be used for
visualizing Q-function during training.
lr (`float`): Learning rate used for the function approximators.
discount (`float`): Discount factor for Q-function updates.
tau (`float`): Soft value function target update weight.
target_update_interval ('int'): Frequency at which target network
updates occur in iterations.
reparameterize ('bool'): If True, we use a gradient estimator for
the policy derived using the reparameterization trick. We use
a likelihood ratio based estimator otherwise.
"""
super(SAC, self).__init__(**kwargs)
self._env = env
self._policy = policy
self._Qs = Qs
self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)
self._pool = pool
self._plotter = plotter
self._tf_summaries = tf_summaries
self._policy_lr = lr
self._Q_lr = lr
self._reward_scale = reward_scale
self._target_entropy = (
-np.prod(self._env.action_space.shape)
if target_entropy == 'auto'
else target_entropy)
self._discount = discount
self._tau = tau
self._target_update_interval = target_update_interval
self._action_prior = action_prior
self._reparameterize = reparameterize
self._store_extra_policy_info = store_extra_policy_info
self._save_full_state = save_full_state
observation_shape = self._env.active_observation_shape
action_shape = self._env.action_space.shape
assert len(observation_shape) == 1, observation_shape
self._observation_shape = observation_shape
assert len(action_shape) == 1, action_shape
self._action_shape = action_shape
self._build()
def _build(self):
self._training_ops = {}
self._init_global_step()
self._init_placeholders()
self._init_actor_update()
self._init_critic_update()
def train(self, *args, **kwargs):
"""Initiate training of the SAC instance."""
return self._train(
self._env,
self._policy,
self._pool,
initial_exploration_policy=self._initial_exploration_policy,
*args,
**kwargs)
def _init_global_step(self):
self.global_step = training_util.get_or_create_global_step()
self._training_ops.update({
'increment_global_step': training_util._increment_global_step(1)
})
def _init_placeholders(self):
"""Create input placeholders for the SAC algorithm.
Creates `tf.placeholder`s for:
- observation
- next observation
- action
- reward
- terminals
"""
self._iteration_ph = tf.placeholder(
tf.int64, shape=None, name='iteration')
self._observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='observation',
)
self._next_observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='next_observation',
)
self._actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='actions',
)
self._rewards_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='rewards',
)
self._terminals_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='terminals',
)
if self._store_extra_policy_info:
self._log_pis_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='log_pis',
)
self._raw_actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='raw_actions',
)
def _get_Q_target(self):
next_actions = self._policy.actions([self._next_observations_ph])
next_log_pis = self._policy.log_pis(
[self._next_observations_ph], next_actions)
next_Qs_values = tuple(
Q([self._next_observations_ph, next_actions])
for Q in self._Q_targets)
min_next_Q = tf.reduce_min(next_Qs_values, axis=0)
next_value = min_next_Q - self._alpha * next_log_pis
Q_target = td_target(
reward=self._reward_scale * self._rewards_ph,
discount=self._discount,
next_value=(1 - self._terminals_ph) * next_value)
return Q_target
def _init_critic_update(self):
"""Create minimization operation for critic Q-function.
Creates a `tf.optimizer.minimize` operation for updating
critic Q-function with gradient descent, and appends it to
`self._training_ops` attribute.
See Equations (5, 6) in [1], for further information of the
Q-function update rule.
"""
Q_target = tf.stop_gradient(self._get_Q_target())
assert Q_target.shape.as_list() == [None, 1]
Q_values = self._Q_values = tuple(
Q([self._observations_ph, self._actions_ph])
for Q in self._Qs)
Q_losses = self._Q_losses = tuple(
tf.losses.mean_squared_error(
labels=Q_target, predictions=Q_value, weights=0.5)
for Q_value in Q_values)
self._Q_optimizers = tuple(
tf.train.AdamOptimizer(
learning_rate=self._Q_lr,
name='{}_{}_optimizer'.format(Q._name, i)
) for i, Q in enumerate(self._Qs))
Q_training_ops = tuple(
tf.contrib.layers.optimize_loss(
Q_loss,
self.global_step,
learning_rate=self._Q_lr,
optimizer=Q_optimizer,
variables=Q.trainable_variables,
increment_global_step=False,
summaries=((
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ()))
for i, (Q, Q_loss, Q_optimizer)
in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))
self._training_ops.update({'Q': tf.group(Q_training_ops)})
def _init_actor_update(self):
"""Create minimization operations for policy and entropy.
Creates a `tf.optimizer.minimize` operations for updating
policy and entropy with gradient descent, and adds them to
`self._training_ops` attribute.
See Section 4.2 in [1], for further information of the policy update,
and Section 5 in [1] for further information of the entropy update.
"""
actions = self._policy.actions([self._observations_ph])
log_pis = self._policy.log_pis([self._observations_ph], actions)
assert log_pis.shape.as_list() == [None, 1]
log_alpha = self._log_alpha = tf.get_variable(
'log_alpha',
dtype=tf.float32,
initializer=0.0)
alpha = tf.exp(log_alpha)
if isinstance(self._target_entropy, Number):
alpha_loss = -tf.reduce_mean(
log_alpha * tf.stop_gradient(log_pis + self._target_entropy))
self._alpha_optimizer = tf.train.AdamOptimizer(
self._policy_lr, name='alpha_optimizer')
self._alpha_train_op = self._alpha_optimizer.minimize(
loss=alpha_loss, var_list=[log_alpha])
self._training_ops.update({
'temperature_alpha': self._alpha_train_op
})
self._alpha = alpha
if self._action_prior == 'normal':
policy_prior = tf.contrib.distributions.MultivariateNormalDiag(
loc=tf.zeros(self._action_shape),
scale_diag=tf.ones(self._action_shape))
policy_prior_log_probs = policy_prior.log_prob(actions)
elif self._action_prior == 'uniform':
policy_prior_log_probs = 0.0
Q_log_targets = tuple(
Q([self._observations_ph, actions])
for Q in self._Qs)
min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)
if self._reparameterize:
policy_kl_losses = (
alpha * log_pis
- min_Q_log_target
- policy_prior_log_probs)
else:
raise NotImplementedError
assert policy_kl_losses.shape.as_list() == [None, 1]
policy_loss = tf.reduce_mean(policy_kl_losses)
self._policy_optimizer = tf.train.AdamOptimizer(
learning_rate=self._policy_lr,
name="policy_optimizer")
policy_train_op = tf.contrib.layers.optimize_loss(
policy_loss,
self.global_step,
learning_rate=self._policy_lr,
optimizer=self._policy_optimizer,
variables=self._policy.trainable_variables,
increment_global_step=False,
summaries=(
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ())
self._training_ops.update({'policy_train_op': policy_train_op})
def _init_training(self):
self._update_target(tau=1.0)
def _update_target(self, tau=None):
tau = tau or self._tau
for Q, Q_target in zip(self._Qs, self._Q_targets):
source_params = Q.get_weights()
target_params = Q_target.get_weights()
Q_target.set_weights([
tau * source + (1.0 - tau) * target
for source, target in zip(source_params, target_params)
])
def _do_training(self, iteration, batch):
"""Runs the operations for updating training and target ops."""
feed_dict = self._get_feed_dict(iteration, batch)
self._session.run(self._training_ops, feed_dict)
if iteration % self._target_update_interval == 0:
# Run target ops here.
self._update_target()
def _get_feed_dict(self, iteration, batch):
"""Construct TensorFlow feed_dict from sample batch."""
feed_dict = {
self._observations_ph: batch['observations'],
self._actions_ph: batch['actions'],
self._next_observations_ph: batch['next_observations'],
self._rewards_ph: batch['rewards'],
self._terminals_ph: batch['terminals'],
}
if self._store_extra_policy_info:
feed_dict[self._log_pis_ph] = batch['log_pis']
feed_dict[self._raw_actions_ph] = batch['raw_actions']
if iteration is not None:
feed_dict[self._iteration_ph] = iteration
return feed_dict
def get_diagnostics(self,
iteration,
batch,
training_paths,
evaluation_paths):
"""Return diagnostic information as ordered dictionary.
Records mean and standard deviation of Q-function and state
value function, and TD-loss (mean squared Bellman error)
for the sample batch.
Also calls the `draw` method of the plotter, if plotter defined.
"""
feed_dict = self._get_feed_dict(iteration, batch)
(Q_values, Q_losses, alpha, global_step) = self._session.run(
(self._Q_values,
self._Q_losses,
self._alpha,
self.global_step),
feed_dict)
diagnostics = OrderedDict({
'Q-avg': np.mean(Q_values),
'Q-std': np.std(Q_values),
'Q_loss': np.mean(Q_losses),
'alpha': alpha,
})
policy_diagnostics = self._policy.get_diagnostics(
batch['observations'])
diagnostics.update({
f'policy/{key}': value
for key, value in policy_diagnostics.items()
})
if self._plotter:
self._plotter.draw()
return diagnostics
@property
def tf_saveables(self):
saveables = {
'_policy_optimizer': self._policy_optimizer,
**{
f'Q_optimizer_{i}': optimizer
for i, optimizer in enumerate(self._Q_optimizers)
},
'_log_alpha': self._log_alpha,
}
if hasattr(self, '_alpha_optimizer'):
saveables['_alpha_optimizer'] = self._alpha_optimizer
return saveables
| 33.364055 | 80 | 0.59261 | from collections import OrderedDict
from numbers import Number
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from .rl_algorithm import RLAlgorithm
def td_target(reward, discount, next_value):
return reward + discount * next_value
class SAC(RLAlgorithm):
def __init__(
self,
env,
policy,
Qs,
pool,
plotter=None,
tf_summaries=False,
lr=3e-4,
reward_scale=1.0,
target_entropy='auto',
discount=0.99,
tau=5e-3,
target_update_interval=1,
action_prior='uniform',
reparameterize=False,
store_extra_policy_info=False,
save_full_state=False,
**kwargs,
):
super(SAC, self).__init__(**kwargs)
self._env = env
self._policy = policy
self._Qs = Qs
self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)
self._pool = pool
self._plotter = plotter
self._tf_summaries = tf_summaries
self._policy_lr = lr
self._Q_lr = lr
self._reward_scale = reward_scale
self._target_entropy = (
-np.prod(self._env.action_space.shape)
if target_entropy == 'auto'
else target_entropy)
self._discount = discount
self._tau = tau
self._target_update_interval = target_update_interval
self._action_prior = action_prior
self._reparameterize = reparameterize
self._store_extra_policy_info = store_extra_policy_info
self._save_full_state = save_full_state
observation_shape = self._env.active_observation_shape
action_shape = self._env.action_space.shape
assert len(observation_shape) == 1, observation_shape
self._observation_shape = observation_shape
assert len(action_shape) == 1, action_shape
self._action_shape = action_shape
self._build()
def _build(self):
self._training_ops = {}
self._init_global_step()
self._init_placeholders()
self._init_actor_update()
self._init_critic_update()
def train(self, *args, **kwargs):
return self._train(
self._env,
self._policy,
self._pool,
initial_exploration_policy=self._initial_exploration_policy,
*args,
**kwargs)
def _init_global_step(self):
self.global_step = training_util.get_or_create_global_step()
self._training_ops.update({
'increment_global_step': training_util._increment_global_step(1)
})
def _init_placeholders(self):
self._iteration_ph = tf.placeholder(
tf.int64, shape=None, name='iteration')
self._observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='observation',
)
self._next_observations_ph = tf.placeholder(
tf.float32,
shape=(None, *self._observation_shape),
name='next_observation',
)
self._actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='actions',
)
self._rewards_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='rewards',
)
self._terminals_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='terminals',
)
if self._store_extra_policy_info:
self._log_pis_ph = tf.placeholder(
tf.float32,
shape=(None, 1),
name='log_pis',
)
self._raw_actions_ph = tf.placeholder(
tf.float32,
shape=(None, *self._action_shape),
name='raw_actions',
)
def _get_Q_target(self):
next_actions = self._policy.actions([self._next_observations_ph])
next_log_pis = self._policy.log_pis(
[self._next_observations_ph], next_actions)
next_Qs_values = tuple(
Q([self._next_observations_ph, next_actions])
for Q in self._Q_targets)
min_next_Q = tf.reduce_min(next_Qs_values, axis=0)
next_value = min_next_Q - self._alpha * next_log_pis
Q_target = td_target(
reward=self._reward_scale * self._rewards_ph,
discount=self._discount,
next_value=(1 - self._terminals_ph) * next_value)
return Q_target
def _init_critic_update(self):
Q_target = tf.stop_gradient(self._get_Q_target())
assert Q_target.shape.as_list() == [None, 1]
Q_values = self._Q_values = tuple(
Q([self._observations_ph, self._actions_ph])
for Q in self._Qs)
Q_losses = self._Q_losses = tuple(
tf.losses.mean_squared_error(
labels=Q_target, predictions=Q_value, weights=0.5)
for Q_value in Q_values)
self._Q_optimizers = tuple(
tf.train.AdamOptimizer(
learning_rate=self._Q_lr,
name='{}_{}_optimizer'.format(Q._name, i)
) for i, Q in enumerate(self._Qs))
Q_training_ops = tuple(
tf.contrib.layers.optimize_loss(
Q_loss,
self.global_step,
learning_rate=self._Q_lr,
optimizer=Q_optimizer,
variables=Q.trainable_variables,
increment_global_step=False,
summaries=((
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ()))
for i, (Q, Q_loss, Q_optimizer)
in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))
self._training_ops.update({'Q': tf.group(Q_training_ops)})
def _init_actor_update(self):
actions = self._policy.actions([self._observations_ph])
log_pis = self._policy.log_pis([self._observations_ph], actions)
assert log_pis.shape.as_list() == [None, 1]
log_alpha = self._log_alpha = tf.get_variable(
'log_alpha',
dtype=tf.float32,
initializer=0.0)
alpha = tf.exp(log_alpha)
if isinstance(self._target_entropy, Number):
alpha_loss = -tf.reduce_mean(
log_alpha * tf.stop_gradient(log_pis + self._target_entropy))
self._alpha_optimizer = tf.train.AdamOptimizer(
self._policy_lr, name='alpha_optimizer')
self._alpha_train_op = self._alpha_optimizer.minimize(
loss=alpha_loss, var_list=[log_alpha])
self._training_ops.update({
'temperature_alpha': self._alpha_train_op
})
self._alpha = alpha
if self._action_prior == 'normal':
policy_prior = tf.contrib.distributions.MultivariateNormalDiag(
loc=tf.zeros(self._action_shape),
scale_diag=tf.ones(self._action_shape))
policy_prior_log_probs = policy_prior.log_prob(actions)
elif self._action_prior == 'uniform':
policy_prior_log_probs = 0.0
Q_log_targets = tuple(
Q([self._observations_ph, actions])
for Q in self._Qs)
min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)
if self._reparameterize:
policy_kl_losses = (
alpha * log_pis
- min_Q_log_target
- policy_prior_log_probs)
else:
raise NotImplementedError
assert policy_kl_losses.shape.as_list() == [None, 1]
policy_loss = tf.reduce_mean(policy_kl_losses)
self._policy_optimizer = tf.train.AdamOptimizer(
learning_rate=self._policy_lr,
name="policy_optimizer")
policy_train_op = tf.contrib.layers.optimize_loss(
policy_loss,
self.global_step,
learning_rate=self._policy_lr,
optimizer=self._policy_optimizer,
variables=self._policy.trainable_variables,
increment_global_step=False,
summaries=(
"loss", "gradients", "gradient_norm", "global_gradient_norm"
) if self._tf_summaries else ())
self._training_ops.update({'policy_train_op': policy_train_op})
def _init_training(self):
self._update_target(tau=1.0)
def _update_target(self, tau=None):
tau = tau or self._tau
for Q, Q_target in zip(self._Qs, self._Q_targets):
source_params = Q.get_weights()
target_params = Q_target.get_weights()
Q_target.set_weights([
tau * source + (1.0 - tau) * target
for source, target in zip(source_params, target_params)
])
def _do_training(self, iteration, batch):
feed_dict = self._get_feed_dict(iteration, batch)
self._session.run(self._training_ops, feed_dict)
if iteration % self._target_update_interval == 0:
self._update_target()
def _get_feed_dict(self, iteration, batch):
feed_dict = {
self._observations_ph: batch['observations'],
self._actions_ph: batch['actions'],
self._next_observations_ph: batch['next_observations'],
self._rewards_ph: batch['rewards'],
self._terminals_ph: batch['terminals'],
}
if self._store_extra_policy_info:
feed_dict[self._log_pis_ph] = batch['log_pis']
feed_dict[self._raw_actions_ph] = batch['raw_actions']
if iteration is not None:
feed_dict[self._iteration_ph] = iteration
return feed_dict
def get_diagnostics(self,
iteration,
batch,
training_paths,
evaluation_paths):
feed_dict = self._get_feed_dict(iteration, batch)
(Q_values, Q_losses, alpha, global_step) = self._session.run(
(self._Q_values,
self._Q_losses,
self._alpha,
self.global_step),
feed_dict)
diagnostics = OrderedDict({
'Q-avg': np.mean(Q_values),
'Q-std': np.std(Q_values),
'Q_loss': np.mean(Q_losses),
'alpha': alpha,
})
policy_diagnostics = self._policy.get_diagnostics(
batch['observations'])
diagnostics.update({
f'policy/{key}': value
for key, value in policy_diagnostics.items()
})
if self._plotter:
self._plotter.draw()
return diagnostics
@property
def tf_saveables(self):
saveables = {
'_policy_optimizer': self._policy_optimizer,
**{
f'Q_optimizer_{i}': optimizer
for i, optimizer in enumerate(self._Q_optimizers)
},
'_log_alpha': self._log_alpha,
}
if hasattr(self, '_alpha_optimizer'):
saveables['_alpha_optimizer'] = self._alpha_optimizer
return saveables
| true | true |
f7ffa24aab5c5cec9535bd4faca6a966d41322f9 | 1,510 | py | Python | spyder.py | soonyenju/metSpy | d49c4229786e144c7588fad79efb6cf59b602182 | [
"MIT"
] | 2 | 2018-11-07T08:36:06.000Z | 2019-01-10T06:59:48.000Z | spyder.py | soonyenju/metSpy | d49c4229786e144c7588fad79efb6cf59b602182 | [
"MIT"
] | 2 | 2018-10-24T08:21:54.000Z | 2018-10-25T02:50:49.000Z | spyder.py | soonyenju/metSpy | d49c4229786e144c7588fad79efb6cf59b602182 | [
"MIT"
] | null | null | null | # coding: utf-8
from bs4 import BeautifulSoup
from pathlib import Path
from datetime import datetime
import urllib, requests
import socket
import time, pickle
from metspy.config import Config
from metspy.initializer import Urlinit, Urloader
class Spyder(Config):
"""
Create a new spider
"""
def __init__(self, obj_path = "./static/urls.pkl"):
super(Spyder, self).__init__()
self.obj_path = obj_path
def run(self, country_name = None):
records = {}
if not Path(self.obj_path).exists():
init = Urlinit()
init.run()
else:
loader = Urloader(self.obj_path)
urls = loader.urls
if country_name:
for url in urls[country_name]:
print(url)
record = self.curt_scrape(url)
records[url] = record
else:
for country, country_urls in urls.items():
print(country)
for url in country_urls:
print(url)
record = self.curt_scrape(url)
records[url] = record
return records
def curt_scrape(self, url):
record = {}
request = urllib.request.Request(url, headers=self.header)
try:
response = urllib.request.urlopen(request)
html = response.read().decode("utf-8")
soup = BeautifulSoup(html, features="lxml")
vals = soup.find_all("td", {"class": "tdcur"})
for val in vals:
record[val.get("id")] = val.get_text()
now = datetime.now().strftime("%Y-%m-%d:%H")
record["time"] = now
response.close()
except urllib.error.URLError as e:
print(e.reason)
time.sleep(1) # sleep time, it's for anti-anti-scraping
return record | 25.59322 | 60 | 0.683444 |
from bs4 import BeautifulSoup
from pathlib import Path
from datetime import datetime
import urllib, requests
import socket
import time, pickle
from metspy.config import Config
from metspy.initializer import Urlinit, Urloader
class Spyder(Config):
def __init__(self, obj_path = "./static/urls.pkl"):
super(Spyder, self).__init__()
self.obj_path = obj_path
def run(self, country_name = None):
records = {}
if not Path(self.obj_path).exists():
init = Urlinit()
init.run()
else:
loader = Urloader(self.obj_path)
urls = loader.urls
if country_name:
for url in urls[country_name]:
print(url)
record = self.curt_scrape(url)
records[url] = record
else:
for country, country_urls in urls.items():
print(country)
for url in country_urls:
print(url)
record = self.curt_scrape(url)
records[url] = record
return records
def curt_scrape(self, url):
record = {}
request = urllib.request.Request(url, headers=self.header)
try:
response = urllib.request.urlopen(request)
html = response.read().decode("utf-8")
soup = BeautifulSoup(html, features="lxml")
vals = soup.find_all("td", {"class": "tdcur"})
for val in vals:
record[val.get("id")] = val.get_text()
now = datetime.now().strftime("%Y-%m-%d:%H")
record["time"] = now
response.close()
except urllib.error.URLError as e:
print(e.reason)
time.sleep(1)
return record | true | true |
f7ffa2c9b3d490070ee4ca47edb7b91b15ded8fd | 2,838 | py | Python | Deep RL/agent.py | sakshamarora1/Road-Fighter-AI | 9009240aee7af60605a1f878c0e969c35558c51c | [
"MIT"
] | 1 | 2020-07-19T15:03:30.000Z | 2020-07-19T15:03:30.000Z | Deep RL/agent.py | sakshamarora1/Road-Fighter-AI | 9009240aee7af60605a1f878c0e969c35558c51c | [
"MIT"
] | null | null | null | Deep RL/agent.py | sakshamarora1/Road-Fighter-AI | 9009240aee7af60605a1f878c0e969c35558c51c | [
"MIT"
] | null | null | null | from dqn import DeepQNetwork, ReplayMemory, Transition
import torch
import numpy as np
class DQNAgent:
    """Deep-Q agent: an online network, a frozen target network and a replay
    memory, trained with epsilon-greedy exploration."""
    def __init__(self, inputs, n_actions):
        self.brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)
        self.target_brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)
        # Start the target network as an exact copy of the online network and
        # keep it in eval mode; it is only used to bootstrap TD targets.
        self.target_brain.load_state_dict(self.brain.state_dict())
        self.target_brain.eval()
        self.set_params()
        self.optimizer = torch.optim.Adam(self.brain.parameters())
        self.memory = ReplayMemory(50000)
        self.action_space = [0, 1]
    def set_params(self):
        """Initialise hyper-parameters for epsilon-greedy exploration."""
        self.batch_size = 64
        self.max_exploration_rate = 1
        self.min_exploration_rate = 0.05
        self.exploration_decay_rate = 0.0005
        self.steps_done = 0
    def select_action(self, state):
        """Return an action index, epsilon-greedy with exponential decay."""
        sample = np.random.random()
        # Epsilon decays from max to min as steps_done grows.
        exploration_rate = self.min_exploration_rate + (
            self.max_exploration_rate - self.min_exploration_rate
        ) * np.exp(-self.steps_done * self.exploration_decay_rate)
        self.steps_done += 1
        if sample > exploration_rate:
            # Exploit: greedy action from the online network.
            with torch.no_grad():
                actions = self.brain(state)
            return torch.argmax(actions).item()
        else:
            # Explore: uniform random action.
            return np.random.choice(self.action_space)
    def learn(self):
        """Sample a replay batch and take one gradient step on the online net.

        No-op until the memory holds at least one full batch.
        NOTE(review): the target network is never re-synced with the online
        network after construction; standard DQN refreshes it periodically —
        confirm against the training loop.
        """
        if len(self.memory) < self.batch_size:
            return
        # The memory does its own sampling; the previous extra
        # np.random.choice draw here was dead code and has been removed.
        transitions = self.memory.sample(self.batch_size)
        batch = Transition(*zip(*transitions))
        # Mask of transitions whose episode did not terminate at this step.
        non_final_mask = torch.tensor(
            tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool,
        )
        non_final_next_states = torch.tensor(
            [s for s in batch.next_state if s is not None]
        )
        state_batch = torch.tensor(batch.state)
        action_batch = torch.tensor(batch.action)
        reward_batch = torch.tensor(batch.reward, dtype=torch.float)
        # Q(s, a) of the actions actually taken.
        state_action_values = self.brain(state_batch).gather(
            1, action_batch.unsqueeze(-1)
        )
        # max_a' Q_target(s', a'); stays 0 for terminal next states.
        next_state_values = torch.zeros(self.batch_size)
        next_state_values[non_final_mask] = self.target_brain(
            non_final_next_states
        ).max(1)[0]
        gamma = 0.99
        # Rewards are normalised by the batch maximum before the TD target.
        expected_state_action_values = (
            gamma * next_state_values + reward_batch / reward_batch.max()
        )
        self.loss = torch.nn.MSELoss()(
            expected_state_action_values.unsqueeze(-1), state_action_values
        )
        self.optimizer.zero_grad()
        self.loss.backward()
        self.optimizer.step()
| 31.533333 | 84 | 0.625088 | from dqn import DeepQNetwork, ReplayMemory, Transition
import torch
import numpy as np
class DQNAgent:
def __init__(self, inputs, n_actions):
self.brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)
self.target_brain = DeepQNetwork(inputs, 16, 16, outputNum=n_actions)
self.target_brain.load_state_dict(self.brain.state_dict())
self.target_brain.eval()
self.set_params()
self.optimizer = torch.optim.Adam(self.brain.parameters())
self.memory = ReplayMemory(50000)
self.action_space = [0, 1]
def set_params(self):
self.batch_size = 64
self.max_exploration_rate = 1
self.min_exploration_rate = 0.05
self.exploration_decay_rate = 0.0005
self.steps_done = 0
def select_action(self, state):
sample = np.random.random()
exploration_rate = self.min_exploration_rate + (
self.max_exploration_rate - self.min_exploration_rate
) * np.exp(-self.steps_done * self.exploration_decay_rate)
self.steps_done += 1
if sample > exploration_rate:
with torch.no_grad():
actions = self.brain(state)
return torch.argmax(actions).item()
else:
return np.random.choice(self.action_space)
def learn(self):
if len(self.memory) < self.batch_size:
return
self.optimizer.zero_grad()
max_capacity = (
len(self.memory)
if len(self.memory) < self.memory.capacity
else self.memory.capacity
)
batch = np.random.choice(max_capacity, self.batch_size)
transitions = self.memory.sample(self.batch_size)
batch = Transition(*zip(*transitions))
non_final_mask = torch.tensor(
tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool,
)
non_final_next_states = torch.tensor(
[s for s in batch.next_state if s is not None]
)
state_batch = torch.tensor(batch.state)
action_batch = torch.tensor(batch.action)
reward_batch = torch.tensor(batch.reward, dtype=torch.float)
state_action_values = self.brain(state_batch).gather(
1, action_batch.unsqueeze(-1)
)
next_state_values = torch.zeros(self.batch_size)
next_state_values[non_final_mask] = self.target_brain(
non_final_next_states
).max(1)[0]
gamma = 0.99
expected_state_action_values = (
gamma * next_state_values + reward_batch / reward_batch.max()
)
self.loss = torch.nn.MSELoss()(
expected_state_action_values.unsqueeze(-1), state_action_values
)
self.optimizer.zero_grad()
self.loss.backward()
self.optimizer.step()
| true | true |
f7ffa4ca243d797dc52779a3b8f18154e9171231 | 3,438 | py | Python | src/instrumentum/model_tuning/old_wrapper_optuna.py | FedericoMontana/instrumentum | 0d07f6503c3c0fc980d349aeb6f47c960a4afe9c | [
"MIT"
] | 1 | 2022-02-22T17:27:39.000Z | 2022-02-22T17:27:39.000Z | src/instrumentum/model_tuning/old_wrapper_optuna.py | FedericoMontana/instrumentum | 0d07f6503c3c0fc980d349aeb6f47c960a4afe9c | [
"MIT"
] | 1 | 2021-12-03T21:43:42.000Z | 2021-12-03T21:43:42.000Z | src/instrumentum/model_tuning/old_wrapper_optuna.py | FedericoMontana/instrumentum | 0d07f6503c3c0fc980d349aeb6f47c960a4afe9c | [
"MIT"
] | null | null | null | import logging
import optuna
import optuna.integration.lightgbm as lgb
import pandas as pd
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import (RepeatedStratifiedKFold, StratifiedKFold,
cross_val_score)
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, XGBRegressor, plot_importance
from instrumentum.model_tuning._optuna_dispatchers import optuna_param_disp
logger = logging.getLogger(__name__)
def _opt_generic_objective(X, y, trial, estimator, cv, metric):
    """Optuna objective: sample hyper-parameters for *estimator* from the
    dispatcher, fit via cross-validation and return the mean CV score."""
    hyper_params = optuna_param_disp[estimator.__name__](trial)
    model = estimator(**hyper_params)
    mean_score = cross_val_score(model, X=X, y=y, cv=cv, scoring=metric).mean()
    n_trials_so_far = len(trial.study.trials)
    # On the first trial there is no study best yet, so this score is it.
    if n_trials_so_far == 1 or mean_score > trial.study.best_value:
        running_best = mean_score
    else:
        running_best = trial.study.best_value
    logger.info(
        "Trials: %s, Best Score: %s, Score %s",
        n_trials_so_far,
        running_best,
        mean_score,
    )
    return mean_score
def wrapper_opt(
    X,
    y,
    estimator=None,
    metric="roc_auc",
    n_trials=5,
    verbose=logging.INFO,
    return_fit=True,
    direction="maximize",
    cv_splits=5,
    cv_repeats=1,
):
    """Tune *estimator* with Optuna for *n_trials* trials.

    Returns a (best score, estimator) tuple; the estimator is fitted on the
    full data when *return_fit* is true.  Defaults to a decision tree.
    """
    logger.setLevel(verbose)
    # Optuna is chatty by default; keep only errors.
    optuna.logging.set_verbosity(optuna.logging.ERROR)
    cv = RepeatedStratifiedKFold(n_splits=cv_splits, n_repeats=cv_repeats)
    if not estimator:
        estimator = DecisionTreeClassifier
    logger.info("Estimator received: %s, trials: %s\n", estimator.__name__, n_trials)

    def objective(trial):
        return _opt_generic_objective(
            trial=trial,
            X=X,
            y=y,
            estimator=estimator,
            cv=cv,
            metric=metric,
        )

    study = optuna.create_study(direction=direction)
    study.optimize(objective, n_trials=n_trials)
    best_estimator = estimator(**study.best_params)
    if return_fit:
        best_estimator.fit(X, y)
    return study.best_trial.value, best_estimator
def wrapper_opt_lgbm(
    X, y, metric="auc", time_budget=120, verbose=logging.INFO, return_fit=False
):
    """Tune LightGBM hyper-parameters with Optuna's LightGBMTunerCV.

    Runs a time-budgeted (seconds) stepwise search over a repeated stratified
    10x2 CV and returns a (best score, LGBMClassifier) tuple; the classifier
    is fitted on the full data only when *return_fit* is true.
    """
    # This module's logger obeys the caller-supplied level.
    logger.setLevel(verbose)
    # Silence optuna, and route lightgbm's output to a handler-less logger
    # with a NullHandler so the tuner produces no console noise.
    optuna.logging.set_verbosity(optuna.logging.ERROR)
    no_logger = logging.getLogger("sd")
    no_logger.addHandler(logging.NullHandler())
    lgb.register_logger(no_logger)
    # Callback invoked by optuna after each trial to report progress.
    def log_trials(std, frz_trial):
        logger.info(
            "\nTrials: %s, Iteration Score: %s", len(std.trials), std.best_value
        )
    # Fixed (non-tuned) LightGBM parameters; the tuner searches the rest.
    params = {
        "objective": "binary",
        "metric": metric,
        "boosting_type": "gbdt",
        "seed": 42,
    }
    dtrain = lgb.Dataset(X, label=y)
    rkf = RepeatedStratifiedKFold(
        n_splits=10,
        n_repeats=2,
        random_state=42,
    )
    study_tuner = optuna.create_study(direction="maximize")
    tuner = lgb.LightGBMTunerCV(
        params,
        dtrain,
        study=study_tuner,
        time_budget=time_budget,
        seed=42,
        optuna_callbacks=[log_trials],
        show_progress_bar=False,
        folds=rkf,
    )
    tuner.run()
    # Rebuild a plain sklearn-style classifier from the tuned parameters.
    lgbm = LGBMClassifier(**tuner.best_params)
    return_fit and lgbm.fit(X, y)
    return tuner.best_score, lgbm
| 26.244275 | 85 | 0.672193 | import logging
import optuna
import optuna.integration.lightgbm as lgb
import pandas as pd
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import (RepeatedStratifiedKFold, StratifiedKFold,
cross_val_score)
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, XGBRegressor, plot_importance
from instrumentum.model_tuning._optuna_dispatchers import optuna_param_disp
logger = logging.getLogger(__name__)
def _opt_generic_objective(X, y, trial, estimator, cv, metric):
param = optuna_param_disp[estimator.__name__](trial)
estimator = estimator(**param)
score = cross_val_score(estimator, X=X, y=y, cv=cv, scoring=metric).mean()
trial_n = len(trial.study.trials)
best_score = (
score
if trial_n == 1 or score > trial.study.best_value
else trial.study.best_value
)
logger.info("Trials: %s, Best Score: %s, Score %s", trial_n, best_score, score)
return score
def wrapper_opt(
X,
y,
estimator=None,
metric="roc_auc",
n_trials=5,
verbose=logging.INFO,
return_fit=True,
direction="maximize",
cv_splits=5,
cv_repeats=1,
):
logger.setLevel(verbose)
optuna.logging.set_verbosity(optuna.logging.ERROR)
cv = RepeatedStratifiedKFold(n_splits=cv_splits, n_repeats=cv_repeats)
estimator = estimator or DecisionTreeClassifier
logger.info("Estimator received: %s, trials: %s\n", estimator.__name__, n_trials)
study = optuna.create_study(direction=direction)
study.optimize(
lambda trial: _opt_generic_objective(
trial=trial,
X=X,
y=y,
estimator=estimator,
cv=cv,
metric=metric,
),
n_trials=n_trials,
)
estimator = estimator(**study.best_params)
return_fit and estimator.fit(X, y)
return study.best_trial.value, estimator
def wrapper_opt_lgbm(
X, y, metric="auc", time_budget=120, verbose=logging.INFO, return_fit=False
):
# Our Logger
logger.setLevel(verbose)
# Let's turn off the verbosity of optuna and lighgbm
optuna.logging.set_verbosity(optuna.logging.ERROR)
no_logger = logging.getLogger("sd")
no_logger.addHandler(logging.NullHandler())
lgb.register_logger(no_logger)
def log_trials(std, frz_trial):
logger.info(
"\nTrials: %s, Iteration Score: %s", len(std.trials), std.best_value
)
params = {
"objective": "binary",
"metric": metric,
"boosting_type": "gbdt",
"seed": 42,
}
dtrain = lgb.Dataset(X, label=y)
rkf = RepeatedStratifiedKFold(
n_splits=10,
n_repeats=2,
random_state=42,
)
study_tuner = optuna.create_study(direction="maximize")
tuner = lgb.LightGBMTunerCV(
params,
dtrain,
study=study_tuner,
time_budget=time_budget,
seed=42,
optuna_callbacks=[log_trials],
show_progress_bar=False,
folds=rkf,
)
tuner.run()
lgbm = LGBMClassifier(**tuner.best_params)
return_fit and lgbm.fit(X, y)
return tuner.best_score, lgbm
| true | true |
f7ffa53d57e8e39bd23112bc98ef09f1ba3b8a51 | 216 | py | Python | ltr/helpers/handle_resp.py | tanjie123/hello-ltr | fe1ad1989e1bb17dfc8d1c09931480becf59766e | [
"Apache-2.0"
] | 109 | 2019-04-18T01:24:29.000Z | 2022-03-12T17:37:30.000Z | ltr/helpers/handle_resp.py | tanjie123/hello-ltr | fe1ad1989e1bb17dfc8d1c09931480becf59766e | [
"Apache-2.0"
] | 63 | 2019-04-14T01:01:24.000Z | 2022-03-03T20:48:41.000Z | ltr/helpers/handle_resp.py | tanjie123/hello-ltr | fe1ad1989e1bb17dfc8d1c09931480becf59766e | [
"Apache-2.0"
] | 41 | 2019-04-22T15:22:41.000Z | 2022-02-26T00:03:02.000Z |
def resp_msg(msg, resp, throw=True):
    """Print *msg* with the response status code; on HTTP errors (>= 400)
    also print the body and, unless *throw* is false, raise RuntimeError."""
    status = resp.status_code
    print(f"{msg} [Status: {status}]")
    if status < 400:
        return
    print(resp.text)
    if throw:
        raise RuntimeError(resp.text)
| 21.6 | 58 | 0.587963 |
def resp_msg(msg, resp, throw=True):
print('{} [Status: {}]'.format(msg, resp.status_code))
if resp.status_code >= 400:
print(resp.text)
if throw:
raise RuntimeError(resp.text)
| true | true |
f7ffa5c86ee2c07884c76763f6aff99c38be46ba | 303 | py | Python | utils_nlp/dataset/__init__.py | youtanyouzhen/nlp | 06b2be2c2dad3a28c8ceb408c2f6838cd00c13e0 | [
"MIT"
] | null | null | null | utils_nlp/dataset/__init__.py | youtanyouzhen/nlp | 06b2be2c2dad3a28c8ceb408c2f6838cd00c13e0 | [
"MIT"
] | null | null | null | utils_nlp/dataset/__init__.py | youtanyouzhen/nlp | 06b2be2c2dad3a28c8ceb408c2f6838cd00c13e0 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from enum import Enum
import nltk
# nltk.download("punkt", quiet=True)
# nltk.download("stopwords", quiet=True)
class Split(str, Enum):
    """Canonical dataset split names; members compare equal to their strings."""
    TRAIN = "train"
    DEV = "dev"
    TEST = "test"
| 20.2 | 59 | 0.683168 |
from enum import Enum
import nltk
class Split(str, Enum):
TRAIN: str = "train"
DEV: str = "dev"
TEST: str = "test"
| true | true |
f7ffa5d4dd241723b403921aa4bac015b8c379a6 | 8,322 | py | Python | test/test_report.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 8 | 2018-01-30T18:26:29.000Z | 2022-02-03T19:16:54.000Z | test/test_report.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 99 | 2018-01-22T08:37:53.000Z | 2022-03-28T13:50:06.000Z | test/test_report.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 9 | 2018-01-21T15:51:41.000Z | 2022-01-04T18:35:19.000Z | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
from builtins import str
import io
import mock
import unittest
import os
import locale
from collections import Counter
from datetime import datetime
from catatom2osm import config, osm, report
from catatom2osm.compat import install_gettext
os.environ['LANGUAGE'] = 'C'
install_gettext('catato2osm', '')
class TestReport(unittest.TestCase):
    """Unit tests for catatom2osm.report.Report: attribute proxying through
    the `values` dict, counter cross-validation, statistics gathering from
    OSM data, and (de)serialisation of the report to text files."""
    def test_init(self):
        # Constructor keyword arguments become report attributes.
        r = report.Report(foo = 'bar')
        self.assertEqual(r.foo, 'bar')
    def test_setattr(self):
        # Attribute writes are proxied into the `values` dict.
        r = report.Report()
        r.mun_name = 'foobar'
        self.assertEqual(r.values['mun_name'], 'foobar')
    def test_getattr(self):
        # Attribute reads come back out of the `values` dict.
        r = report.Report()
        r.values['mun_name'] = 'foobar'
        self.assertEqual(r.mun_name, 'foobar')
    def test_get(self):
        # get() honours an explicit default and falls back to 0.
        r = report.Report()
        self.assertEqual(r.get('foo', 'bar'), 'bar')
        self.assertEqual(r.get('bar'), 0)
    def test_inc(self):
        # inc() creates missing counters and supports a step argument.
        r = report.Report()
        r.inc('foo')
        self.assertEqual(r.foo, 1)
        r.inc('foo', 2)
        self.assertEqual(r.foo, 3)
    def test_validate1(self):
        # A fully consistent set of counters must produce no errors.
        r = report.Report()
        r.inp_address_entrance = 6
        r.inp_address_parcel = 4
        r.inp_address = 10
        r.addresses_without_number = 1
        r.orphand_addresses = 2
        r.multiple_addresses = 1
        r.refused_addresses = 2
        r.out_address_entrance = 2
        r.out_address_building = 2
        r.out_addr_str = 3
        r.out_addr_plc = 1
        r.out_address = 4
        r.inp_features = 6
        r.inp_buildings = 2
        r.inp_parts = 3
        r.inp_pools = 1
        r.building_counter = {'a': 1, 'b': 2}
        r.out_buildings = 3
        r.out_features = 8
        r.orphand_parts = 1
        r.underground_parts = 1
        r.new_outlines = 2
        r.multipart_geoms_building = 2
        r.exploded_parts_building = 4
        r.validate()
        self.assertEqual(len(r.errors), 0)
    def test_validate2(self):
        # Deliberately inconsistent counters must raise every check message.
        r = report.Report()
        r.inp_address_entrance = 1
        r.inp_address_parcel = 2
        r.inp_address = 4
        r.addresses_without_number = 1
        r.orphand_addresses = 1
        r.multiple_addresses = 1
        r.refused_addresses = 1
        r.out_address_entrance = 1
        r.out_address_building = 2
        r.out_addr_str = 1
        r.out_addr_plc = 2
        r.out_address = 4
        r.inp_features = 7
        r.inp_buildings = 2
        r.inp_parts = 3
        r.inp_pools = 1
        r.building_counter = {'a': 1, 'b': 2}
        r.out_buildings = 4
        r.out_features = 8
        r.validate()
        msgs = [
            "Sum of address types should be equal to the input addresses",
            "Sum of output and deleted addresses should be equal to the input addresses",
            "Sum of entrance and building address should be equal to output addresses",
            "Sum of street and place addresses should be equal to output addresses",
            "Sum of buildings, parts and pools should be equal to the feature count",
            "Sum of building types should be equal to the number of buildings",
            "Sum of output and deleted minus created building features should be equal to input features"
        ]
        for msg in msgs:
            self.assertIn(msg, r.errors)
    def test_to_string0(self):
        # An empty report renders only the (locale-formatted) date line.
        r = report.Report()
        output = r.to_string()
        expected = "Date: " + datetime.now().strftime('%x') + config.eol
        self.assertEqual(output, expected)
    def test_to_string1(self):
        # Municipality header plus the address input section.
        r = report.Report()
        r.mun_name = 'Foobar'
        r.code = 99999
        r.inp_zip_codes = 1000
        r.fixmes = []
        output = r.to_string()
        expected = u"Municipality: Foobar" + config.eol \
            + "Date: " + datetime.now().strftime('%x') + config.eol + config.eol \
            + "=Addresses=" + config.eol + config.eol \
            + "==Input data==" + config.eol \
            + "Postal codes: " + report.int_format(1000) + config.eol \
            + config.eol + config.fixme_doc_url
        self.assertEqual(output, expected)
    def test_to_string2(self):
        # Fixmes and warnings render as an indented "Problems" section.
        r = report.Report()
        r.fixme_count = 2
        r.fixmes = ['f1', 'f2']
        r.warnings = ['w1', 'w2']
        output = r.to_string()
        expected = u"Date: " + datetime.now().strftime('%x') + config.eol \
            + config.eol + "=Problems=" + config.eol \
            + "Fixmes: 2" + config.eol \
            + report.TAB + "f1" + config.eol + report.TAB + "f2" + config.eol \
            + "Warnings: 2" + config.eol \
            + report.TAB + "w1" + config.eol + report.TAB + "w2" + config.eol \
            + config.eol + config.fixme_doc_url
        self.assertEqual(output, expected)
    def test_to_string3(self):
        # sys_info=True adds the execution time line.
        r = report.Report(sys_info=True)
        output = r.to_string()
        expected = locale.format_string("Execution time: %.1f seconds", r.ex_time, 1)
        self.assertIn(expected, output)
    def test_to_file(self):
        # Round-trip to disk preserves the rendered text (incl. non-ASCII).
        r = report.Report()
        r.mun_name = "áéíóúñ"
        output = r.to_string()
        fn = 'test_report.txt'
        r.to_file(fn)
        with io.open(fn, 'r', encoding=config.encoding) as fo:
            text = str(fo.read())
            text = text.replace('\n\n', config.eol)
        self.assertEqual(output, text)
        if os.path.exists(fn):
            os.remove(fn)
    def test_address_stats(self):
        # Counts street/place addresses and entrance nodes from OSM data.
        ad = osm.Osm()
        ad.Node(0,0, {'addr:street': 's1'})
        ad.Node(2,0, {'addr:street': 's2', 'entrance': 'yes'})
        ad.Node(4,0, {'addr:place': 'p1', 'entrance': 'yes'})
        ad.Way([], {'addr:street': 's3'})
        r = report.Report()
        r.address_stats(ad)
        self.assertEqual(r.out_addr_str, 3)
        self.assertEqual(r.out_addr_plc, 1)
        self.assertEqual(r.out_address_entrance, 2)
        self.assertEqual(r.out_address_building, 2)
    def test_cons_end_stats(self):
        # Level/type counters collapse to "key: count" summary strings
        # (order is not guaranteed, hence the set comparisons).
        r = report.Report()
        r.max_level = {'a': 1, 'b': 2, 'c': 2}
        r.min_level = {'a': 1, 'b': 1, 'c': 2}
        r.building_counter = {'a': 1, 'b': 2}
        r.cons_end_stats()
        self.assertEqual(set(r.dlag.split(', ')), set('1: 1, 2: 2'.split(', ')))
        self.assertEqual(set(r.dlbg.split(', ')), set('1: 2, 2: 1'.split(', ')))
        self.assertEqual(set(r.building_types.split(', ')), set('a: 1, b: 2'.split(', ')))
    def test_cons_stats(self):
        # Tallies pools, buildings, parts and fixme texts from OSM elements.
        r = report.Report()
        r.building_counter = Counter()
        data = osm.Osm()
        data.Node(0,0, {'leisure': 'swimming_pool'})
        data.Node(0,0, {'building': 'a', 'fixme': 'f1'})
        data.Node(0,0, {'building': 'b', 'fixme': 'f2'})
        data.Node(0,0, {'building:part': 'yes', 'fixme': 'f2'})
        data.Node(0,0)
        r.cons_stats(data)
        self.assertEqual(r.out_pools, 1)
        self.assertEqual(r.out_buildings, 2)
        self.assertEqual(r.out_parts, 1)
        self.assertEqual(r.building_counter['a'], 1)
        self.assertEqual(r.building_counter['b'], 1)
        self.assertEqual(r.fixme_counter['f1'], 1)
        self.assertEqual(r.fixme_counter['f2'], 2)
    def test_fixme_stats(self):
        # Returns the total fixme count and materialises the fixmes list.
        r = report.Report()
        r.fixme_counter = {}
        r.fixme_stats()
        self.assertEqual(r.fixme_stats(), 0)
        r.fixme_counter = {'a': 1, 'b': 2}
        r.fixme_stats()
        self.assertEqual(r.fixme_stats(), 3)
        self.assertEqual(len(r.fixmes), 2)
    @mock.patch('catatom2osm.report.io')
    def test_from_file(self, m_io):
        # Parses a previously written report back into attributes; the two
        # "Source date" lines map to building_date / address_date by section.
        r = report.Report()
        t = (
            "Municipality: foobar\n"
            "Code: 12345\n"
            "Application version: taz\n"
            "=Addresses=\n"
            "==Input data==\n"
            "Source date: 2021-09-11\n"
            "=Buildings=\n"
            "==Input data==\n"
            "Source date: 2021-06-22\n"
        )
        m_io.open.return_value = io.StringIO(t)
        r.from_file('')
        self.assertEqual(r.mun_name, 'foobar')
        self.assertEqual(r.mun_code, '12345')
        self.assertEqual(r.app_version, 'taz')
        self.assertEqual(r.building_date, '2021-06-22')
        self.assertEqual(r.address_date, '2021-09-11')
from __future__ import division, unicode_literals
from builtins import str
import io
import mock
import unittest
import os
import locale
from collections import Counter
from datetime import datetime
from catatom2osm import config, osm, report
from catatom2osm.compat import install_gettext
os.environ['LANGUAGE'] = 'C'
install_gettext('catato2osm', '')
class TestReport(unittest.TestCase):
def test_init(self):
r = report.Report(foo = 'bar')
self.assertEqual(r.foo, 'bar')
def test_setattr(self):
r = report.Report()
r.mun_name = 'foobar'
self.assertEqual(r.values['mun_name'], 'foobar')
def test_getattr(self):
r = report.Report()
r.values['mun_name'] = 'foobar'
self.assertEqual(r.mun_name, 'foobar')
def test_get(self):
r = report.Report()
self.assertEqual(r.get('foo', 'bar'), 'bar')
self.assertEqual(r.get('bar'), 0)
def test_inc(self):
r = report.Report()
r.inc('foo')
self.assertEqual(r.foo, 1)
r.inc('foo', 2)
self.assertEqual(r.foo, 3)
def test_validate1(self):
r = report.Report()
r.inp_address_entrance = 6
r.inp_address_parcel = 4
r.inp_address = 10
r.addresses_without_number = 1
r.orphand_addresses = 2
r.multiple_addresses = 1
r.refused_addresses = 2
r.out_address_entrance = 2
r.out_address_building = 2
r.out_addr_str = 3
r.out_addr_plc = 1
r.out_address = 4
r.inp_features = 6
r.inp_buildings = 2
r.inp_parts = 3
r.inp_pools = 1
r.building_counter = {'a': 1, 'b': 2}
r.out_buildings = 3
r.out_features = 8
r.orphand_parts = 1
r.underground_parts = 1
r.new_outlines = 2
r.multipart_geoms_building = 2
r.exploded_parts_building = 4
r.validate()
self.assertEqual(len(r.errors), 0)
def test_validate2(self):
r = report.Report()
r.inp_address_entrance = 1
r.inp_address_parcel = 2
r.inp_address = 4
r.addresses_without_number = 1
r.orphand_addresses = 1
r.multiple_addresses = 1
r.refused_addresses = 1
r.out_address_entrance = 1
r.out_address_building = 2
r.out_addr_str = 1
r.out_addr_plc = 2
r.out_address = 4
r.inp_features = 7
r.inp_buildings = 2
r.inp_parts = 3
r.inp_pools = 1
r.building_counter = {'a': 1, 'b': 2}
r.out_buildings = 4
r.out_features = 8
r.validate()
msgs = [
"Sum of address types should be equal to the input addresses",
"Sum of output and deleted addresses should be equal to the input addresses",
"Sum of entrance and building address should be equal to output addresses",
"Sum of street and place addresses should be equal to output addresses",
"Sum of buildings, parts and pools should be equal to the feature count",
"Sum of building types should be equal to the number of buildings",
"Sum of output and deleted minus created building features should be equal to input features"
]
for msg in msgs:
self.assertIn(msg, r.errors)
def test_to_string0(self):
r = report.Report()
output = r.to_string()
expected = "Date: " + datetime.now().strftime('%x') + config.eol
self.assertEqual(output, expected)
def test_to_string1(self):
r = report.Report()
r.mun_name = 'Foobar'
r.code = 99999
r.inp_zip_codes = 1000
r.fixmes = []
output = r.to_string()
expected = u"Municipality: Foobar" + config.eol \
+ "Date: " + datetime.now().strftime('%x') + config.eol + config.eol \
+ "=Addresses=" + config.eol + config.eol \
+ "==Input data==" + config.eol \
+ "Postal codes: " + report.int_format(1000) + config.eol \
+ config.eol + config.fixme_doc_url
self.assertEqual(output, expected)
def test_to_string2(self):
r = report.Report()
r.fixme_count = 2
r.fixmes = ['f1', 'f2']
r.warnings = ['w1', 'w2']
output = r.to_string()
expected = u"Date: " + datetime.now().strftime('%x') + config.eol \
+ config.eol + "=Problems=" + config.eol \
+ "Fixmes: 2" + config.eol \
+ report.TAB + "f1" + config.eol + report.TAB + "f2" + config.eol \
+ "Warnings: 2" + config.eol \
+ report.TAB + "w1" + config.eol + report.TAB + "w2" + config.eol \
+ config.eol + config.fixme_doc_url
self.assertEqual(output, expected)
def test_to_string3(self):
r = report.Report(sys_info=True)
output = r.to_string()
expected = locale.format_string("Execution time: %.1f seconds", r.ex_time, 1)
self.assertIn(expected, output)
def test_to_file(self):
r = report.Report()
r.mun_name = "áéíóúñ"
output = r.to_string()
fn = 'test_report.txt'
r.to_file(fn)
with io.open(fn, 'r', encoding=config.encoding) as fo:
text = str(fo.read())
text = text.replace('\n\n', config.eol)
self.assertEqual(output, text)
if os.path.exists(fn):
os.remove(fn)
def test_address_stats(self):
ad = osm.Osm()
ad.Node(0,0, {'addr:street': 's1'})
ad.Node(2,0, {'addr:street': 's2', 'entrance': 'yes'})
ad.Node(4,0, {'addr:place': 'p1', 'entrance': 'yes'})
ad.Way([], {'addr:street': 's3'})
r = report.Report()
r.address_stats(ad)
self.assertEqual(r.out_addr_str, 3)
self.assertEqual(r.out_addr_plc, 1)
self.assertEqual(r.out_address_entrance, 2)
self.assertEqual(r.out_address_building, 2)
def test_cons_end_stats(self):
r = report.Report()
r.max_level = {'a': 1, 'b': 2, 'c': 2}
r.min_level = {'a': 1, 'b': 1, 'c': 2}
r.building_counter = {'a': 1, 'b': 2}
r.cons_end_stats()
self.assertEqual(set(r.dlag.split(', ')), set('1: 1, 2: 2'.split(', ')))
self.assertEqual(set(r.dlbg.split(', ')), set('1: 2, 2: 1'.split(', ')))
self.assertEqual(set(r.building_types.split(', ')), set('a: 1, b: 2'.split(', ')))
def test_cons_stats(self):
r = report.Report()
r.building_counter = Counter()
data = osm.Osm()
data.Node(0,0, {'leisure': 'swimming_pool'})
data.Node(0,0, {'building': 'a', 'fixme': 'f1'})
data.Node(0,0, {'building': 'b', 'fixme': 'f2'})
data.Node(0,0, {'building:part': 'yes', 'fixme': 'f2'})
data.Node(0,0)
r.cons_stats(data)
self.assertEqual(r.out_pools, 1)
self.assertEqual(r.out_buildings, 2)
self.assertEqual(r.out_parts, 1)
self.assertEqual(r.building_counter['a'], 1)
self.assertEqual(r.building_counter['b'], 1)
self.assertEqual(r.fixme_counter['f1'], 1)
self.assertEqual(r.fixme_counter['f2'], 2)
def test_fixme_stats(self):
r = report.Report()
r.fixme_counter = {}
r.fixme_stats()
self.assertEqual(r.fixme_stats(), 0)
r.fixme_counter = {'a': 1, 'b': 2}
r.fixme_stats()
self.assertEqual(r.fixme_stats(), 3)
self.assertEqual(len(r.fixmes), 2)
@mock.patch('catatom2osm.report.io')
def test_from_file(self, m_io):
r = report.Report()
t = (
"Municipality: foobar\n"
"Code: 12345\n"
"Application version: taz\n"
"=Addresses=\n"
"==Input data==\n"
"Source date: 2021-09-11\n"
"=Buildings=\n"
"==Input data==\n"
"Source date: 2021-06-22\n"
)
m_io.open.return_value = io.StringIO(t)
r.from_file('')
self.assertEqual(r.mun_name, 'foobar')
self.assertEqual(r.mun_code, '12345')
self.assertEqual(r.app_version, 'taz')
self.assertEqual(r.building_date, '2021-06-22')
self.assertEqual(r.address_date, '2021-09-11') | true | true |
f7ffa73921d29299350bfec3addf3379899c9441 | 791 | py | Python | biodata/api/serializers.py | znatty22/biodataservice | a3eeb137d2e727a0fc58437b185f2637bc4665ed | [
"Apache-2.0"
] | null | null | null | biodata/api/serializers.py | znatty22/biodataservice | a3eeb137d2e727a0fc58437b185f2637bc4665ed | [
"Apache-2.0"
] | null | null | null | biodata/api/serializers.py | znatty22/biodataservice | a3eeb137d2e727a0fc58437b185f2637bc4665ed | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from biodata.api import models as m
COMMON_FIELDS = ['kf_id', 'created', 'modified']
class StudySerializer(serializers.ModelSerializer):
    """DRF serializer for Study; the nested participant list is read-only."""
    class Meta:
        model = m.Study
        fields = COMMON_FIELDS + ['name', 'short_name', 'participants']
        read_only_fields = ['participants']
class ParticipantSerializer(serializers.ModelSerializer):
    """DRF serializer for Participant; related biospecimens are read-only."""
    class Meta:
        model = m.Participant
        fields = COMMON_FIELDS + [
            'gender', 'race', 'ethnicity', 'study', 'biospecimens',
        ]
        read_only_fields = ['biospecimens']
class BiospecimenSerializer(serializers.ModelSerializer):
    """DRF serializer for Biospecimen (analyte type plus owning participant)."""
    class Meta:
        model = m.Biospecimen
        fields = COMMON_FIELDS + [
            'analyte_type', 'participant',
        ]
| 28.25 | 71 | 0.653603 | from rest_framework import serializers
from biodata.api import models as m
COMMON_FIELDS = ['kf_id', 'created', 'modified']
class StudySerializer(serializers.ModelSerializer):
class Meta:
model = m.Study
fields = COMMON_FIELDS + ['name', 'short_name', 'participants']
read_only_fields = ['participants']
class ParticipantSerializer(serializers.ModelSerializer):
class Meta:
model = m.Participant
fields = COMMON_FIELDS + [
'gender', 'race', 'ethnicity', 'study', 'biospecimens',
]
read_only_fields = ['biospecimens']
class BiospecimenSerializer(serializers.ModelSerializer):
class Meta:
model = m.Biospecimen
fields = COMMON_FIELDS + [
'analyte_type', 'participant',
]
| true | true |
f7ffa7e780b0a050b3a879c095cc31bd8786485a | 4,884 | py | Python | conf.py | tabshaikh/documentation | 96f1e169eaccbb22799cc6bbdf6045e726ff8bb6 | [
"CC-BY-4.0"
] | null | null | null | conf.py | tabshaikh/documentation | 96f1e169eaccbb22799cc6bbdf6045e726ff8bb6 | [
"CC-BY-4.0"
] | null | null | null | conf.py | tabshaikh/documentation | 96f1e169eaccbb22799cc6bbdf6045e726ff8bb6 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
#
# Open States API documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 9 13:39:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Open States Documentation'
copyright = '2017, Open States'
author = 'Open States'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStatesAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OpenStatesAPI.tex', 'Open States API Documentation',
'Open States', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openstatesapi', 'Open States API Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OpenStatesAPI', 'Open States API Documentation',
author, 'OpenStatesAPI', 'One line description of project.',
'Miscellaneous'),
]
| 30.335404 | 79 | 0.685299 |
import sphinx_rtd_theme
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Open States Documentation'
copyright = '2017, Open States'
author = 'Open States'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStatesAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OpenStatesAPI.tex', 'Open States API Documentation',
'Open States', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openstatesapi', 'Open States API Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OpenStatesAPI', 'Open States API Documentation',
author, 'OpenStatesAPI', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f7ffa93719f2d814ac9ccb8a67449230dd793043 | 225 | py | Python | tweets/serializers.py | bewallyt/Classy | f4a672f759da862b37b3980151c37cb0d05504d6 | [
"MIT"
] | null | null | null | tweets/serializers.py | bewallyt/Classy | f4a672f759da862b37b3980151c37cb0d05504d6 | [
"MIT"
] | null | null | null | tweets/serializers.py | bewallyt/Classy | f4a672f759da862b37b3980151c37cb0d05504d6 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from tweets.models import Tweet
class TweetSerializer(serializers.ModelSerializer):
    """Expose a tweet's content, date, author and hashtag."""

    class Meta:
        model = Tweet
        fields = ('content', 'date', 'author', 'hashtag')
| 17.307692 | 57 | 0.697778 | from rest_framework import serializers
from tweets.models import Tweet
class TweetSerializer(serializers.ModelSerializer):
class Meta:
model = Tweet
fields = ('content', 'date', 'author', 'hashtag')
| true | true |
f7ffaa5d9be644df19e50b7aa9febcafe18805dc | 337 | py | Python | src/setup.py | jesserockz/python-iglo | f36454b6ed1131af8d432c1b3dbe21df80371d93 | [
"MIT"
] | null | null | null | src/setup.py | jesserockz/python-iglo | f36454b6ed1131af8d432c1b3dbe21df80371d93 | [
"MIT"
] | 1 | 2019-11-23T09:23:01.000Z | 2019-11-26T09:19:46.000Z | src/setup.py | jesserockz/python-iglo | f36454b6ed1131af8d432c1b3dbe21df80371d93 | [
"MIT"
] | 3 | 2018-03-18T10:52:11.000Z | 2021-01-24T21:13:35.000Z | from setuptools import setup, find_packages
setup(name='iglo',
version='1.2.7',
description='Control iGlo based RGB lights',
url='http://github.com/jesserockz/python-iglo',
author='Jesse Hills',
license='MIT',
install_requires=[],
packages=find_packages(exclude=["dist"]),
zip_safe=True)
| 28.083333 | 53 | 0.649852 | from setuptools import setup, find_packages
setup(name='iglo',
version='1.2.7',
description='Control iGlo based RGB lights',
url='http://github.com/jesserockz/python-iglo',
author='Jesse Hills',
license='MIT',
install_requires=[],
packages=find_packages(exclude=["dist"]),
zip_safe=True)
| true | true |
f7ffaa66df0debc9ff7967d63cc13577e329ddd4 | 2,052 | py | Python | lifelist/api/serializers.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | 3 | 2017-08-17T07:12:03.000Z | 2017-10-18T11:13:44.000Z | lifelist/api/serializers.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | 1 | 2018-05-30T14:38:52.000Z | 2018-05-30T14:38:52.000Z | lifelist/api/serializers.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | null | null | null | from api.models import Bucketlist, Item
from django.contrib.auth.models import User
from rest_framework import serializers
class ItemSerializer(serializers.ModelSerializer):
item_bucketlist = serializers.StringRelatedField(
read_only=True)
date_created = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False,
read_only=True)
date_modified = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False)
class Meta:
model = Item
fields = ("id", "item_bucketlist", "title", "description",
"date_created", "date_modified", "is_done")
class BucketlistSerializer(serializers.ModelSerializer):
    """Serializer for bucket lists with nested, read-only items.

    ``created_by`` is rendered as the owner's username and cannot be
    reassigned through the API; ``date_created`` is server-controlled.
    """

    # Owner shown by username; read-only so ownership cannot be changed.
    created_by = serializers.SlugRelatedField(
        read_only=True,
        slug_field='username')
    # Full nested item payloads, read-only (items are managed separately).
    items = ItemSerializer(
        many=True,
        read_only=True)
    date_created = serializers.DateTimeField(
        format='%d.%m.%Y %H:%M',
        required=False,
        read_only=True)
    date_modified = serializers.DateTimeField(
        format='%d.%m.%Y %H:%M',
        required=False)

    class Meta:
        model = Bucketlist
        fields = ("id", "created_by", "title", "description",
                  "items", "date_created", "date_modified")
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth users.

    Bucket lists are rendered read-only through their string
    representation.  The password is write-only: it is accepted on
    input, stored hashed via ``set_password``, and never echoed back.
    """

    bucketlists = serializers.StringRelatedField(
        read_only=True,
        many=True)
    email = serializers.EmailField(
        max_length=None,
        min_length=None,
        allow_blank=False)
    password = serializers.CharField(
        max_length=100,
        style={'input_type': 'password'},
        required=True,
        write_only=True)

    def create(self, validated_data):
        """Create a new user, hashing the raw password before saving."""
        new_user = User(
            username=validated_data['username'],
            email=validated_data['email'],
        )
        new_user.set_password(validated_data['password'])
        new_user.save()
        return new_user

    class Meta:
        model = User
        fields = ("id", "username", "email", "password", "bucketlists")
| 26.649351 | 71 | 0.623782 | from api.models import Bucketlist, Item
from django.contrib.auth.models import User
from rest_framework import serializers
class ItemSerializer(serializers.ModelSerializer):
item_bucketlist = serializers.StringRelatedField(
read_only=True)
date_created = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False,
read_only=True)
date_modified = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False)
class Meta:
model = Item
fields = ("id", "item_bucketlist", "title", "description",
"date_created", "date_modified", "is_done")
class BucketlistSerializer(serializers.ModelSerializer):
created_by = serializers.SlugRelatedField(
read_only=True,
slug_field='username')
items = ItemSerializer(
many=True,
read_only=True)
date_created = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False,
read_only=True)
date_modified = serializers.DateTimeField(
format='%d.%m.%Y %H:%M',
required=False)
class Meta:
model = Bucketlist
fields = ("id", "created_by", "title", "description",
"items", "date_created", "date_modified")
class UserSerializer(serializers.ModelSerializer):
bucketlists = serializers.StringRelatedField(
read_only=True,
many=True)
email = serializers.EmailField(
max_length=None,
min_length=None,
allow_blank=False)
password = serializers.CharField(
max_length=100,
style={'input_type': 'password'},
required=True,
write_only=True)
def create(self, validated_data):
user = User(username=validated_data['username'],
email=validated_data['email'])
user.set_password(validated_data['password'])
user.save()
return user
class Meta:
model = User
fields = ("id", "username", "email", "password", "bucketlists")
| true | true |
f7ffaa8f5188de0e09d241c1c18a144b4e55ab5d | 9,110 | py | Python | service/config/config.py | Nansir0808/kamiFaka | 5deb2e907437e280cb73e05b3e6ec2cbc115904f | [
"MIT"
] | null | null | null | service/config/config.py | Nansir0808/kamiFaka | 5deb2e907437e280cb73e05b3e6ec2cbc115904f | [
"MIT"
] | null | null | null | service/config/config.py | Nansir0808/kamiFaka | 5deb2e907437e280cb73e05b3e6ec2cbc115904f | [
"MIT"
] | null | null | null | from service.database.models import *
from service.api.db import db
# 经验:先用true或false
def init_db(update=False):
    """Populate an empty database with default and demo rows.

    Seeds the admin account, payment channels, product categories, demo
    products, card codes, site configuration, notification channels,
    demo orders and plugin settings, then commits everything once.

    NOTE(review): the *update* parameter is currently unused — confirm
    whether it was meant to guard re-seeding an existing database.
    """
    # Administrator account (second argument is a pre-computed bcrypt hash).
    db.session.add(AdminUser('admin@qq.com','$2b$12$BKSXKYuCgeXjr8IEbK02re0VhkFoAz7f3aHF3kYAMLzYaEiObqPYm'))
    # SMTP (mail) configuration example, disabled by default:
    # db.session.add(Smtp('demo@qq.com','卡密发卡网','smtp.qq.com','465','xxxxxxxxx',True))
    # Payment channels: Payment(name, method, config-string, note, enabled).
    db.session.add(Payment('支付宝当面付','支付宝',"{'APPID':'2016091800537528','alipay_public_key':'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4AHTfGleo8WI3qb+mSWOjJRyn6Vh8XvO6YsQmJjPnNKhvACHTHcU+PCUWUKZ54fSVhMkFZEQWMtAGeOt3lGy3pMBS96anh841gxJc2NUljU14ESXnDn4QdVe4bosmYvfko46wfA0fGClHdpO8UUiJGLj1W5alv10CwiCrYRDtx93SLIuQgwJn4yBC1/kE/KENOaWaA45dXIQvKh2P0lTbm0AvwYMVvYB+eB1GtOGQbuFJXUxWaMa0byTo9wSllhgyiIkOH+HJ9oOZIweGlsrezeUUdr3EEX97k25LdnUt/oQK8FIfthexfWZpTDDlHqmI7p6gCtRVDJenU4sxwpEyQIDAQAB','app_private_key':'MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCqWmxsyPLwRmZHwoLYlUJXMF7PATKtvp7BrJfwLbxwrz6I48G11HpPPyAoNynwAMG7DCXjVX76NCbmfvvPqnbk09rNRULqGju8G6NkQTbLfDjhJs+CE8kdIs89btxqDG70ebePiZTGpQngPLfrziKDOhRfXkA5qRPImbC+PUXiXq9qvkp9Yu/8IYjyxUpNBNjZuTK+fTjSI0RCt7eE+wR0KqpNIzot1q/ds1KTIYmJQM5tEFie4BK0pDtGiIs/VrUG8PTPqLyzEyIMy1N75olUWAiGrk0USqiieP3TYj0PdlQDX2T14DOwMkl5Rjvt7Knc+WGdolPIBssUX1wTE+J7AgMBAAECggEAWpRP+Jv0yRu1wMxFRKJArxmSH+GUL9wej/6Un2nCO+yChMkNtAAxtLdtAtUqIGpWmH2CG9nW9XULhh3ZCPer1kprmiAMz2t5fbD4dRNT7miz2cwIJDMfCbX7mb+7xUutJ6Mcnl7aU7FnierfJKvrn/ke4gK8haxIT66g0tbDtPQhYnGPawyM+gqFulaMBcuqH0naAIq5ZBWHkKuuwJ1SD6yGrWgHdq3Kt2pE8b9yjfdUl15IeW0rszXG6fTika9WX6qaulyoGAAZdjiXED+mbRyqZA3jq7RI38qBP9+/jAb+fdwE8EwqnpPvfGHMBdkREOXK0kzRU8rpd9GbH7INaQKBgQDwpuW+bK/qxKx3BSAXL98f0J2I7YVuk0EFCStGoxnzWRv0yvL0QEDwN+QPiVMmcVQcr79mW5zTBkd4vmr3ud+v1f/X6UPI82kQhZlVWry8LEnisPlZuE0E/EaJrLgF7z4l3ItzCVi8IfpgizPcCYSz/vY49a5W34eKjXHWUB1jDwKBgQC1N8PgGKI2LRDaJeqt5Ef6yyYSMOgVe0WSqAlgyMECb1pjmMBjcNG1AFE/FfgNu4thOaXIogElGVoQFvA5GuJQY48HOJNgx3Ua2SxiowcXkAN0gIm4FY+ozkp7xhizvLVfsmX+MKqPtl6nggiWETJJyvMQnjMgKLmSvhsopMwZ1QKBgGV36az2BOK3VITGq3Y7YBf5DUN76uPpwOOPryiUgs+hhfEcVX55TSg8WLPYUjAGXtHNpKVTAXfU0PPvTgjv3Yo1cC+okkU7pNQrkLB1lti8z9Z+ilSzKf5tJIzOP7V437p1GHNDwJ9qsDhe2VnwxXxjh4wSwxSsIWlhJFuZ4hovAoGAFgm8Fmqof3InlH/79D3IyyUdciTkdIhTQ6yPx2dioYstMOOIsg8sUZjCSKvBSNo/7wj1slqRTROyMja37Bnq39/bqwMkWSaohSVYEn7FBAaNhQOEvBBTMjI0OK00n9cZL5QgdzMv6t5A0JottSJOPU8jFChJC2Yoe0IHR4ATGikCgYB2smi7/ptKiGdwmiuUHsF/U3jfjpHyHwLrXjoSU+mwV+Gjqcd
btkSP1suGjN8tcdbFvLSCRX/IRdFHYJeuPUXQtZtiC431+upasbEiJ1xZ2KcK3lKf0mOn10kPD5QC7mmsfmjz4cw9cSrBjmcWGXeIwIXPLhOAAIzpHqy8oP/F/g=='}",'alipay.com 官方接口0.38~0.6%',True))
    db.session.add(Payment('微信官方接口','微信支付',"{'APPID':'XXXXXXXX','MCH_ID':'XXXXXX','APP_SECRET':'XXXXXX'}",'pay.weixin.qq.com 微信官方0.38%需要营业执照',False))
    db.session.add(Payment('QQ钱包','QQ支付',"{'mch_id':'XXXXXXXX','key':'YYYYY'}",'mp.qpay.tenpay.com QQ官方0.6%需要营业执照',False))
    db.session.add(Payment('虎皮椒支付宝','支付宝',"{'API':'api.vrmrgame.com','appid':'XXXXXX','AppSecret':'YYYYY'}",'xunhupay.com 个人接口0.38%+1~2%',False))
    db.session.add(Payment('虎皮椒微信','微信支付',"{'API':'api.vrmrgame.com','appid':'XXXXXX','AppSecret':'YYYYY'}",'xunhupay.com 个人接口0.38~0.6%+1~2%',False))
    db.session.add(Payment('PAYJS支付宝','支付宝',"{'payjs_key':'XXXXXX','mchid':'YYYYY','mchid':'ZZZZZZZ'}",'payjs.cn 个人接口2.38%',False))
    db.session.add(Payment('PAYJS微信','微信支付',"{'payjs_key':'XXXXXX','mchid':'YYYYY','mchid':'ZZZZZZZ'}",'payjs.cn 个人接口2.38%',False))
    db.session.add(Payment('码支付支付宝','支付宝',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
    db.session.add(Payment('码支付微信','微信支付',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
    db.session.add(Payment('码支付QQ','QQ支付',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
    db.session.add(Payment('V免签支付宝','支付宝',"{'API':'http://google.com','KEY':'YYYYYYYY'}",'0费率实时到账',False))
    db.session.add(Payment('V免签微信','微信',"{'API':'http://google.com','KEY':'YYYYYYYY'}",'0费率实时到账',False))
    db.session.add(Payment('易支付','易支付',"{'API':'http://google.com','ID':'XXXXX','KEY':'YYYYYYYY'}",'支持订单查询接口的任意易支付 高费率不稳定',False))
    db.session.add(Payment('Mugglepay','Mugglepay',"{'TOKEN':'xxxxxx','Currency':'CNY'}",'mugglepay.com全球综合收款系统(已修复)',False))
    db.session.add(Payment('YunGouOS','微信或支付宝支付',"{'mch_id':'xxxxxx','pay_secret':'yyyyyyy'}",'yungouos.com 微信或支付宝个体1+0.38%',False))
    db.session.add(Payment('YunGouOS_WXPAY','微信支付',"{'mch_id':'xxxxxx','pay_secret':'yyyyyyy'}",'yungouos.com 微信个体1+0.38~0.6%',False))
    # Product categories: ProdCag(name, description, sort order).
    db.session.add(ProdCag('账户ID','虚拟账号类商品','100'))
    db.session.add(ProdCag('激活码','单独激活类商品','1000'))
    db.session.add(ProdCag('第三分类','单独激活类商品','1000'))
    # Demo products: ProdInfo(category, name, summary, image, sort, description,
    # price, wholesale spec, auto-delivery flag, sales, stock, visible flag).
    db.session.add(ProdInfo('账户ID','普通商品演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,None, True,0,0,True))
    # db.session.add(ProdInfo('账户ID','批发商品演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
    #     9.99,'9#9.9,8.8', True,0,0,True))
    db.session.add(ProdInfo('账户ID','批发商品演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,'9,100#9.9,8.82,7.7', True,0,0,True))
    db.session.add(ProdInfo('账户ID','普通商品DD','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,None, False,0,0,False))
    db.session.add(ProdInfo('激活码','重复卡密演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,None, True,0,0,True))
    db.session.add(ProdInfo('激活码','普通商品CC','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,None, True,0,0,True))
    db.session.add(ProdInfo('激活码','普通商品BB','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
        9.99,None,True,0,0,False))
    # Card codes: Card(product name, card text, reusable flag, sold flag).
    db.session.add(Card('普通商品演示','454545454454545454',False,False))
    db.session.add(Card('批发商品演示','555555555555555555',False,False))
    db.session.add(Card('批发商品演示','666666666666666666',False,False))
    db.session.add(Card('重复卡密演示','666666666666666666',True,False))
    # Site configuration: Config(key, value, description, user-visible flag).
    db.session.add(Config('web_name','KAMIFAKA','网站名称',True))
    db.session.add(Config('web_keyword','关键词、收录词汇','网站关键词',True))
    db.session.add(Config('description','网站描述信息。。。','网站描述',True))
    db.session.add(Config('web_url','https://baidu.com','网站实际地址',True))
    db.session.add(Config('web_bg_url','https://cdn.jsdelivr.net/gh/Baiyuetribe/yyycode@dev/colorfull.jpg','网站背景图片',True))
    db.session.add(Config('contact_us','<p>示例,请在管理后台>>网站设置里修改,支持HTML格式</p>','首页-联系我们',True))
    # db.session.add(Config('web_footer','【未开发】','可填写备案信息',True))
    db.session.add(Config('top_notice','首页公告栏信息,请在管理后台,网站设置里修改,支持HTML格式','首页公告',True))
    # db.session.add(Config('modal_notice','【计划中】','全局弹窗信息',True))
    db.session.add(Config('toast_notice','演示站随时更新,可优先体验新功能','首页滑动消息设置',True))
    db.session.add(Config('contact_option','0','是否启用联系方式查询[0启用,1关闭]',True))
    db.session.add(Config('theme','list','主题',False))
    db.session.add(Config('kamiFaka','https://github.com/Baiyuetribe/kamiFaka','Github项目地址,用于手动检测新版',False))
    db.session.add(Config('kamiFaka_v','1.8','Github项目地址,用于手动检测新版',False))
    # Notification channels: Notice(name, config-string, recipient,
    # admin switch, user switch).
    db.session.add(Notice('邮箱通知',"{'sendname':'no_replay','sendmail':'demo@gmail.com','smtp_address':'smtp.qq.com','smtp_port':'465','smtp_pwd':'ZZZZZZZ'}",'demo@qq.com',False,False))
    db.session.add(Notice('微信通知',"{'token':'AT_nvlYDjev89gV96hBAvUX5HR3idWQwLlA'}",'xxxxxxxxxxxxxxxx',False,False))
    db.session.add(Notice('TG通知',"{'TG_TOKEN':'1290570937:AAHaXA2uOvDoGKbGeY4xVIi5kR7K55saXhs'}",'445545444',False,False))
    db.session.add(Notice('短信通知',"{'username':'XXXXXX','password':'YYYYY','tokenYZM':'必填','templateid':'必填'}",'15347875415',False,False))
    db.session.add(Notice('QQ通知',"{'Key':'null'}",'格式:您的KEY@已添加的QQ号,示例:abc@123',False,False))
    # Demo orders (test data only).
    db.session.add(Order('演示订单可删除','普通商品演示','支付宝当面付','472835979','请求尽快发货',9.99,1,0.9,'账号:xxxxx;密码:xxxx',None,None))
    db.session.add(Order('演示订单可删除2','普通商品演示','虎皮椒微信','458721@qq.com','非常感谢',9.99,3,1.97,None,None,None))  # card text may be None or an empty string
    db.session.add(Order('Order_1608107857954q7kyldyg','普通商品演示','虎皮椒支付宝','demo@gmail.com','不错',9.99,1,0.9,'此处为卡密',None,None))
    db.session.add(Order('演示订单4457','普通商品演示','虎皮椒支付宝','472835979','不错',9.99,1,1.9,'TG卡密DEMO',None,None))
    # Plugin configuration.
    db.session.add(Plugin('TG发卡',"{'TG_TOKEN':'1488086653:AAHihuO0JuvmiDNZtsYcDBpUhL1rTDO6o1C'}",'### 示例 \n请在管理后台--》Telegram里设置,支持HTML格式',False))
    db.session.add(Plugin('微信公众号',"{'PID':'xxxxxxxxxxxx'}",'<p>示例,请在管理后台>>Telegram里设置,支持HTML格式</p>',False))
    # Temporary order examples (kept for reference):
    # db.session.add(TempOrder('id44454','重复卡密演示','alipay','154311','',10,False,None))
    # db.session.add(TempOrder('id44454','批发商品演示','alipay','154311','',22,False,None))
    db.session.commit()
| 101.222222 | 2,166 | 0.724698 | from service.database.models import *
from service.api.db import db
def init_db(update=False):
db.session.add(AdminUser('admin@qq.com','$2b$12$BKSXKYuCgeXjr8IEbK02re0VhkFoAz7f3aHF3kYAMLzYaEiObqPYm'))
db.session.add(Payment('支付宝当面付','支付宝',"{'APPID':'2016091800537528','alipay_public_key':'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4AHTfGleo8WI3qb+mSWOjJRyn6Vh8XvO6YsQmJjPnNKhvACHTHcU+PCUWUKZ54fSVhMkFZEQWMtAGeOt3lGy3pMBS96anh841gxJc2NUljU14ESXnDn4QdVe4bosmYvfko46wfA0fGClHdpO8UUiJGLj1W5alv10CwiCrYRDtx93SLIuQgwJn4yBC1/kE/KENOaWaA45dXIQvKh2P0lTbm0AvwYMVvYB+eB1GtOGQbuFJXUxWaMa0byTo9wSllhgyiIkOH+HJ9oOZIweGlsrezeUUdr3EEX97k25LdnUt/oQK8FIfthexfWZpTDDlHqmI7p6gCtRVDJenU4sxwpEyQIDAQAB','app_private_key':'MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCqWmxsyPLwRmZHwoLYlUJXMF7PATKtvp7BrJfwLbxwrz6I48G11HpPPyAoNynwAMG7DCXjVX76NCbmfvvPqnbk09rNRULqGju8G6NkQTbLfDjhJs+CE8kdIs89btxqDG70ebePiZTGpQngPLfrziKDOhRfXkA5qRPImbC+PUXiXq9qvkp9Yu/8IYjyxUpNBNjZuTK+fTjSI0RCt7eE+wR0KqpNIzot1q/ds1KTIYmJQM5tEFie4BK0pDtGiIs/VrUG8PTPqLyzEyIMy1N75olUWAiGrk0USqiieP3TYj0PdlQDX2T14DOwMkl5Rjvt7Knc+WGdolPIBssUX1wTE+J7AgMBAAECggEAWpRP+Jv0yRu1wMxFRKJArxmSH+GUL9wej/6Un2nCO+yChMkNtAAxtLdtAtUqIGpWmH2CG9nW9XULhh3ZCPer1kprmiAMz2t5fbD4dRNT7miz2cwIJDMfCbX7mb+7xUutJ6Mcnl7aU7FnierfJKvrn/ke4gK8haxIT66g0tbDtPQhYnGPawyM+gqFulaMBcuqH0naAIq5ZBWHkKuuwJ1SD6yGrWgHdq3Kt2pE8b9yjfdUl15IeW0rszXG6fTika9WX6qaulyoGAAZdjiXED+mbRyqZA3jq7RI38qBP9+/jAb+fdwE8EwqnpPvfGHMBdkREOXK0kzRU8rpd9GbH7INaQKBgQDwpuW+bK/qxKx3BSAXL98f0J2I7YVuk0EFCStGoxnzWRv0yvL0QEDwN+QPiVMmcVQcr79mW5zTBkd4vmr3ud+v1f/X6UPI82kQhZlVWry8LEnisPlZuE0E/EaJrLgF7z4l3ItzCVi8IfpgizPcCYSz/vY49a5W34eKjXHWUB1jDwKBgQC1N8PgGKI2LRDaJeqt5Ef6yyYSMOgVe0WSqAlgyMECb1pjmMBjcNG1AFE/FfgNu4thOaXIogElGVoQFvA5GuJQY48HOJNgx3Ua2SxiowcXkAN0gIm4FY+ozkp7xhizvLVfsmX+MKqPtl6nggiWETJJyvMQnjMgKLmSvhsopMwZ1QKBgGV36az2BOK3VITGq3Y7YBf5DUN76uPpwOOPryiUgs+hhfEcVX55TSg8WLPYUjAGXtHNpKVTAXfU0PPvTgjv3Yo1cC+okkU7pNQrkLB1lti8z9Z+ilSzKf5tJIzOP7V437p1GHNDwJ9qsDhe2VnwxXxjh4wSwxSsIWlhJFuZ4hovAoGAFgm8Fmqof3InlH/79D3IyyUdciTkdIhTQ6yPx2dioYstMOOIsg8sUZjCSKvBSNo/7wj1slqRTROyMja37Bnq39/bqwMkWSaohSVYEn7FBAaNhQOEvBBTMjI0OK00n9cZL5QgdzMv6t5A0JottSJOPU8jFChJC2Yoe0IHR4ATGikCgYB2smi7/ptKiGdwmiuUHsF/U3jfjpHyHwLrXjoSU+mwV+Gjqcd
btkSP1suGjN8tcdbFvLSCRX/IRdFHYJeuPUXQtZtiC431+upasbEiJ1xZ2KcK3lKf0mOn10kPD5QC7mmsfmjz4cw9cSrBjmcWGXeIwIXPLhOAAIzpHqy8oP/F/g=='}",'alipay.com 官方接口0.38~0.6%',True))
db.session.add(Payment('微信官方接口','微信支付',"{'APPID':'XXXXXXXX','MCH_ID':'XXXXXX','APP_SECRET':'XXXXXX'}",'pay.weixin.qq.com 微信官方0.38%需要营业执照',False))
db.session.add(Payment('QQ钱包','QQ支付',"{'mch_id':'XXXXXXXX','key':'YYYYY'}",'mp.qpay.tenpay.com QQ官方0.6%需要营业执照',False))
db.session.add(Payment('虎皮椒支付宝','支付宝',"{'API':'api.vrmrgame.com','appid':'XXXXXX','AppSecret':'YYYYY'}",'xunhupay.com 个人接口0.38%+1~2%',False))
db.session.add(Payment('虎皮椒微信','微信支付',"{'API':'api.vrmrgame.com','appid':'XXXXXX','AppSecret':'YYYYY'}",'xunhupay.com 个人接口0.38~0.6%+1~2%',False))
db.session.add(Payment('PAYJS支付宝','支付宝',"{'payjs_key':'XXXXXX','mchid':'YYYYY','mchid':'ZZZZZZZ'}",'payjs.cn 个人接口2.38%',False))
db.session.add(Payment('PAYJS微信','微信支付',"{'payjs_key':'XXXXXX','mchid':'YYYYY','mchid':'ZZZZZZZ'}",'payjs.cn 个人接口2.38%',False))
db.session.add(Payment('码支付支付宝','支付宝',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
db.session.add(Payment('码支付微信','微信支付',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
db.session.add(Payment('码支付QQ','QQ支付',"{'codepay_id':'58027','codepay_key':'fgl454542WSDJHEJHDJZpTRrmbn','token':'jljCGU3pRvXXXXXXXXXXXb1iq'}",'codepay.fateqq.com[不可用]',False))
db.session.add(Payment('V免签支付宝','支付宝',"{'API':'http://google.com','KEY':'YYYYYYYY'}",'0费率实时到账',False))
db.session.add(Payment('V免签微信','微信',"{'API':'http://google.com','KEY':'YYYYYYYY'}",'0费率实时到账',False))
db.session.add(Payment('易支付','易支付',"{'API':'http://google.com','ID':'XXXXX','KEY':'YYYYYYYY'}",'支持订单查询接口的任意易支付 高费率不稳定',False))
db.session.add(Payment('Mugglepay','Mugglepay',"{'TOKEN':'xxxxxx','Currency':'CNY'}",'mugglepay.com全球综合收款系统(已修复)',False))
db.session.add(Payment('YunGouOS','微信或支付宝支付',"{'mch_id':'xxxxxx','pay_secret':'yyyyyyy'}",'yungouos.com 微信或支付宝个体1+0.38%',False))
db.session.add(Payment('YunGouOS_WXPAY','微信支付',"{'mch_id':'xxxxxx','pay_secret':'yyyyyyy'}",'yungouos.com 微信个体1+0.38~0.6%',False))
db.session.add(ProdCag('账户ID','虚拟账号类商品','100'))
db.session.add(ProdCag('激活码','单独激活类商品','1000'))
db.session.add(ProdCag('第三分类','单独激活类商品','1000'))
db.session.add(ProdInfo('账户ID','普通商品演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,None, True,0,0,True))
db.session.add(ProdInfo('账户ID','批发商品演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,'9,100#9.9,8.82,7.7', True,0,0,True))
db.session.add(ProdInfo('账户ID','普通商品DD','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,None, False,0,0,False))
db.session.add(ProdInfo('激活码','重复卡密演示','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,None, True,0,0,True))
db.session.add(ProdInfo('激活码','普通商品CC','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,None, True,0,0,True))
db.session.add(ProdInfo('激活码','普通商品BB','商品简述信息演示XXXX','images/null.png','100','演示:我是商品描述信息',\
9.99,None,True,0,0,False))
db.session.add(Card('普通商品演示','454545454454545454',False,False))
db.session.add(Card('批发商品演示','555555555555555555',False,False))
db.session.add(Card('批发商品演示','666666666666666666',False,False))
db.session.add(Card('重复卡密演示','666666666666666666',True,False))
db.session.add(Config('web_name','KAMIFAKA','网站名称',True))
db.session.add(Config('web_keyword','关键词、收录词汇','网站关键词',True))
db.session.add(Config('description','网站描述信息。。。','网站描述',True))
db.session.add(Config('web_url','https://baidu.com','网站实际地址',True))
db.session.add(Config('web_bg_url','https://cdn.jsdelivr.net/gh/Baiyuetribe/yyycode@dev/colorfull.jpg','网站背景图片',True))
db.session.add(Config('contact_us','<p>示例,请在管理后台>>网站设置里修改,支持HTML格式</p>','首页-联系我们',True))
db.session.add(Config('top_notice','首页公告栏信息,请在管理后台,网站设置里修改,支持HTML格式','首页公告',True))
db.session.add(Config('toast_notice','演示站随时更新,可优先体验新功能','首页滑动消息设置',True))
db.session.add(Config('contact_option','0','是否启用联系方式查询[0启用,1关闭]',True))
db.session.add(Config('theme','list','主题',False))
db.session.add(Config('kamiFaka','https://github.com/Baiyuetribe/kamiFaka','Github项目地址,用于手动检测新版',False))
db.session.add(Config('kamiFaka_v','1.8','Github项目地址,用于手动检测新版',False))
db.session.add(Notice('邮箱通知',"{'sendname':'no_replay','sendmail':'demo@gmail.com','smtp_address':'smtp.qq.com','smtp_port':'465','smtp_pwd':'ZZZZZZZ'}",'demo@qq.com',False,False))
db.session.add(Notice('微信通知',"{'token':'AT_nvlYDjev89gV96hBAvUX5HR3idWQwLlA'}",'xxxxxxxxxxxxxxxx',False,False))
db.session.add(Notice('TG通知',"{'TG_TOKEN':'1290570937:AAHaXA2uOvDoGKbGeY4xVIi5kR7K55saXhs'}",'445545444',False,False))
db.session.add(Notice('短信通知',"{'username':'XXXXXX','password':'YYYYY','tokenYZM':'必填','templateid':'必填'}",'15347875415',False,False))
db.session.add(Notice('QQ通知',"{'Key':'null'}",'格式:您的KEY@已添加的QQ号,示例:abc@123',False,False))
db.session.add(Order('演示订单可删除','普通商品演示','支付宝当面付','472835979','请求尽快发货',9.99,1,0.9,'账号:xxxxx;密码:xxxx',None,None))
db.session.add(Order('演示订单可删除2','普通商品演示','虎皮椒微信','458721@qq.com','非常感谢',9.99,3,1.97,None,None,None))
db.session.add(Order('Order_1608107857954q7kyldyg','普通商品演示','虎皮椒支付宝','demo@gmail.com','不错',9.99,1,0.9,'此处为卡密',None,None))
db.session.add(Order('演示订单4457','普通商品演示','虎皮椒支付宝','472835979','不错',9.99,1,1.9,'TG卡密DEMO',None,None))
db.session.add(Plugin('TG发卡',"{'TG_TOKEN':'1488086653:AAHihuO0JuvmiDNZtsYcDBpUhL1rTDO6o1C'}",'### 示例 \n请在管理后台--》Telegram里设置,支持HTML格式',False))
db.session.add(Plugin('微信公众号',"{'PID':'xxxxxxxxxxxx'}",'<p>示例,请在管理后台>>Telegram里设置,支持HTML格式</p>',False))
db.session.commit()
| true | true |
f7ffaaeb999762ff1aa782cb6d15a2736a36595a | 3,745 | py | Python | task_dispatcher.py | embeddedden/organizer | afe3243b535137d83b12e5c8ebaa58cf8a7a1a48 | [
"MIT"
] | 4 | 2020-05-24T20:46:02.000Z | 2020-06-16T12:37:15.000Z | task_dispatcher.py | embeddedden/organizer | afe3243b535137d83b12e5c8ebaa58cf8a7a1a48 | [
"MIT"
] | null | null | null | task_dispatcher.py | embeddedden/organizer | afe3243b535137d83b12e5c8ebaa58cf8a7a1a48 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Created on Sun May 17 00:39:50 2020
@author: embden
"""
from task import Task, task_id
from csv_data_organizer import CSVDataOrganizer
from datetime import timedelta, datetime
class TaskDispatcher:
    """Implement inner logic of task management.

    Keeps the current session's tasks in memory and persists changes
    through a ``CSVDataOrganizer`` backend.
    """

    def __init__(self, session_tasks=None):
        """Initialize Task Dispatcher with an optional preloaded task dict."""
        # Create a fresh dict per instance when none is supplied.
        self.session_tasks = session_tasks if session_tasks is not None else {}
        self.active_tasks = {}
        self.task_db = CSVDataOrganizer()
        # Maps category name -> number of tasks seen with that category.
        self.existing_categories = {}

    def add_new_task(self, task_name, task_category=None, activity_periods=None):
        """Register a task in the session and persist it immediately."""
        # TODO: decide what to do when a task with an equal name exists —
        # currently the old entry is silently overwritten; a dedicated
        # NameExists exception would be safer.
        key = task_id(task_name, task_category)
        self.session_tasks[key] = Task(task_name, task_category, activity_periods)
        self.write_changes_for_task(self.session_tasks[key])

    def make_task_active(self, task_name, task_category):
        """Move a known task to the active set and start its timer.

        Raises KeyError if the task was never added to the session.
        """
        key = task_id(task_name, task_category)
        self.active_tasks[key] = self.session_tasks[key]
        self.session_tasks[key].start_task()

    def make_task_stopped(self, task_name, task_category):
        """Stop an active task and persist its finished activity period.

        Raises KeyError if the task is not currently active.
        """
        key = task_id(task_name, task_category)
        self.active_tasks.pop(key)
        self.session_tasks[key].stop_task()
        self.task_db.write_tasks_data([self.session_tasks[key]])

    def get_previous_tasks(self, period=7):
        """Load and register tasks from the last *period* days of history."""
        previous_tasks = self.task_db.read_previous_tasks(period)
        for task_to_add in previous_tasks:
            # Count category occurrences for later lookups/suggestions.
            seen = self.existing_categories.get(task_to_add.category, 0)
            self.existing_categories[task_to_add.category] = seen + 1
            self.add_new_task(task_to_add.name, task_to_add.category,
                              task_to_add.activity_periods)
        return previous_tasks

    def get_tasks_and_duration(self, period=7, min_dur_bound=5):
        """Return {task_id: hours spent} over the last *period* days.

        Tasks whose total duration is below *min_dur_bound* minutes are
        omitted from the result.
        """
        finish_before = datetime.now() - timedelta(days=period)
        tasks_and_durs = {}
        for tmp_task_id, task in self.session_tasks.items():
            total_hours = 0.0
            # BUGFIX: the inner loop variable used to shadow the *period*
            # parameter; use a distinct name for the activity intervals.
            for interval in task.activity_periods:
                if interval[1] > finish_before:
                    total_hours += (interval[1] - interval[0]) / timedelta(hours=1)
            tasks_and_durs[tmp_task_id] = total_hours
        # Filter out tasks shorter than the minimum bound (in hours).
        min_hours = timedelta(minutes=min_dur_bound) / timedelta(hours=1)
        return {tid: dur for tid, dur in tasks_and_durs.items()
                if dur >= min_hours}

    def get_existing_categories(self):
        """Return the list of category names seen in the task history."""
        return list(self.existing_categories.keys())

    def write_changes_for_task(self, task_c):
        """Persist pending activity periods of *task_c* and clear its cache."""
        self.task_db.write_tasks_data([task_c])
        task_c.new_activity_periods = []
| 43.546512 | 97 | 0.670761 |
from task import Task, task_id
from csv_data_organizer import CSVDataOrganizer
from datetime import timedelta, datetime
class TaskDispatcher:
def __init__(self, session_tasks=None):
if session_tasks is not None:
self.session_tasks = session_tasks
else:
self.session_tasks = dict()
self.active_tasks = dict()
self.task_db = CSVDataOrganizer()
self.existing_categories = dict()
def add_new_task(self, task_name, task_category=None, activity_periods=None):
tmp_task = Task(task_name, task_category, activity_periods)
self.session_tasks[task_id(task_name, task_category)] = tmp_task
self.write_changes_for_task(self.session_tasks[task_id(task_name, task_category)])
def make_task_active(self, task_name, task_category):
self.active_tasks[task_id(task_name, task_category)] =\
self.session_tasks[task_id(task_name, task_category)]
self.session_tasks[task_id(task_name, task_category)].start_task()
def make_task_stopped(self, task_name, task_category):
self.active_tasks.pop(task_id(task_name, task_category))
self.session_tasks[task_id(task_name, task_category)].stop_task()
self.task_db.write_tasks_data([self.session_tasks[task_id(task_name, task_category)]])
def get_previous_tasks(self, period=7):
previous_tasks = self.task_db.read_previous_tasks(period)
for task_to_add in previous_tasks:
if task_to_add.category in self.existing_categories.keys():
self.existing_categories[task_to_add.category] += 1
else:
self.existing_categories[task_to_add.category] = 1
self.add_new_task(task_to_add.name, task_to_add.category,
task_to_add.activity_periods)
return previous_tasks
def get_tasks_and_duration(self, period=7, min_dur_bound=5):
tasks_and_durs = dict()
finish_before = datetime.now() - timedelta(days=period)
for tmp_task_id in self.session_tasks.keys():
tasks_and_durs[tmp_task_id] = 0.0
for period in self.session_tasks[tmp_task_id].activity_periods:
if period[1] > finish_before:
tasks_and_durs[tmp_task_id] += (period[1]-period[0])/timedelta(hours=1)
tasks_to_delete = []
for tmp_task_id in tasks_and_durs.keys():
if tasks_and_durs[tmp_task_id] < timedelta(minutes=min_dur_bound)/timedelta(hours=1):
tasks_to_delete.append(tmp_task_id)
for a in tasks_to_delete:
del tasks_and_durs[a]
return tasks_and_durs
def get_existing_categories(self):
return list(self.existing_categories.keys())
def write_changes_for_task(self, task_c):
self.task_db.write_tasks_data([task_c])
task_c.new_activity_periods = []
| true | true |
f7ffab09880b5d622e469eac600cefc923031191 | 447 | py | Python | app/helpers/File.py | Abdusalam-mah/omr-fastapi | 95b8888116612c4d37862dc29596860bc9825a68 | [
"MIT"
] | 1 | 2022-03-29T19:08:59.000Z | 2022-03-29T19:08:59.000Z | app/helpers/File.py | Abdusalam-mah/omr-fastapi | 95b8888116612c4d37862dc29596860bc9825a68 | [
"MIT"
] | 1 | 2022-03-24T18:04:18.000Z | 2022-03-24T18:04:18.000Z | app/helpers/File.py | Abdusalam-mah/omr-fastapi | 95b8888116612c4d37862dc29596860bc9825a68 | [
"MIT"
] | null | null | null | import shutil
import os
def saveTempFile(file):
""" save the file to the temp images folder """
with open(file.filename, "wb") as f:
shutil.copyfileobj(file.file, f)
shutil.move(file.filename, 'temp/images/'+file.filename)
# return the filename for identification purposes
return file.filename
def deleteTempFile(file):
""" delete the file from the temp images folder """
os.remove('temp/images/'+file)
| 26.294118 | 64 | 0.682327 | import shutil
import os
def saveTempFile(file):
with open(file.filename, "wb") as f:
shutil.copyfileobj(file.file, f)
shutil.move(file.filename, 'temp/images/'+file.filename)
return file.filename
def deleteTempFile(file):
os.remove('temp/images/'+file)
| true | true |
f7ffab68bc2f8b44a9c2cdc17a0c0c53fc16a43f | 326 | py | Python | examples/convert_to_hass.py | schoenpat/sonyapilib | 367490d53dbd8b4c20ea145f14600e3e3356ef71 | [
"MIT"
] | null | null | null | examples/convert_to_hass.py | schoenpat/sonyapilib | 367490d53dbd8b4c20ea145f14600e3e3356ef71 | [
"MIT"
] | null | null | null | examples/convert_to_hass.py | schoenpat/sonyapilib | 367490d53dbd8b4c20ea145f14600e3e3356ef71 | [
"MIT"
] | null | null | null | import json
from sonyapilib.device import SonyDevice
config_file = 'bluray.json'
with open(config_file, 'r') as myfile:
data=myfile.read()
device = SonyDevice.load_from_json(data)
hass_cfg = {}
hass_cfg[device.host] = {}
hass_cfg[device.host]["device"] = data
print(json.dumps(hass_cfg), file=open("sony.conf", "w")) | 21.733333 | 57 | 0.723926 | import json
from sonyapilib.device import SonyDevice
config_file = 'bluray.json'
with open(config_file, 'r') as myfile:
data=myfile.read()
device = SonyDevice.load_from_json(data)
hass_cfg = {}
hass_cfg[device.host] = {}
hass_cfg[device.host]["device"] = data
print(json.dumps(hass_cfg), file=open("sony.conf", "w")) | true | true |
f7ffabd32a29075b3dd0d5b53bd44b681e980a68 | 2,870 | py | Python | hs_swat_modelinstance/receivers.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 1 | 2018-09-17T13:07:29.000Z | 2018-09-17T13:07:29.000Z | hs_swat_modelinstance/receivers.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 100 | 2017-08-01T23:48:04.000Z | 2018-04-03T13:17:27.000Z | hs_swat_modelinstance/receivers.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | 2 | 2017-07-27T20:41:33.000Z | 2017-07-27T22:40:57.000Z | from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
from hs_core.signals import pre_metadata_element_create, pre_metadata_element_update,\
pre_create_resource, post_metadata_element_update
import hs_swat_modelinstance.models as swat_models
from hs_swat_modelinstance.forms import ModelOutputValidationForm, ExecutedByValidationForm,\
ModelObjectiveValidationForm, SimulationTypeValidationForm, ModelMethodValidationForm,\
ModelParameterValidationForm, ModelInputValidationForm
@receiver(pre_create_resource, sender=swat_models.SWATModelInstanceResource)
def swatmodelinstance_pre_create_resource(sender, **kwargs):
metadata = kwargs['metadata']
modeloutput = {'modeloutput': {'includes_output': False}}
metadata.append(modeloutput)
@receiver(pre_metadata_element_create, sender=swat_models.SWATModelInstanceResource)
def metadata_element_pre_create_handler(sender, **kwargs):
return _process_metadata_update_create(**kwargs)
@receiver(pre_metadata_element_update, sender=swat_models.SWATModelInstanceResource)
def metadata_element_pre_update_handler(sender, **kwargs):
return _process_metadata_update_create(**kwargs)
@receiver(post_metadata_element_update, sender=swat_models.SWATModelInstanceResource)
def check_element_exist(sender, **kwargs):
element_id = kwargs['element_id']
element_name = kwargs['element_name']
element_exists = False
class_names = vars(swat_models)
for class_name, cls in class_names.iteritems():
if class_name.lower() == element_name.lower():
try:
cls.objects.get(pk=element_id)
element_exists = True
except ObjectDoesNotExist:
break
return {'element_exists': element_exists}
def _process_metadata_update_create(**kwargs):
element_name = kwargs['element_name'].lower()
request = kwargs['request']
if element_name == "modeloutput":
element_form = ModelOutputValidationForm(request.POST)
elif element_name == 'executedby':
element_form = ExecutedByValidationForm(request.POST)
elif element_name == 'modelobjective':
element_form = ModelObjectiveValidationForm(request.POST)
elif element_name == 'simulationtype':
element_form = SimulationTypeValidationForm(request.POST)
elif element_name == 'modelmethod':
element_form = ModelMethodValidationForm(request.POST)
elif element_name == 'modelparameter':
element_form = ModelParameterValidationForm(request.POST)
elif element_name == 'modelinput':
element_form = ModelInputValidationForm(request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
| 41.594203 | 93 | 0.76446 | from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
from hs_core.signals import pre_metadata_element_create, pre_metadata_element_update,\
pre_create_resource, post_metadata_element_update
import hs_swat_modelinstance.models as swat_models
from hs_swat_modelinstance.forms import ModelOutputValidationForm, ExecutedByValidationForm,\
ModelObjectiveValidationForm, SimulationTypeValidationForm, ModelMethodValidationForm,\
ModelParameterValidationForm, ModelInputValidationForm
@receiver(pre_create_resource, sender=swat_models.SWATModelInstanceResource)
def swatmodelinstance_pre_create_resource(sender, **kwargs):
metadata = kwargs['metadata']
modeloutput = {'modeloutput': {'includes_output': False}}
metadata.append(modeloutput)
@receiver(pre_metadata_element_create, sender=swat_models.SWATModelInstanceResource)
def metadata_element_pre_create_handler(sender, **kwargs):
return _process_metadata_update_create(**kwargs)
@receiver(pre_metadata_element_update, sender=swat_models.SWATModelInstanceResource)
def metadata_element_pre_update_handler(sender, **kwargs):
return _process_metadata_update_create(**kwargs)
@receiver(post_metadata_element_update, sender=swat_models.SWATModelInstanceResource)
def check_element_exist(sender, **kwargs):
element_id = kwargs['element_id']
element_name = kwargs['element_name']
element_exists = False
class_names = vars(swat_models)
for class_name, cls in class_names.iteritems():
if class_name.lower() == element_name.lower():
try:
cls.objects.get(pk=element_id)
element_exists = True
except ObjectDoesNotExist:
break
return {'element_exists': element_exists}
def _process_metadata_update_create(**kwargs):
element_name = kwargs['element_name'].lower()
request = kwargs['request']
if element_name == "modeloutput":
element_form = ModelOutputValidationForm(request.POST)
elif element_name == 'executedby':
element_form = ExecutedByValidationForm(request.POST)
elif element_name == 'modelobjective':
element_form = ModelObjectiveValidationForm(request.POST)
elif element_name == 'simulationtype':
element_form = SimulationTypeValidationForm(request.POST)
elif element_name == 'modelmethod':
element_form = ModelMethodValidationForm(request.POST)
elif element_name == 'modelparameter':
element_form = ModelParameterValidationForm(request.POST)
elif element_name == 'modelinput':
element_form = ModelInputValidationForm(request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
| true | true |
f7ffac1c456ec7fca0a3128bbf927972054d90a3 | 2,997 | py | Python | vivisect/tests/testremote.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testremote.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/tests/testremote.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import time
import tempfile
import unittest
import threading
import multiprocessing as mp
import vivisect
import vivisect.tests.helpers as helpers
import vivisect.remote.server as v_r_server
def runServer(name, port):
dirn = os.path.dirname(name)
testfile = helpers.getTestPath('windows', 'amd64', 'firefox.exe')
# load the file in so we get some workspace events, but not so many to make
# this test take forever
vw = vivisect.VivWorkspace()
vw.loadFromFile(testfile)
vw.setMeta('StorageName', name)
vw.saveWorkspace()
v_r_server.runMainServer(dirn, port)
class VivisectRemoteTests(unittest.TestCase):
'''
So...what would be fun is basically a chain of remote workspaces all tied in interesting
configurations.
'''
def test_basic(self):
testfile = helpers.getTestPath('windows', 'amd64', 'firefox.exe')
good = vivisect.VivWorkspace()
good.loadFromFile(testfile)
host = '0.0.0.0'
port = 0x4097
with tempfile.TemporaryDirectory() as tmpd:
with tempfile.NamedTemporaryFile(dir=tmpd) as tmpf:
proc = mp.Process(target=runServer, args=(tmpf.name, port,))
proc.daemon = True
proc.start()
# give the other process time to spin up
time.sleep(0.5)
# So...yea. The server could not be up yet, but I'm not waiting a mmillion years to
# wait for it.
retry = 0
conn = False
while retry < 5:
try:
server = v_r_server.connectToServer(host, port)
conn = True
break
except:
retry += 1
time.sleep(0.2)
if not conn:
self.fail('Could not connect to %s:%s' % (host, port))
wslist = server.listWorkspaces()
self.assertEqual(len(wslist), 1)
self.assertEqual(server.getServerVersion(), 20130820)
othr = v_r_server.getServerWorkspace(server, wslist[0])
# So the consumption of events from the server is *also* threaded, so I've got to do some blocking
# to get us to wait on things
retry = 0
while retry < 5:
locs = othr.getLocations()
if len(locs) != 1380:
retry += 1
time.sleep(0.2)
else:
break
self.assertEqual(len(othr.getLocations()), 1380)
self.assertEqual(set(othr.getLocations()), set(good.getLocations()))
self.assertEqual(set(othr.getXrefs()), set(good.getXrefs()))
try:
proc.terminate()
proc.close()
except:
pass
| 33.674157 | 114 | 0.532866 | import os
import time
import tempfile
import unittest
import threading
import multiprocessing as mp
import vivisect
import vivisect.tests.helpers as helpers
import vivisect.remote.server as v_r_server
def runServer(name, port):
dirn = os.path.dirname(name)
testfile = helpers.getTestPath('windows', 'amd64', 'firefox.exe')
vw = vivisect.VivWorkspace()
vw.loadFromFile(testfile)
vw.setMeta('StorageName', name)
vw.saveWorkspace()
v_r_server.runMainServer(dirn, port)
class VivisectRemoteTests(unittest.TestCase):
def test_basic(self):
testfile = helpers.getTestPath('windows', 'amd64', 'firefox.exe')
good = vivisect.VivWorkspace()
good.loadFromFile(testfile)
host = '0.0.0.0'
port = 0x4097
with tempfile.TemporaryDirectory() as tmpd:
with tempfile.NamedTemporaryFile(dir=tmpd) as tmpf:
proc = mp.Process(target=runServer, args=(tmpf.name, port,))
proc.daemon = True
proc.start()
time.sleep(0.5)
# wait for it.
retry = 0
conn = False
while retry < 5:
try:
server = v_r_server.connectToServer(host, port)
conn = True
break
except:
retry += 1
time.sleep(0.2)
if not conn:
self.fail('Could not connect to %s:%s' % (host, port))
wslist = server.listWorkspaces()
self.assertEqual(len(wslist), 1)
self.assertEqual(server.getServerVersion(), 20130820)
othr = v_r_server.getServerWorkspace(server, wslist[0])
# So the consumption of events from the server is *also* threaded, so I've got to do some blocking
retry = 0
while retry < 5:
locs = othr.getLocations()
if len(locs) != 1380:
retry += 1
time.sleep(0.2)
else:
break
self.assertEqual(len(othr.getLocations()), 1380)
self.assertEqual(set(othr.getLocations()), set(good.getLocations()))
self.assertEqual(set(othr.getXrefs()), set(good.getXrefs()))
try:
proc.terminate()
proc.close()
except:
pass
| true | true |
f7ffacc2f2755cda2e764dd600b2d02fc5bd732d | 5,512 | py | Python | tools/python/boutiques/util/utils.py | boutiques/boutiques-schema | 5a2374e87dd837cb83fec9603f32e3c9085613ba | [
"MIT"
] | 54 | 2016-07-21T19:14:13.000Z | 2021-11-16T11:49:15.000Z | tools/python/boutiques/util/utils.py | boutiques/boutiques-schema | 5a2374e87dd837cb83fec9603f32e3c9085613ba | [
"MIT"
] | 539 | 2016-07-20T20:09:38.000Z | 2022-03-17T00:45:26.000Z | tools/python/boutiques/util/utils.py | boutiques/boutiques-schema | 5a2374e87dd837cb83fec9603f32e3c9085613ba | [
"MIT"
] | 52 | 2016-07-22T18:09:59.000Z | 2021-02-03T15:22:55.000Z | import os
import simplejson as json
from boutiques.logger import raise_error, print_warning
from boutiques import __file__ as bfile
# Parses absolute path into filename
def extractFileName(path):
# Helps OS path handle case where "/" is at the end of path
if path is None:
return None
elif path[:-1] == '/':
return os.path.basename(path[:-1]) + "/"
else:
return os.path.basename(path)
class LoadError(Exception):
pass
# Helper function that loads the JSON object coming from either a string,
# a local file or a file pulled from Zenodo
def loadJson(userInput, verbose=False, sandbox=False):
# Check for JSON file (local or from Zenodo)
json_file = None
if os.path.isfile(userInput):
json_file = userInput
elif "zenodo" in ".".join(userInput.split('.')[:-1]).lower():
from boutiques.puller import Puller
puller = Puller([userInput], verbose, sandbox)
json_file = puller.pull()[0]
if json_file is not None:
with open(json_file, 'r') as f:
return json.loads(f.read())
# JSON file not found, so try to parse JSON object
e = ("Cannot parse input {}: file not found, "
"invalid Zenodo ID, or invalid JSON object").format(userInput)
if userInput.isdigit():
raise_error(LoadError, e)
try:
return json.loads(userInput)
except ValueError:
raise_error(LoadError, e)
# Helper function that takes a conditional path template key as input,
# and outputs a formatted string that isolates variables/values from
# operators, parentheses, and python keywords with a space.
# ex: "(opt1>2)" becomes " ( opt1 > 2 ) "
# "(opt1<=10.1)" becomes " ( opt1 <= 10.1 ) "
def conditionalExpFormat(s):
cleanedExpression = ""
idx = 0
while idx < len(s):
c = s[idx]
if c in ['=', '!', '<', '>']:
cleanedExpression += " {0}{1}".format(
c, "=" if s[idx+1] == "=" else " ")
idx += 1
elif c in ['(', ')']:
cleanedExpression += " {0} ".format(c)
else:
cleanedExpression += c
idx += 1
return cleanedExpression
# Sorts and returns a descriptor dictionary according to
# the keys' order in a template descriptor
def customSortDescriptorByKey(descriptor,
template=os.path.join(
os.path.dirname(bfile),
"templates",
"ordered_keys_desc.json")):
def sortListedObjects(objList, template):
sortedObjList = []
for obj in objList:
sortedObj = {key: obj[key] for key in template if
key in obj}
sortedObj.update(obj)
sortedObjList.append(sortedObj)
if len(objList) != len(sortedObjList):
return objList
for obj, sobj in zip(objList, sortedObjList):
if obj != sobj:
print_warning("Sorted list does not represent"
" original list.")
return objList
return sortedObjList
template = loadJson(template)
sortedDesc = {}
# Add k:v to sortedDesc according to their order in template
for key in [k for k in template if k in descriptor]:
if type(descriptor[key]) is list:
sortedDesc[key] =\
sortListedObjects(descriptor[key], template[key][0])
else:
sortedDesc[key] = descriptor[key]
# Add remaining k:v that are missing from template
sortedDesc.update(descriptor)
if sortedDesc != descriptor:
print_warning("Sorted descriptor does not represent"
" original descriptor.")
return descriptor
return sortedDesc
# Sorts tool invocations according to descriptor's inputs'
def customSortInvocationByInput(invocation, descriptor):
descriptor = loadJson(descriptor)
# sort invoc according to input's order in decsriptor
sortedInvoc = {key: invocation[key] for key in
[inp['id'] for inp in descriptor['inputs']
if descriptor['inputs'] is not None]
if key in invocation}
if sortedInvoc != invocation:
print_warning("Sorted invocation does not represent"
" original invocation.")
return invocation
return sortedInvoc
def snakeCaseToCamelCase(id):
words = id.split("_")
for idx, word in enumerate(words[1:]):
if word[0].islower():
words[idx+1] = word[0].upper() + word[1:]
return "".join(words)
def camelCaseInputIds(descriptor):
conversion_dict = {}
if 'inputs' in descriptor:
for inp in descriptor['inputs']:
camelCaseId = snakeCaseToCamelCase(inp['id'])
conversion_dict[inp['id']] = camelCaseId
# Find all instances of old input ids
# and replace them with camelCase ids
plainTextDesc = json.dumps(descriptor, indent=2)
for k, v in conversion_dict.items():
# Only replace ids surrounded by single/double quotes,
# in case the the old input ids are used in other strings
plainTextDesc = plainTextDesc.replace("\"{0}\"".format(k),
"\"{0}\"".format(v))
plainTextDesc = plainTextDesc.replace("\'{0}\'".format(k),
"\'{0}\'".format(v))
descriptor = json.loads(plainTextDesc)
return descriptor
| 35.56129 | 73 | 0.595065 | import os
import simplejson as json
from boutiques.logger import raise_error, print_warning
from boutiques import __file__ as bfile
def extractFileName(path):
if path is None:
return None
elif path[:-1] == '/':
return os.path.basename(path[:-1]) + "/"
else:
return os.path.basename(path)
class LoadError(Exception):
pass
def loadJson(userInput, verbose=False, sandbox=False):
json_file = None
if os.path.isfile(userInput):
json_file = userInput
elif "zenodo" in ".".join(userInput.split('.')[:-1]).lower():
from boutiques.puller import Puller
puller = Puller([userInput], verbose, sandbox)
json_file = puller.pull()[0]
if json_file is not None:
with open(json_file, 'r') as f:
return json.loads(f.read())
e = ("Cannot parse input {}: file not found, "
"invalid Zenodo ID, or invalid JSON object").format(userInput)
if userInput.isdigit():
raise_error(LoadError, e)
try:
return json.loads(userInput)
except ValueError:
raise_error(LoadError, e)
def conditionalExpFormat(s):
cleanedExpression = ""
idx = 0
while idx < len(s):
c = s[idx]
if c in ['=', '!', '<', '>']:
cleanedExpression += " {0}{1}".format(
c, "=" if s[idx+1] == "=" else " ")
idx += 1
elif c in ['(', ')']:
cleanedExpression += " {0} ".format(c)
else:
cleanedExpression += c
idx += 1
return cleanedExpression
def customSortDescriptorByKey(descriptor,
template=os.path.join(
os.path.dirname(bfile),
"templates",
"ordered_keys_desc.json")):
def sortListedObjects(objList, template):
sortedObjList = []
for obj in objList:
sortedObj = {key: obj[key] for key in template if
key in obj}
sortedObj.update(obj)
sortedObjList.append(sortedObj)
if len(objList) != len(sortedObjList):
return objList
for obj, sobj in zip(objList, sortedObjList):
if obj != sobj:
print_warning("Sorted list does not represent"
" original list.")
return objList
return sortedObjList
template = loadJson(template)
sortedDesc = {}
# Add k:v to sortedDesc according to their order in template
for key in [k for k in template if k in descriptor]:
if type(descriptor[key]) is list:
sortedDesc[key] =\
sortListedObjects(descriptor[key], template[key][0])
else:
sortedDesc[key] = descriptor[key]
# Add remaining k:v that are missing from template
sortedDesc.update(descriptor)
if sortedDesc != descriptor:
print_warning("Sorted descriptor does not represent"
" original descriptor.")
return descriptor
return sortedDesc
# Sorts tool invocations according to descriptor's inputs'
def customSortInvocationByInput(invocation, descriptor):
descriptor = loadJson(descriptor)
# sort invoc according to input's order in decsriptor
sortedInvoc = {key: invocation[key] for key in
[inp['id'] for inp in descriptor['inputs']
if descriptor['inputs'] is not None]
if key in invocation}
if sortedInvoc != invocation:
print_warning("Sorted invocation does not represent"
" original invocation.")
return invocation
return sortedInvoc
def snakeCaseToCamelCase(id):
words = id.split("_")
for idx, word in enumerate(words[1:]):
if word[0].islower():
words[idx+1] = word[0].upper() + word[1:]
return "".join(words)
def camelCaseInputIds(descriptor):
conversion_dict = {}
if 'inputs' in descriptor:
for inp in descriptor['inputs']:
camelCaseId = snakeCaseToCamelCase(inp['id'])
conversion_dict[inp['id']] = camelCaseId
plainTextDesc = json.dumps(descriptor, indent=2)
for k, v in conversion_dict.items():
plainTextDesc = plainTextDesc.replace("\"{0}\"".format(k),
"\"{0}\"".format(v))
plainTextDesc = plainTextDesc.replace("\'{0}\'".format(k),
"\'{0}\'".format(v))
descriptor = json.loads(plainTextDesc)
return descriptor
| true | true |
f7ffae48ee2ed967b9483fbdd9ed0347bf577dc8 | 1,748 | py | Python | bot/cogs/prune_users.py | diegorusso/discordbot | 528ab3738b9fe759d65b73f59de8e9cb64f0ac45 | [
"MIT"
] | null | null | null | bot/cogs/prune_users.py | diegorusso/discordbot | 528ab3738b9fe759d65b73f59de8e9cb64f0ac45 | [
"MIT"
] | 2 | 2018-02-11T16:39:47.000Z | 2020-08-11T09:34:27.000Z | bot/cogs/prune_users.py | diegorusso/discordbot | 528ab3738b9fe759d65b73f59de8e9cb64f0ac45 | [
"MIT"
] | 1 | 2018-02-11T19:42:04.000Z | 2018-02-11T19:42:04.000Z | """PruneUsers module."""
import asyncio
class PruneUsers(object):
"""PruneUsers class."""
def __init__(self, bot):
"""Init method for PruneUsers."""
self.bot = bot
server_id = self.bot.config['general']['server_id']
inactive_days = int(self.bot.config['prune_users']['inactive_days'])
bg_interval = int(self.bot.config['prune_users']['bg_interval'])
self.bot.loop.create_task(
self._setup_prune(server_id, inactive_days, bg_interval))
async def _setup_prune(self, server_id, inactive_days, bg_interval):
"""Setup prune users."""
await self.bot.wait_until_ready()
server = self.bot.get_server(server_id)
to_prune = False
while not self.bot.is_closed:
# OK, let's prune them all!
if to_prune:
pm = self.bot.prune_members(server=server, days=inactive_days)
message = "I've pruned {} members because inactive for {} days"
await self.bot.say(message.format(pm, inactive_days))
to_prune = False
# I try to give a warning before the actual pruning
epm = await self.bot.estimate_pruned_members(server=server,
days=inactive_days)
if epm:
message = "{} members will be pruned in {} hours because " \
"inactive for {} days"
await self.bot.say(message.format(epm, bg_interval/3600,
inactive_days))
to_prune = True
await asyncio.sleep(bg_interval)
def setup(bot):
"""Cog setup method."""
bot.add_cog(PruneUsers(bot))
| 38.844444 | 79 | 0.566362 |
import asyncio
class PruneUsers(object):
def __init__(self, bot):
self.bot = bot
server_id = self.bot.config['general']['server_id']
inactive_days = int(self.bot.config['prune_users']['inactive_days'])
bg_interval = int(self.bot.config['prune_users']['bg_interval'])
self.bot.loop.create_task(
self._setup_prune(server_id, inactive_days, bg_interval))
async def _setup_prune(self, server_id, inactive_days, bg_interval):
await self.bot.wait_until_ready()
server = self.bot.get_server(server_id)
to_prune = False
while not self.bot.is_closed:
if to_prune:
pm = self.bot.prune_members(server=server, days=inactive_days)
message = "I've pruned {} members because inactive for {} days"
await self.bot.say(message.format(pm, inactive_days))
to_prune = False
epm = await self.bot.estimate_pruned_members(server=server,
days=inactive_days)
if epm:
message = "{} members will be pruned in {} hours because " \
"inactive for {} days"
await self.bot.say(message.format(epm, bg_interval/3600,
inactive_days))
to_prune = True
await asyncio.sleep(bg_interval)
def setup(bot):
    """Extension entry point used by discord.py's loader."""
    bot.add_cog(PruneUsers(bot))
| true | true |
f7ffaeee77748554cd1a16478e873aa85dffc733 | 16,556 | gyp | Python | libjingle_tests.gyp | whxcctv/I-wanna-talk | e20723b1adae087541378f3551b47bdf7c8391f5 | [
"BSL-1.0",
"BSD-3-Clause"
] | 2 | 2020-10-17T03:21:36.000Z | 2021-06-21T02:41:49.000Z | libjingle_tests.gyp | whxcctv/I-wanna-talk | e20723b1adae087541378f3551b47bdf7c8391f5 | [
"BSL-1.0",
"BSD-3-Clause"
] | null | null | null | libjingle_tests.gyp | whxcctv/I-wanna-talk | e20723b1adae087541378f3551b47bdf7c8391f5 | [
"BSL-1.0",
"BSD-3-Clause"
] | 1 | 2015-09-11T06:51:49.000Z | 2015-09-11T06:51:49.000Z | #
# libjingle
# Copyright 2012 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'includes': ['build/common.gypi'],
'targets': [
{
'target_name': 'libjingle_unittest_main',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<@(libjingle_tests_additional_deps)',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/third_party/libyuv/include',
'<(DEPTH)/testing/gtest/include',
'<(DEPTH)/testing/gtest',
],
},
'include_dirs': [
'<(DEPTH)/testing/gtest/include',
'<(DEPTH)/testing/gtest',
],
'sources': [
'media/base/fakecapturemanager.h',
'media/base/fakemediaengine.h',
'media/base/fakemediaprocessor.h',
'media/base/fakenetworkinterface.h',
'media/base/fakertp.h',
'media/base/fakevideocapturer.h',
'media/base/fakevideorenderer.h',
'media/base/nullvideoframe.h',
'media/base/nullvideorenderer.h',
'media/base/testutils.cc',
'media/base/testutils.h',
'media/devices/fakedevicemanager.h',
'media/webrtc/dummyinstantiation.cc',
'media/webrtc/fakewebrtccommon.h',
'media/webrtc/fakewebrtcdeviceinfo.h',
'media/webrtc/fakewebrtcvcmfactory.h',
'media/webrtc/fakewebrtcvideocapturemodule.h',
'media/webrtc/fakewebrtcvideoengine.h',
'media/webrtc/fakewebrtcvoiceengine.h',
],
}, # target libjingle_unittest_main
{
'target_name': 'libjingle_media_unittest',
'type': 'executable',
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle_media',
'libjingle_unittest_main',
],
'sources': [
'media/base/capturemanager_unittest.cc',
'media/base/codec_unittest.cc',
'media/base/filemediaengine_unittest.cc',
'media/base/rtpdataengine_unittest.cc',
'media/base/rtpdump_unittest.cc',
'media/base/rtputils_unittest.cc',
'media/base/streamparams_unittest.cc',
'media/base/testutils.cc',
'media/base/testutils.h',
'media/base/videoadapter_unittest.cc',
'media/base/videocapturer_unittest.cc',
'media/base/videocommon_unittest.cc',
'media/base/videoengine_unittest.h',
'media/devices/dummydevicemanager_unittest.cc',
'media/devices/filevideocapturer_unittest.cc',
'media/sctp/sctpdataengine_unittest.cc',
'media/webrtc/simulcast_unittest.cc',
'media/webrtc/webrtcpassthroughrender_unittest.cc',
'media/webrtc/webrtcvideocapturer_unittest.cc',
'media/base/videoframe_unittest.h',
'media/webrtc/webrtcvideoframe_unittest.cc',
# Disabled because some tests fail.
# TODO(ronghuawu): Reenable these tests.
# 'media/devices/devicemanager_unittest.cc',
'media/webrtc/webrtcvideoengine_unittest.cc',
'media/webrtc/webrtcvideoengine2_unittest.cc',
'media/webrtc/webrtcvideoengine2_unittest.h',
'media/webrtc/webrtcvoiceengine_unittest.cc',
],
'conditions': [
['OS=="win"', {
'conditions': [
['use_openssl==0', {
'dependencies': [
'<(DEPTH)/net/third_party/nss/ssl.gyp:libssl',
'<(DEPTH)/third_party/nss/nss.gyp:nspr',
'<(DEPTH)/third_party/nss/nss.gyp:nss',
],
}],
],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
# TODO(ronghuawu): Since we've included strmiids in
# libjingle_media target, we shouldn't need this here.
# Find out why it doesn't work without this.
'strmiids.lib',
],
},
},
}],
['OS=="ios"', {
'sources!': [
'media/sctp/sctpdataengine_unittest.cc',
],
}],
],
}, # target libjingle_media_unittest
{
'target_name': 'libjingle_p2p_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/third_party/libsrtp/libsrtp.gyp:libsrtp',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle',
'libjingle.gyp:libjingle_p2p',
'libjingle_unittest_main',
],
'include_dirs': [
'<(DEPTH)/third_party/libsrtp/srtp',
],
'sources': [
'session/media/bundlefilter_unittest.cc',
'session/media/channel_unittest.cc',
'session/media/channelmanager_unittest.cc',
'session/media/currentspeakermonitor_unittest.cc',
'session/media/mediarecorder_unittest.cc',
'session/media/mediasession_unittest.cc',
'session/media/rtcpmuxfilter_unittest.cc',
'session/media/srtpfilter_unittest.cc',
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'strmiids.lib',
],
},
},
}],
],
}, # target libjingle_p2p_unittest
{
'target_name': 'libjingle_peerconnection_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<(webrtc_root)/common.gyp:webrtc_common',
'libjingle.gyp:libjingle',
'libjingle.gyp:libjingle_p2p',
'libjingle.gyp:libjingle_peerconnection',
'libjingle_unittest_main',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/testing/gmock/include',
],
},
'sources': [
'app/webrtc/datachannel_unittest.cc',
'app/webrtc/dtlsidentitystore_unittest.cc',
'app/webrtc/dtmfsender_unittest.cc',
'app/webrtc/jsepsessiondescription_unittest.cc',
'app/webrtc/localaudiosource_unittest.cc',
'app/webrtc/mediastream_unittest.cc',
'app/webrtc/mediastreamhandler_unittest.cc',
'app/webrtc/mediastreamsignaling_unittest.cc',
'app/webrtc/peerconnection_unittest.cc',
'app/webrtc/peerconnectionendtoend_unittest.cc',
'app/webrtc/peerconnectionfactory_unittest.cc',
'app/webrtc/peerconnectioninterface_unittest.cc',
# 'app/webrtc/peerconnectionproxy_unittest.cc',
'app/webrtc/remotevideocapturer_unittest.cc',
'app/webrtc/sctputils.cc',
'app/webrtc/statscollector_unittest.cc',
'app/webrtc/test/fakeaudiocapturemodule.cc',
'app/webrtc/test/fakeaudiocapturemodule.h',
'app/webrtc/test/fakeaudiocapturemodule_unittest.cc',
'app/webrtc/test/fakeconstraints.h',
'app/webrtc/test/fakedatachannelprovider.h',
'app/webrtc/test/fakedtlsidentityservice.h',
'app/webrtc/test/fakemediastreamsignaling.h',
'app/webrtc/test/fakeperiodicvideocapturer.h',
'app/webrtc/test/fakevideotrackrenderer.h',
'app/webrtc/test/mockpeerconnectionobservers.h',
'app/webrtc/test/peerconnectiontestwrapper.h',
'app/webrtc/test/peerconnectiontestwrapper.cc',
'app/webrtc/test/testsdpstrings.h',
'app/webrtc/videosource_unittest.cc',
'app/webrtc/videotrack_unittest.cc',
'app/webrtc/webrtcsdp_unittest.cc',
'app/webrtc/webrtcsession_unittest.cc',
],
'conditions': [
['OS=="android"', {
# We want gmock features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gmock supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
# GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
# gmock r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
# automatically on android, so it has to be set explicitly here.
'GTEST_HAS_TR1_TUPLE=1',
],
}],
],
}, # target libjingle_peerconnection_unittest
],
'conditions': [
['OS=="linux"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_test_jar',
'type': 'none',
'actions': [
{
'variables': {
'java_src_dir': 'app/webrtc/javatests/src',
'java_files': [
'app/webrtc/java/testcommon/src/org/webrtc/PeerConnectionTest.java',
'app/webrtc/javatests/src/org/webrtc/PeerConnectionTestJava.java',
],
},
'action_name': 'create_jar',
'inputs': [
'build/build_jar.sh',
'<@(java_files)',
'<(PRODUCT_DIR)/libjingle_peerconnection.jar',
'<(PRODUCT_DIR)/lib/libjingle_peerconnection_so.so',
'<(DEPTH)/third_party/junit/junit-4.11.jar',
],
'outputs': [
'<(PRODUCT_DIR)/libjingle_peerconnection_test.jar',
],
'action': [
'build/build_jar.sh', '<(java_home)', '<@(_outputs)',
'<(INTERMEDIATE_DIR)',
'<(java_src_dir):<(PRODUCT_DIR)/libjingle_peerconnection.jar:<(DEPTH)/third_party/junit/junit-4.11.jar',
'<@(java_files)'
],
},
],
},
{
'target_name': 'libjingle_peerconnection_java_unittest',
'type': 'none',
'actions': [
{
'action_name': 'copy libjingle_peerconnection_java_unittest',
'inputs': [
'app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh',
'<(PRODUCT_DIR)/libjingle_peerconnection_test_jar',
'<(DEPTH)/third_party/junit/junit-4.11.jar',
],
'outputs': [
'<(PRODUCT_DIR)/libjingle_peerconnection_java_unittest',
],
'action': [
'bash', '-c',
'rm -f <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest && '
'sed -e "s@GYP_JAVA_HOME@<(java_home)@" '
'< app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh '
'> <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest && '
'cp <(DEPTH)/third_party/junit/junit-4.11.jar <(PRODUCT_DIR) && '
'chmod u+x <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest'
],
},
],
},
],
}],
['OS=="android"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_android_unittest',
'type': 'none',
'dependencies': [
'libjingle.gyp:libjingle_peerconnection_java',
],
'variables': {
'apk_name': 'libjingle_peerconnection_android_unittest',
'java_in_dir': 'app/webrtc/androidtests',
'resource_dir': 'app/webrtc/androidtests/res',
'additional_src_dirs': ['app/webrtc/java/testcommon'],
'native_lib_target': 'libjingle_peerconnection_so',
'is_test_apk': 1,
},
'includes': [ '../build/java_apk.gypi' ],
},
], # targets
}], # OS=="android"
['OS=="ios" or (OS=="mac" and target_arch!="ia32" and mac_sdk>="10.7")', {
# The >=10.7 above is required to make ARC link cleanly (e.g. as
# opposed to _compile_ cleanly, which the library under test
# does just fine on 10.6 too).
'targets': [
{
'target_name': 'libjingle_peerconnection_objc_test',
'type': 'executable',
'includes': [ 'build/objc_app.gypi' ],
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle_peerconnection_objc',
],
'sources': [
'app/webrtc/objctests/RTCPeerConnectionSyncObserver.h',
'app/webrtc/objctests/RTCPeerConnectionSyncObserver.m',
'app/webrtc/objctests/RTCPeerConnectionTest.mm',
'app/webrtc/objctests/RTCSessionDescriptionSyncObserver.h',
'app/webrtc/objctests/RTCSessionDescriptionSyncObserver.m',
# TODO(fischman): figure out if this works for ios or if it
# needs a GUI driver.
'app/webrtc/objctests/mac/main.mm',
],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
# Need to build against 10.7 framework for full ARC support
# on OSX.
'MACOSX_DEPLOYMENT_TARGET' : '10.7',
# common.gypi enables this for mac but we want this to be
# disabled like it is for ios.
'CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS': 'NO',
},
}],
],
}, # target libjingle_peerconnection_objc_test
{
'target_name': 'apprtc_signaling_gunit_test',
'type': 'executable',
'includes': [ 'build/objc_app.gypi' ],
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<(DEPTH)/third_party/ocmock/ocmock.gyp:ocmock',
'libjingle_examples.gyp:apprtc_signaling',
],
'sources': [
'app/webrtc/objctests/mac/main.mm',
'examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm',
],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET' : '10.8',
},
}],
],
}, # target apprtc_signaling_gunit_test
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'libjingle_media_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_media_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_media_unittest.isolate',
],
},
{
'target_name': 'libjingle_p2p_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_p2p_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_p2p_unittest.isolate',
],
},
{
'target_name': 'libjingle_peerconnection_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_peerconnection_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_peerconnection_unittest.isolate',
],
},
],
}],
],
}
| 38.502326 | 120 | 0.578884 |
{
'includes': ['build/common.gypi'],
'targets': [
{
'target_name': 'libjingle_unittest_main',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<@(libjingle_tests_additional_deps)',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/third_party/libyuv/include',
'<(DEPTH)/testing/gtest/include',
'<(DEPTH)/testing/gtest',
],
},
'include_dirs': [
'<(DEPTH)/testing/gtest/include',
'<(DEPTH)/testing/gtest',
],
'sources': [
'media/base/fakecapturemanager.h',
'media/base/fakemediaengine.h',
'media/base/fakemediaprocessor.h',
'media/base/fakenetworkinterface.h',
'media/base/fakertp.h',
'media/base/fakevideocapturer.h',
'media/base/fakevideorenderer.h',
'media/base/nullvideoframe.h',
'media/base/nullvideorenderer.h',
'media/base/testutils.cc',
'media/base/testutils.h',
'media/devices/fakedevicemanager.h',
'media/webrtc/dummyinstantiation.cc',
'media/webrtc/fakewebrtccommon.h',
'media/webrtc/fakewebrtcdeviceinfo.h',
'media/webrtc/fakewebrtcvcmfactory.h',
'media/webrtc/fakewebrtcvideocapturemodule.h',
'media/webrtc/fakewebrtcvideoengine.h',
'media/webrtc/fakewebrtcvoiceengine.h',
],
},
{
'target_name': 'libjingle_media_unittest',
'type': 'executable',
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle_media',
'libjingle_unittest_main',
],
'sources': [
'media/base/capturemanager_unittest.cc',
'media/base/codec_unittest.cc',
'media/base/filemediaengine_unittest.cc',
'media/base/rtpdataengine_unittest.cc',
'media/base/rtpdump_unittest.cc',
'media/base/rtputils_unittest.cc',
'media/base/streamparams_unittest.cc',
'media/base/testutils.cc',
'media/base/testutils.h',
'media/base/videoadapter_unittest.cc',
'media/base/videocapturer_unittest.cc',
'media/base/videocommon_unittest.cc',
'media/base/videoengine_unittest.h',
'media/devices/dummydevicemanager_unittest.cc',
'media/devices/filevideocapturer_unittest.cc',
'media/sctp/sctpdataengine_unittest.cc',
'media/webrtc/simulcast_unittest.cc',
'media/webrtc/webrtcpassthroughrender_unittest.cc',
'media/webrtc/webrtcvideocapturer_unittest.cc',
'media/base/videoframe_unittest.h',
'media/webrtc/webrtcvideoframe_unittest.cc',
'media/webrtc/webrtcvideoengine_unittest.cc',
'media/webrtc/webrtcvideoengine2_unittest.cc',
'media/webrtc/webrtcvideoengine2_unittest.h',
'media/webrtc/webrtcvoiceengine_unittest.cc',
],
'conditions': [
['OS=="win"', {
'conditions': [
['use_openssl==0', {
'dependencies': [
'<(DEPTH)/net/third_party/nss/ssl.gyp:libssl',
'<(DEPTH)/third_party/nss/nss.gyp:nspr',
'<(DEPTH)/third_party/nss/nss.gyp:nss',
],
}],
],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
# libjingle_media target, we shouldn't need this here.
'strmiids.lib',
],
},
},
}],
['OS=="ios"', {
'sources!': [
'media/sctp/sctpdataengine_unittest.cc',
],
}],
],
}, # target libjingle_media_unittest
{
'target_name': 'libjingle_p2p_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/third_party/libsrtp/libsrtp.gyp:libsrtp',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle',
'libjingle.gyp:libjingle_p2p',
'libjingle_unittest_main',
],
'include_dirs': [
'<(DEPTH)/third_party/libsrtp/srtp',
],
'sources': [
'session/media/bundlefilter_unittest.cc',
'session/media/channel_unittest.cc',
'session/media/channelmanager_unittest.cc',
'session/media/currentspeakermonitor_unittest.cc',
'session/media/mediarecorder_unittest.cc',
'session/media/mediasession_unittest.cc',
'session/media/rtcpmuxfilter_unittest.cc',
'session/media/srtpfilter_unittest.cc',
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'strmiids.lib',
],
},
},
}],
],
}, # target libjingle_p2p_unittest
{
'target_name': 'libjingle_peerconnection_unittest',
'type': 'executable',
'dependencies': [
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<(webrtc_root)/common.gyp:webrtc_common',
'libjingle.gyp:libjingle',
'libjingle.gyp:libjingle_p2p',
'libjingle.gyp:libjingle_peerconnection',
'libjingle_unittest_main',
],
'direct_dependent_settings': {
'include_dirs': [
'<(DEPTH)/testing/gmock/include',
],
},
'sources': [
'app/webrtc/datachannel_unittest.cc',
'app/webrtc/dtlsidentitystore_unittest.cc',
'app/webrtc/dtmfsender_unittest.cc',
'app/webrtc/jsepsessiondescription_unittest.cc',
'app/webrtc/localaudiosource_unittest.cc',
'app/webrtc/mediastream_unittest.cc',
'app/webrtc/mediastreamhandler_unittest.cc',
'app/webrtc/mediastreamsignaling_unittest.cc',
'app/webrtc/peerconnection_unittest.cc',
'app/webrtc/peerconnectionendtoend_unittest.cc',
'app/webrtc/peerconnectionfactory_unittest.cc',
'app/webrtc/peerconnectioninterface_unittest.cc',
# 'app/webrtc/peerconnectionproxy_unittest.cc',
'app/webrtc/remotevideocapturer_unittest.cc',
'app/webrtc/sctputils.cc',
'app/webrtc/statscollector_unittest.cc',
'app/webrtc/test/fakeaudiocapturemodule.cc',
'app/webrtc/test/fakeaudiocapturemodule.h',
'app/webrtc/test/fakeaudiocapturemodule_unittest.cc',
'app/webrtc/test/fakeconstraints.h',
'app/webrtc/test/fakedatachannelprovider.h',
'app/webrtc/test/fakedtlsidentityservice.h',
'app/webrtc/test/fakemediastreamsignaling.h',
'app/webrtc/test/fakeperiodicvideocapturer.h',
'app/webrtc/test/fakevideotrackrenderer.h',
'app/webrtc/test/mockpeerconnectionobservers.h',
'app/webrtc/test/peerconnectiontestwrapper.h',
'app/webrtc/test/peerconnectiontestwrapper.cc',
'app/webrtc/test/testsdpstrings.h',
'app/webrtc/videosource_unittest.cc',
'app/webrtc/videotrack_unittest.cc',
'app/webrtc/webrtcsdp_unittest.cc',
'app/webrtc/webrtcsession_unittest.cc',
],
'conditions': [
['OS=="android"', {
# We want gmock features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gmock supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
# GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
# gmock r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
# automatically on android, so it has to be set explicitly here.
'GTEST_HAS_TR1_TUPLE=1',
],
}],
],
}, # target libjingle_peerconnection_unittest
],
'conditions': [
['OS=="linux"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_test_jar',
'type': 'none',
'actions': [
{
'variables': {
'java_src_dir': 'app/webrtc/javatests/src',
'java_files': [
'app/webrtc/java/testcommon/src/org/webrtc/PeerConnectionTest.java',
'app/webrtc/javatests/src/org/webrtc/PeerConnectionTestJava.java',
],
},
'action_name': 'create_jar',
'inputs': [
'build/build_jar.sh',
'<@(java_files)',
'<(PRODUCT_DIR)/libjingle_peerconnection.jar',
'<(PRODUCT_DIR)/lib/libjingle_peerconnection_so.so',
'<(DEPTH)/third_party/junit/junit-4.11.jar',
],
'outputs': [
'<(PRODUCT_DIR)/libjingle_peerconnection_test.jar',
],
'action': [
'build/build_jar.sh', '<(java_home)', '<@(_outputs)',
'<(INTERMEDIATE_DIR)',
'<(java_src_dir):<(PRODUCT_DIR)/libjingle_peerconnection.jar:<(DEPTH)/third_party/junit/junit-4.11.jar',
'<@(java_files)'
],
},
],
},
{
'target_name': 'libjingle_peerconnection_java_unittest',
'type': 'none',
'actions': [
{
'action_name': 'copy libjingle_peerconnection_java_unittest',
'inputs': [
'app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh',
'<(PRODUCT_DIR)/libjingle_peerconnection_test_jar',
'<(DEPTH)/third_party/junit/junit-4.11.jar',
],
'outputs': [
'<(PRODUCT_DIR)/libjingle_peerconnection_java_unittest',
],
'action': [
'bash', '-c',
'rm -f <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest && '
'sed -e "s@GYP_JAVA_HOME@<(java_home)@" '
'< app/webrtc/javatests/libjingle_peerconnection_java_unittest.sh '
'> <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest && '
'cp <(DEPTH)/third_party/junit/junit-4.11.jar <(PRODUCT_DIR) && '
'chmod u+x <(PRODUCT_DIR)/libjingle_peerconnection_java_unittest'
],
},
],
},
],
}],
['OS=="android"', {
'targets': [
{
'target_name': 'libjingle_peerconnection_android_unittest',
'type': 'none',
'dependencies': [
'libjingle.gyp:libjingle_peerconnection_java',
],
'variables': {
'apk_name': 'libjingle_peerconnection_android_unittest',
'java_in_dir': 'app/webrtc/androidtests',
'resource_dir': 'app/webrtc/androidtests/res',
'additional_src_dirs': ['app/webrtc/java/testcommon'],
'native_lib_target': 'libjingle_peerconnection_so',
'is_test_apk': 1,
},
'includes': [ '../build/java_apk.gypi' ],
},
], # targets
}], # OS=="android"
['OS=="ios" or (OS=="mac" and target_arch!="ia32" and mac_sdk>="10.7")', {
# The >=10.7 above is required to make ARC link cleanly (e.g. as
# opposed to _compile_ cleanly, which the library under test
# does just fine on 10.6 too).
'targets': [
{
'target_name': 'libjingle_peerconnection_objc_test',
'type': 'executable',
'includes': [ 'build/objc_app.gypi' ],
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'libjingle.gyp:libjingle_peerconnection_objc',
],
'sources': [
'app/webrtc/objctests/RTCPeerConnectionSyncObserver.h',
'app/webrtc/objctests/RTCPeerConnectionSyncObserver.m',
'app/webrtc/objctests/RTCPeerConnectionTest.mm',
'app/webrtc/objctests/RTCSessionDescriptionSyncObserver.h',
'app/webrtc/objctests/RTCSessionDescriptionSyncObserver.m',
# TODO(fischman): figure out if this works for ios or if it
# needs a GUI driver.
'app/webrtc/objctests/mac/main.mm',
],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
# Need to build against 10.7 framework for full ARC support
# on OSX.
'MACOSX_DEPLOYMENT_TARGET' : '10.7',
# common.gypi enables this for mac but we want this to be
# disabled like it is for ios.
'CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS': 'NO',
},
}],
],
}, # target libjingle_peerconnection_objc_test
{
'target_name': 'apprtc_signaling_gunit_test',
'type': 'executable',
'includes': [ 'build/objc_app.gypi' ],
'dependencies': [
'<(webrtc_root)/base/base_tests.gyp:rtc_base_tests_utils',
'<(DEPTH)/third_party/ocmock/ocmock.gyp:ocmock',
'libjingle_examples.gyp:apprtc_signaling',
],
'sources': [
'app/webrtc/objctests/mac/main.mm',
'examples/objc/AppRTCDemo/tests/ARDAppClientTest.mm',
],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET' : '10.8',
},
}],
],
}, # target apprtc_signaling_gunit_test
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'libjingle_media_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_media_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_media_unittest.isolate',
],
},
{
'target_name': 'libjingle_p2p_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_p2p_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_p2p_unittest.isolate',
],
},
{
'target_name': 'libjingle_peerconnection_unittest_run',
'type': 'none',
'dependencies': [
'libjingle_peerconnection_unittest',
],
'includes': [
'build/isolate.gypi',
],
'sources': [
'libjingle_peerconnection_unittest.isolate',
],
},
],
}],
],
}
| true | true |
f7ffb2c1f1f42a121d82e8a6a25a639f2a9e4bf8 | 1,410 | py | Python | setup.py | LevyForchh/bionic | f7f9486ac22c928bfbf12c40abf4bfd81751d77e | [
"Apache-2.0"
] | null | null | null | setup.py | LevyForchh/bionic | f7f9486ac22c928bfbf12c40abf4bfd81751d77e | [
"Apache-2.0"
] | 1 | 2020-05-19T01:01:27.000Z | 2020-05-19T01:01:27.000Z | setup.py | LevyForchh/bionic | f7f9486ac22c928bfbf12c40abf4bfd81751d77e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
from runpy import run_path
# This appears to be the least annoying Python-version-agnostic way of loading
# an external file: execute bionic/extras.py as a script and pull the
# ``extras_require`` mapping out of its resulting namespace.
extras_require = run_path(os.path.join(
    os.path.dirname(__file__), 'bionic', 'extras.py'))['extras_require']
# The PyPI long description comes straight from the README.
with open('README.md') as readme_file:
    readme = readme_file.read()
# Core runtime dependencies; optional ones live in ``extras_require``.
requirements = [
    'PyYAML',
    'numpy',
    'pandas',
    'pyarrow',
    'pyrsistent',
]
setup(
    name='bionic',
    version='0.6.3',
    description=(
        'A Python framework for building, running, and sharing data science '
        'workflows'),
    long_description=readme,
    long_description_content_type="text/markdown",
    license='Apache License 2.0',
    author='Janek Klawe',
    author_email='janek@squareup.com',
    url='https://github.com/square/bionic',
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements,
    extras_require=extras_require,
    python_requires='>=3.6',
    zip_safe=False,
    keywords='bionic',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
)
| 26.603774 | 78 | 0.656738 |
from setuptools import setup, find_packages
import os
from runpy import run_path
extras_require = run_path(os.path.join(
os.path.dirname(__file__), 'bionic', 'extras.py'))['extras_require']
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
'PyYAML',
'numpy',
'pandas',
'pyarrow',
'pyrsistent',
]
setup(
name='bionic',
version='0.6.3',
description=(
'A Python framework for building, running, and sharing data science '
'workflows'),
long_description=readme,
long_description_content_type="text/markdown",
license='Apache License 2.0',
author='Janek Klawe',
author_email='janek@squareup.com',
url='https://github.com/square/bionic',
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
extras_require=extras_require,
python_requires='>=3.6',
zip_safe=False,
keywords='bionic',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
)
| true | true |
f7ffb2d2e904341a1593f00e42fb9860879101f1 | 2,618 | py | Python | interchange/geo.py | py2neo-org/interchange | 5a081adb6358e038dceeb9b7bb166e7f341066bd | [
"Apache-2.0"
] | null | null | null | interchange/geo.py | py2neo-org/interchange | 5a081adb6358e038dceeb9b7bb166e7f341066bd | [
"Apache-2.0"
] | 4 | 2021-10-11T12:24:35.000Z | 2021-11-10T15:55:02.000Z | interchange/geo.py | py2neo-org/interchange | 5a081adb6358e038dceeb9b7bb166e7f341066bd | [
"Apache-2.0"
] | 1 | 2021-11-23T20:24:25.000Z | 2021-11-23T20:24:25.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines geospatial data types.
"""
__all__ = [
"Point",
"CartesianPoint",
"WGS84Point",
]
# SRID to subclass mappings
_srid_table = {}
class Point(tuple):
    """ A point within a geometric space. This type is generally used
    via its subclasses and should not be instantiated directly unless
    there is no subclass defined for the required SRID.
    """

    @classmethod
    def class_for_srid(cls, srid):
        """Look up the (subclass, dimension) pair registered for *srid*."""
        entry = _srid_table[srid]
        return entry[0], entry[1]

    # Spatial reference identifier; generated subclasses override this.
    srid = None

    def __new__(cls, iterable):
        return tuple.__new__(cls, iterable)

    def __str__(self):
        coordinates = " ".join(str(value) for value in self)
        return "POINT(%s)" % coordinates

    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        try:
            return tuple(self) == tuple(other)
        except (AttributeError, TypeError):
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(type(self)) ^ hash(tuple(self))
def _point_subclass(name, fields, srid_map):
    """ Dynamically create a Point subclass.

    :param name: class name for the new subclass
    :param fields: coordinate field names in positional order; each is
        also aliased to "x", "y" or "z" by position
    :param srid_map: mapping of dimension count -> SRID, used both for
        the ``srid`` property and to register the class in ``_srid_table``
    """
    def srid(self):
        # The SRID depends on how many coordinates this point carries.
        try:
            return srid_map[len(self)]
        except KeyError:
            return None
    attributes = {"srid": property(srid)}
    for index, subclass_field in enumerate(fields):
        # Default arguments bind index/field eagerly, avoiding the
        # classic late-binding-closure bug inside this loop.
        def accessor(self, i=index, f=subclass_field):
            try:
                return self[i]
            except IndexError:
                # Missing coordinate reads as a missing attribute.
                raise AttributeError(f)
        for field_alias in {subclass_field, "xyz"[index]}:
            attributes[field_alias] = property(accessor)
    cls = type(name, (Point,), attributes)
    for dim, srid in srid_map.items():
        # Register so Point.class_for_srid can resolve this subclass.
        _srid_table[srid] = (cls, dim)
    return cls
# Point subclass definitions
CartesianPoint = _point_subclass("CartesianPoint", ["x", "y", "z"], {2: 7203, 3: 9157})
WGS84Point = _point_subclass("WGS84Point", ["longitude", "latitude", "height"], {2: 4326, 3: 4979})
| 25.920792 | 99 | 0.645149 |
__all__ = [
"Point",
"CartesianPoint",
"WGS84Point",
]
_srid_table = {}
class Point(tuple):
    """A point within a geometric space; normally used via SRID subclasses."""
    @classmethod
    def class_for_srid(cls, srid):
        """Return the (subclass, dimension) pair registered for *srid*."""
        point_class, dim = _srid_table[srid]
        return point_class, dim
    # Spatial reference identifier; generated subclasses override this.
    srid = None
    def __new__(cls, iterable):
        return tuple.__new__(cls, iterable)
    def __str__(self):
        # WKT-style rendering, e.g. "POINT(1.0 2.0)".
        return "POINT(%s)" % " ".join(map(str, self))
    def __eq__(self, other):
        # Points of different subclasses never compare equal.
        try:
            return type(self) is type(other) and tuple(self) == tuple(other)
        except (AttributeError, TypeError):
            return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Mix in the type so equal tuples of different subclasses differ.
        return hash(type(self)) ^ hash(tuple(self))
def _point_subclass(name, fields, srid_map):
    """Dynamically create a Point subclass with named coordinate accessors."""
    def srid(self):
        # The SRID depends on how many coordinates this point carries.
        try:
            return srid_map[len(self)]
        except KeyError:
            return None
    attributes = {"srid": property(srid)}
    for index, subclass_field in enumerate(fields):
        # Default arguments bind index/field eagerly, avoiding the
        # classic late-binding-closure bug inside this loop.
        def accessor(self, i=index, f=subclass_field):
            try:
                return self[i]
            except IndexError:
                # Missing coordinate reads as a missing attribute.
                raise AttributeError(f)
        for field_alias in {subclass_field, "xyz"[index]}:
            attributes[field_alias] = property(accessor)
    cls = type(name, (Point,), attributes)
    for dim, srid in srid_map.items():
        # Register so Point.class_for_srid can resolve this subclass.
        _srid_table[srid] = (cls, dim)
    return cls
CartesianPoint = _point_subclass("CartesianPoint", ["x", "y", "z"], {2: 7203, 3: 9157})
WGS84Point = _point_subclass("WGS84Point", ["longitude", "latitude", "height"], {2: 4326, 3: 4979})
| true | true |
f7ffb39b815a121988ba68c8c295381065ceb78b | 14,226 | py | Python | src/plotting.py | pradeep90/reddit-post-classifier | 081fd1cda50a0938d1b7f32c3919defbf27bea68 | [
"Apache-2.0"
] | null | null | null | src/plotting.py | pradeep90/reddit-post-classifier | 081fd1cda50a0938d1b7f32c3919defbf27bea68 | [
"Apache-2.0"
] | null | null | null | src/plotting.py | pradeep90/reddit-post-classifier | 081fd1cda50a0938d1b7f32c3919defbf27bea68 | [
"Apache-2.0"
] | null | null | null | import datetime
import matplotlib
# Select a non-interactive backend so figures can be rendered to files on a
# headless machine; conventionally set before importing pyplot.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Feature flags selecting which learning-curve figure(s) this script renders.
PLOT_NBC = False
PLOT_LR = False
PLOT_CNN = True
# Learning curve for NBC (naive Bayes classifier).
if PLOT_NBC:
    # Training fraction (x) vs Precision@5 (y) per run.
    # BUG FIX: the x list previously carried a trailing 1.0 (7 values against
    # 6 y-values), which makes ax.plot() raise a shape-mismatch ValueError.
    training_fracs = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2]
    training_precision_at_5 = [1, 1, 1, 1, 1, 1]
    testing_precision_at_5 = [0.77, 0.82, 0.84, 0.85, 0.86, 0.85]
    # Timestamped file name so successive runs do not overwrite each other.
    plot_name = 'learning_curve_nbc_{}.png'.format(str(datetime.datetime.now()))
    fig, ax = plt.subplots()
    ax.plot(training_fracs, training_precision_at_5, label='train')
    ax.plot(training_fracs, testing_precision_at_5, label='test')
    ax.legend()
    title = 'Training and Test Accuracies v.s. Training Fraction'
    plt.xlabel('training_fracs')
    plt.ylabel('Precision@5')
    plt.title(title)
    plt.savefig(plot_name)
# Learning curve for logistic regression.
if PLOT_LR:
    # Training fraction (x) vs Precision@5 (y) per run.
    fractions = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2]
    train_precision = [1, 1, 1, 1, 1, 1]
    test_precision = [0.77, 0.82, 0.84, 0.85, 0.86, 0.87]
    # Timestamped output name keeps earlier plots from being overwritten.
    out_file = 'learning_curve_lr_{}.png'.format(str(datetime.datetime.now()))
    fig, ax = plt.subplots()
    train_line, = ax.plot(fractions, train_precision, label='train')
    test_line, = ax.plot(fractions, test_precision, label='test')
    ax.legend()
    chart_title = 'Training and Test Accuracies v.s. Training Fraction'
    plt.xlabel('training_fracs')
    plt.ylabel('Precision@5')
    plt.title(chart_title)
    plt.savefig(out_file)
# Learning curve for the CNN: validation loss and accuracy on twin y-axes.
if PLOT_CNN:
    # DEAD-CODE REMOVAL: the raw Keras training log that used to be embedded
    # and parsed here was never used -- the parsed xs/losses/validation_losses
    # were overwritten by the hardcoded lists below before any plotting, and
    # the only consumer of the parsed values was commented out.  The log
    # string, the parsing loop, and the commented-out single-axis plot have
    # been deleted; the figure produced is unchanged.
    # Per-epoch validation loss and accuracy (first 10 epochs).
    validation_losses = [3.78, 2.26, 1.77, 1.52, 1.37, 1.29, 1.22, 1.18, 1.14, 1.12]
    accuracies = [0.58, 0.76, 0.82, 0.85, 0.86, 0.87, 0.88, 0.89, 0.89, 0.89]
    assert len(validation_losses) == len(accuracies)
    epochs = list(range(1, len(validation_losses) + 1))
    # Timestamped file name so successive runs do not overwrite each other.
    plot_name = 'learning_curve_cnn_{}.png'.format(str(datetime.datetime.now()))
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('loss', color=color)
    ax1.plot(epochs, validation_losses, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # second y-axis sharing the same epoch x-axis
    color = 'tab:blue'
    ax2.set_ylabel('accuracy', color=color)  # x-label already handled by ax1
    ax2.plot(epochs, accuracies, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.savefig(plot_name)
| 52.301471 | 208 | 0.638198 | import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
PLOT_NBC = False
PLOT_LR = False
PLOT_CNN = True
if PLOT_NBC:
training_fracs = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 1.0]
training_precision_at_5 = [1, 1, 1, 1, 1, 1]
testing_precision_at_5 = [0.77, 0.82, 0.84, 0.85, 0.86, 0.85]
plot_name = 'learning_curve_nbc_{}.png'.format(str(datetime.datetime.now()))
fig, ax = plt.subplots()
line1, = ax.plot(training_fracs, training_precision_at_5, label='train')
line2, = ax.plot(training_fracs, testing_precision_at_5, label='test')
ax.legend()
title='Training and Test Accuracies v.s. Training Fraction'
plt.xlabel('training_fracs')
plt.ylabel('Precision@5')
plt.title(title)
plt.savefig(plot_name)
if PLOT_LR:
training_fracs = [0.025, 0.05, 0.075, 0.1, 0.15, 0.2]
training_precision_at_5 = [1, 1, 1, 1, 1, 1]
testing_precision_at_5 = [0.77, 0.82, 0.84, 0.85, 0.86, 0.87]
plot_name = 'learning_curve_lr_{}.png'.format(str(datetime.datetime.now()))
fig, ax = plt.subplots()
line1, = ax.plot(training_fracs, training_precision_at_5, label='train')
line2, = ax.plot(training_fracs, testing_precision_at_5, label='test')
ax.legend()
title='Training and Test Accuracies v.s. Training Fraction'
plt.xlabel('training_fracs')
plt.ylabel('Precision@5')
plt.title(title)
plt.savefig(plot_name)
if PLOT_CNN:
data = """
80000/80000 [==============================] - 221s 3ms/step - loss: 6.6233 - acc: 0.0040 - top_k_categorical_accuracy: 0.0174 - val_loss: 6.0892 - val_acc: 0.0098 - val_top_k_categorical_accuracy: 0.0420
80000/80000 [==============================] - 221s 3ms/step - loss: 5.5705 - acc: 0.0277 - top_k_categorical_accuracy: 0.0998 - val_loss: 5.2726 - val_acc: 0.0420 - val_top_k_categorical_accuracy: 0.1362
80000/80000 [==============================] - 223s 3ms/step - loss: 4.9008 - acc: 0.0701 - top_k_categorical_accuracy: 0.2075 - val_loss: 4.7563 - val_acc: 0.0891 - val_top_k_categorical_accuracy: 0.2454
80000/80000 [==============================] - 225s 3ms/step - loss: 4.4173 - acc: 0.1218 - top_k_categorical_accuracy: 0.3048 - val_loss: 4.5053 - val_acc: 0.1213 - val_top_k_categorical_accuracy: 0.3004
80000/80000 [==============================] - 224s 3ms/step - loss: 4.0409 - acc: 0.1718 - top_k_categorical_accuracy: 0.3845 - val_loss: 4.3248 - val_acc: 0.1529 - val_top_k_categorical_accuracy: 0.3457
80000/80000 [==============================] - 222s 3ms/step - loss: 3.7427 - acc: 0.2126 - top_k_categorical_accuracy: 0.4449 - val_loss: 4.2171 - val_acc: 0.1756 - val_top_k_categorical_accuracy: 0.3790
80000/80000 [==============================] - 221s 3ms/step - loss: 3.4914 - acc: 0.2529 - top_k_categorical_accuracy: 0.4950 - val_loss: 4.3441 - val_acc: 0.1744 - val_top_k_categorical_accuracy: 0.3719
80000/80000 [==============================] - 221s 3ms/step - loss: 3.2783 - acc: 0.2870 - top_k_categorical_accuracy: 0.5333 - val_loss: 4.1594 - val_acc: 0.1989 - val_top_k_categorical_accuracy: 0.4041
80000/80000 [==============================] - 220s 3ms/step - loss: 3.0868 - acc: 0.3154 - top_k_categorical_accuracy: 0.5703 - val_loss: 4.2744 - val_acc: 0.1933 - val_top_k_categorical_accuracy: 0.3917
80000/80000 [==============================] - 220s 3ms/step - loss: 2.9134 - acc: 0.3444 - top_k_categorical_accuracy: 0.6034 - val_loss: 4.2295 - val_acc: 0.2097 - val_top_k_categorical_accuracy: 0.4143
80000/80000 [==============================] - 222s 3ms/step - loss: 2.7519 - acc: 0.3720 - top_k_categorical_accuracy: 0.6330 - val_loss: 4.3870 - val_acc: 0.2099 - val_top_k_categorical_accuracy: 0.4190
80000/80000 [==============================] - 222s 3ms/step - loss: 2.6156 - acc: 0.3946 - top_k_categorical_accuracy: 0.6581 - val_loss: 4.4451 - val_acc: 0.2042 - val_top_k_categorical_accuracy: 0.4023
80000/80000 [==============================] - 233s 3ms/step - loss: 2.4792 - acc: 0.4192 - top_k_categorical_accuracy: 0.6820 - val_loss: 4.5719 - val_acc: 0.2061 - val_top_k_categorical_accuracy: 0.4101
80000/80000 [==============================] - 227s 3ms/step - loss: 2.3596 - acc: 0.4394 - top_k_categorical_accuracy: 0.7028 - val_loss: 4.7459 - val_acc: 0.1976 - val_top_k_categorical_accuracy: 0.4000
80000/80000 [==============================] - 243s 3ms/step - loss: 2.2500 - acc: 0.4592 - top_k_categorical_accuracy: 0.7230 - val_loss: 4.8310 - val_acc: 0.2021 - val_top_k_categorical_accuracy: 0.4004
80000/80000 [==============================] - 242s 3ms/step - loss: 2.1556 - acc: 0.4772 - top_k_categorical_accuracy: 0.7412 - val_loss: 4.9553 - val_acc: 0.2046 - val_top_k_categorical_accuracy: 0.4051
80000/80000 [==============================] - 243s 3ms/step - loss: 2.0611 - acc: 0.4944 - top_k_categorical_accuracy: 0.7579 - val_loss: 5.1070 - val_acc: 0.1941 - val_top_k_categorical_accuracy: 0.3936
80000/80000 [==============================] - 242s 3ms/step - loss: 1.9717 - acc: 0.5104 - top_k_categorical_accuracy: 0.7723 - val_loss: 5.3036 - val_acc: 0.2042 - val_top_k_categorical_accuracy: 0.4021
80000/80000 [==============================] - 240s 3ms/step - loss: 1.8947 - acc: 0.5250 - top_k_categorical_accuracy: 0.7874 - val_loss: 5.4930 - val_acc: 0.1991 - val_top_k_categorical_accuracy: 0.3966
80000/80000 [==============================] - 238s 3ms/step - loss: 1.8224 - acc: 0.5395 - top_k_categorical_accuracy: 0.7985 - val_loss: 5.7421 - val_acc: 0.1953 - val_top_k_categorical_accuracy: 0.3928
80000/80000 [==============================] - 237s 3ms/step - loss: 1.7553 - acc: 0.5534 - top_k_categorical_accuracy: 0.8112 - val_loss: 5.7278 - val_acc: 0.1931 - val_top_k_categorical_accuracy: 0.3948
80000/80000 [==============================] - 236s 3ms/step - loss: 1.6928 - acc: 0.5660 - top_k_categorical_accuracy: 0.8206 - val_loss: 5.8661 - val_acc: 0.1908 - val_top_k_categorical_accuracy: 0.3825
80000/80000 [==============================] - 240s 3ms/step - loss: 1.6367 - acc: 0.5775 - top_k_categorical_accuracy: 0.8308 - val_loss: 6.0282 - val_acc: 0.1882 - val_top_k_categorical_accuracy: 0.3840
80000/80000 [==============================] - 237s 3ms/step - loss: 1.5811 - acc: 0.5891 - top_k_categorical_accuracy: 0.8403 - val_loss: 6.3243 - val_acc: 0.1915 - val_top_k_categorical_accuracy: 0.3886
80000/80000 [==============================] - 235s 3ms/step - loss: 1.5333 - acc: 0.5996 - top_k_categorical_accuracy: 0.8481 - val_loss: 6.1779 - val_acc: 0.1834 - val_top_k_categorical_accuracy: 0.3750
80000/80000 [==============================] - 255s 3ms/step - loss: 1.4889 - acc: 0.6095 - top_k_categorical_accuracy: 0.8551 - val_loss: 6.5050 - val_acc: 0.1883 - val_top_k_categorical_accuracy: 0.3822
80000/80000 [==============================] - 266s 3ms/step - loss: 1.4466 - acc: 0.6185 - top_k_categorical_accuracy: 0.8629 - val_loss: 6.6663 - val_acc: 0.1800 - val_top_k_categorical_accuracy: 0.3674
80000/80000 [==============================] - 259s 3ms/step - loss: 1.4101 - acc: 0.6263 - top_k_categorical_accuracy: 0.8694 - val_loss: 6.7527 - val_acc: 0.1837 - val_top_k_categorical_accuracy: 0.3735
80000/80000 [==============================] - 272s 3ms/step - loss: 1.3773 - acc: 0.6326 - top_k_categorical_accuracy: 0.8748 - val_loss: 6.9058 - val_acc: 0.1862 - val_top_k_categorical_accuracy: 0.3782
80000/80000 [==============================] - 253s 3ms/step - loss: 1.3401 - acc: 0.6404 - top_k_categorical_accuracy: 0.8797 - val_loss: 7.1382 - val_acc: 0.1855 - val_top_k_categorical_accuracy: 0.3767
80000/80000 [==============================] - 247s 3ms/step - loss: 1.3198 - acc: 0.6465 - top_k_categorical_accuracy: 0.8853 - val_loss: 7.1970 - val_acc: 0.1823 - val_top_k_categorical_accuracy: 0.3719
80000/80000 [==============================] - 241s 3ms/step - loss: 1.2913 - acc: 0.6535 - top_k_categorical_accuracy: 0.8886 - val_loss: 7.1783 - val_acc: 0.1801 - val_top_k_categorical_accuracy: 0.3652
80000/80000 [==============================] - 244s 3ms/step - loss: 1.2608 - acc: 0.6585 - top_k_categorical_accuracy: 0.8938 - val_loss: 7.4171 - val_acc: 0.1772 - val_top_k_categorical_accuracy: 0.3654
80000/80000 [==============================] - 245s 3ms/step - loss: 1.2429 - acc: 0.6660 - top_k_categorical_accuracy: 0.8966 - val_loss: 7.3468 - val_acc: 0.1793 - val_top_k_categorical_accuracy: 0.3670
80000/80000 [==============================] - 245s 3ms/step - loss: 1.2206 - acc: 0.6705 - top_k_categorical_accuracy: 0.9003 - val_loss: 7.7206 - val_acc: 0.1839 - val_top_k_categorical_accuracy: 0.3712
80000/80000 [==============================] - 238s 3ms/step - loss: 1.1945 - acc: 0.6777 - top_k_categorical_accuracy: 0.9044 - val_loss: 7.7350 - val_acc: 0.1785 - val_top_k_categorical_accuracy: 0.3648
80000/80000 [==============================] - 241s 3ms/step - loss: 1.1816 - acc: 0.6795 - top_k_categorical_accuracy: 0.9073 - val_loss: 7.8643 - val_acc: 0.1794 - val_top_k_categorical_accuracy: 0.3625
80000/80000 [==============================] - 231s 3ms/step - loss: 1.1680 - acc: 0.6851 - top_k_categorical_accuracy: 0.9097 - val_loss: 8.0242 - val_acc: 0.1809 - val_top_k_categorical_accuracy: 0.3685
80000/80000 [==============================] - 243s 3ms/step - loss: 1.1455 - acc: 0.6898 - top_k_categorical_accuracy: 0.9136 - val_loss: 8.0614 - val_acc: 0.1764 - val_top_k_categorical_accuracy: 0.3644
80000/80000 [==============================] - 258s 3ms/step - loss: 1.1306 - acc: 0.6925 - top_k_categorical_accuracy: 0.9152 - val_loss: 8.1652 - val_acc: 0.1784 - val_top_k_categorical_accuracy: 0.3674
80000/80000 [==============================] - 265s 3ms/step - loss: 1.1163 - acc: 0.6969 - top_k_categorical_accuracy: 0.9178 - val_loss: 8.1587 - val_acc: 0.1764 - val_top_k_categorical_accuracy: 0.3659
80000/80000 [==============================] - 270s 3ms/step - loss: 1.1088 - acc: 0.7013 - top_k_categorical_accuracy: 0.9188 - val_loss: 8.3066 - val_acc: 0.1741 - val_top_k_categorical_accuracy: 0.3579
80000/80000 [==============================] - 260s 3ms/step - loss: 1.0948 - acc: 0.7034 - top_k_categorical_accuracy: 0.9227 - val_loss: 8.3521 - val_acc: 0.1719 - val_top_k_categorical_accuracy: 0.3529
80000/80000 [==============================] - 256s 3ms/step - loss: 1.0910 - acc: 0.7053 - top_k_categorical_accuracy: 0.9235 - val_loss: 8.4043 - val_acc: 0.1704 - val_top_k_categorical_accuracy: 0.3555
80000/80000 [==============================] - 248s 3ms/step - loss: 1.0773 - acc: 0.7081 - top_k_categorical_accuracy: 0.9262 - val_loss: 8.5451 - val_acc: 0.1749 - val_top_k_categorical_accuracy: 0.3617
80000/80000 [==============================] - 248s 3ms/step - loss: 1.0668 - acc: 0.7108 - top_k_categorical_accuracy: 0.9266 - val_loss: 8.5922 - val_acc: 0.1718 - val_top_k_categorical_accuracy: 0.3588
80000/80000 [==============================] - 246s 3ms/step - loss: 1.0596 - acc: 0.7133 - top_k_categorical_accuracy: 0.9293 - val_loss: 8.5660 - val_acc: 0.1652 - val_top_k_categorical_accuracy: 0.3448
80000/80000 [==============================] - 246s 3ms/step - loss: 1.0497 - acc: 0.7182 - top_k_categorical_accuracy: 0.9305 - val_loss: 8.6918 - val_acc: 0.1758 - val_top_k_categorical_accuracy: 0.3589
80000/80000 [==============================] - 248s 3ms/step - loss: 1.0363 - acc: 0.7221 - top_k_categorical_accuracy: 0.9315 - val_loss: 8.8265 - val_acc: 0.1732 - val_top_k_categorical_accuracy: 0.3564
80000/80000 [==============================] - 248s 3ms/step - loss: 1.0340 - acc: 0.7208 - top_k_categorical_accuracy: 0.9330 - val_loss: 8.8335 - val_acc: 0.1741 - val_top_k_categorical_accuracy: 0.3563"""
xs = []
losses = []
validation_losses = []
lines = data.split('\n')
x=1
LOSS_IDX, VALIDATION_LOSS_IDX = 7,16
for line in lines:
line = line.strip()
line_tokens = line.split(' ')
if len(line_tokens) > 1:
losses.append(float(line_tokens[LOSS_IDX]))
validation_losses.append(float(line_tokens[VALIDATION_LOSS_IDX]))
xs.append(x)
x += 1
validation_losses = [3.78,2.26,1.77,1.52,1.37,1.29,1.22,1.18,1.14,1.12]
accuracies = [0.58,0.76,0.82,0.85,0.86,0.87,0.88,0.89,0.89,0.89]
assert(len(validation_losses) == len(accuracies))
epochs = list(range(1,len(validation_losses)+1))
plot_name = 'learning_curve_cnn_{}.png'.format(str(datetime.datetime.now()))
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss', color=color)
ax1.plot(epochs, validation_losses, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color)
ax2.plot(epochs, accuracies, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()
plt.savefig(plot_name)
| true | true |
f7ffb42910b936dd5cb58fa20168d7f450a73b00 | 305 | py | Python | fastapi_cache/key_builder.py | johnR46/fastapi-cache | a81d52f25965a959f851021584feec3de926dc91 | [
"Apache-2.0"
] | null | null | null | fastapi_cache/key_builder.py | johnR46/fastapi-cache | a81d52f25965a959f851021584feec3de926dc91 | [
"Apache-2.0"
] | null | null | null | fastapi_cache/key_builder.py | johnR46/fastapi-cache | a81d52f25965a959f851021584feec3de926dc91 | [
"Apache-2.0"
] | null | null | null | import hashlib
from typing import Optional
def default_key_builder(
    prefix: str,
    func,
    parameter: Optional[dict] = None,
):
    """Derive a cache key for *func* called with *parameter*.

    The key is ``<prefix>:<md5 of "module:qualname:parameter">`` so different
    functions (and different arguments) map to distinct keys.
    """
    raw_identity = f"{func.__module__}:{func.__name__}:{parameter}"
    digest = hashlib.md5(raw_identity.encode()).hexdigest()
    return f"{prefix}:{digest}"
| 21.785714 | 109 | 0.616393 | import hashlib
from typing import Optional
def default_key_builder(
prefix: str,
func,
parameter: Optional[dict] = None,
):
cache_key = (
prefix + ':' + hashlib.md5(f"{func.__module__}:{func.__name__}:{parameter}".encode()).hexdigest()
)
return cache_key
| true | true |
f7ffb48c665087c03efcd60e58e6dc4d2eb60da5 | 1,373 | py | Python | file_encryptor/settings.py | SleepingSoul/file-encryptor | d43ddb4c9e9c56ee67f333347da3f03f4e451c4f | [
"MIT"
] | 7 | 2015-01-03T00:08:00.000Z | 2016-02-21T02:47:23.000Z | file_encryptor/settings.py | hnjm/file-encryptor | 245191c45dbd68e09a4a8f07c5d7331be195dc6b | [
"MIT"
] | 3 | 2015-02-02T04:20:55.000Z | 2015-12-14T04:00:16.000Z | file_encryptor/settings.py | hnjm/file-encryptor | 245191c45dbd68e09a4a8f07c5d7331be195dc6b | [
"MIT"
] | 7 | 2015-01-23T12:41:39.000Z | 2015-12-17T22:01:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Storj Labs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The default chunk size for files.
# The current value is 2**14 = 16384 bytes (16 KiB).
CHUNK_SIZE = 2**14
# The default HMAC passphrase for encrypting files.
# NOTE(review): a hardcoded default provides no secrecy -- callers should
# supply their own passphrase for any real use.
DEFAULT_HMAC_PASSPHRASE = 'Something old, something new.'
| 42.90625 | 79 | 0.766205 |
CHUNK_SIZE = 2**14
DEFAULT_HMAC_PASSPHRASE = 'Something old, something new.'
| true | true |
f7ffb50752e74fc276963006dbf5722a06d83799 | 365 | py | Python | output/models/ms_data/particles/particles_jj002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/particles/particles_jj002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/particles/particles_jj002_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.particles.particles_jj002_xsd.particles_jj002 import (
B as B,
R,
Doc,
)
from output.models.ms_data.particles.particles_jj002_xsd.particles_jj002_imp import (
B as ImpB,
ExtRefType,
ImpElem1,
ImpElem2,
)
__all__ = [
"B",
"R",
"Doc",
"ImpB",
"ExtRefType",
"ImpElem1",
"ImpElem2",
]
| 16.590909 | 85 | 0.632877 | from output.models.ms_data.particles.particles_jj002_xsd.particles_jj002 import (
B as B,
R,
Doc,
)
from output.models.ms_data.particles.particles_jj002_xsd.particles_jj002_imp import (
B as ImpB,
ExtRefType,
ImpElem1,
ImpElem2,
)
__all__ = [
"B",
"R",
"Doc",
"ImpB",
"ExtRefType",
"ImpElem1",
"ImpElem2",
]
| true | true |
f7ffb51e9b01237a60725d6e6189859d404a164e | 89 | py | Python | REST API/code/db.py | ccruz182/Store-REST-API | ece3f908279aa15df04a0bab32060f5ee1fc1462 | [
"Apache-2.0"
] | null | null | null | REST API/code/db.py | ccruz182/Store-REST-API | ece3f908279aa15df04a0bab32060f5ee1fc1462 | [
"Apache-2.0"
] | null | null | null | REST API/code/db.py | ccruz182/Store-REST-API | ece3f908279aa15df04a0bab32060f5ee1fc1462 | [
"Apache-2.0"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() # Link Flask app with database | 29.666667 | 48 | 0.808989 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() | true | true |
f7ffb5379681ee5cdd484e1fb05b8a783360afb5 | 1,824 | py | Python | examples/src/main/python/streaming/hdfs_wordcount.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 51 | 2015-01-01T12:59:51.000Z | 2020-03-30T15:40:24.000Z | examples/src/main/python/streaming/hdfs_wordcount.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 6 | 2019-11-13T07:48:07.000Z | 2022-01-21T23:24:20.000Z | examples/src/main/python/streaming/hdfs_wordcount.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 36 | 2015-02-12T02:57:59.000Z | 2020-07-23T22:06:59.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in new text files created in the given directory
Usage: hdfs_wordcount.py <directory>
<directory> is the directory that Spark Streaming will use to find and read new text files.
To run this on your local machine on directory `localdir`, run this example
$ bin/spark-submit examples/src/main/python/streaming/hdfs_wordcount.py localdir
Then create a text file in `localdir` and the words in the file will get counted.
"""
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: hdfs_wordcount.py <directory>"
exit(-1)
sc = SparkContext(appName="PythonStreamingHDFSWordCount")
ssc = StreamingContext(sc, 1)
lines = ssc.textFileStream(sys.argv[1])
counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda x: (x, 1))\
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
| 36.48 | 94 | 0.724781 |
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: hdfs_wordcount.py <directory>"
exit(-1)
sc = SparkContext(appName="PythonStreamingHDFSWordCount")
ssc = StreamingContext(sc, 1)
lines = ssc.textFileStream(sys.argv[1])
counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda x: (x, 1))\
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
| true | true |
f7ffb58199b0ff1028fb183b81068d8955e96ecd | 22,075 | py | Python | process_includes.py | andreif/generateDS | 995fa381b3f1079937bb0c1cb14f8167e879c33d | [
"MIT"
] | null | null | null | process_includes.py | andreif/generateDS | 995fa381b3f1079937bb0c1cb14f8167e879c33d | [
"MIT"
] | null | null | null | process_includes.py | andreif/generateDS | 995fa381b3f1079937bb0c1cb14f8167e879c33d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
python %prog [options] <in_schema.xsd> <out_schema.xsd>
Synopsis:
Prepare schema document. Replace include and import elements.
Examples:
python %prog myschema.xsd
python %prog myschema.xsd newschema.xsd
python %prog -f myschema.xsd newschema.xsd
cat infile.xsd | python %prog > outfile.xsd
"""
#
# Imports
import sys
import os
if sys.version_info.major == 2:
import urllib2
else:
import urllib.request
import urllib.error
import urllib.parse
import copy
from optparse import OptionParser, Values
import itertools
from copy import deepcopy
from lxml import etree
try:
from gds_inner_name_map import Inner_name_map
except ImportError:
Inner_name_map = None
#
# Globals and constants
#
# Do not modify the following VERSION comments.
# Used by updateversion.py.
##VERSION##
VERSION = '2.29.11'
##VERSION##
CatalogDict = {}
# the base url to use for all relative paths in the catalog
CatalogBaseUrl = None
#
# Exceptions
class SchemaIOError(IOError):
    """Raised when a schema referenced via xs:include/xs:import cannot be read."""
    pass
class InnerNameMapError(Exception):
    """Raised when gds_inner_name_map lacks an entry for an anonymous type."""
    pass
def load_catalog(catalogpath):
    """Populate the global catalog mapping from an OASIS XML catalog file.

    Records each ``<public>`` entry's ``publicId -> uri`` in ``CatalogDict``
    and remembers the catalog's directory in ``CatalogBaseUrl`` so relative
    uris found in the catalog can be resolved later.  Does nothing when
    *catalogpath* is falsy.
    """
    global CatalogBaseUrl
    if catalogpath:
        CatalogBaseUrl = os.path.split(catalogpath)[0]
        # Bug fix: the original passed a bare open() into etree.parse and
        # leaked the file handle; use a context manager to close it.
        with open(catalogpath) as catalogfile:
            catalog = etree.parse(catalogfile)
        for elements in catalog.getroot().findall(
                "{urn:oasis:names:tc:entity:xmlns:xml:catalog}public"):
            CatalogDict[elements.get("publicId")] = elements.get("uri")
#
# Functions for external use
def process_include_files(
        infile, outfile, inpath='',
        catalogpath=None,
        fixtypenames=None,
        no_collect_includes=False,
        no_redefine_groups=False):
    """Public entry point: flatten *infile*'s includes/imports and write
    the prepared schema to *outfile*.

    Returns ``(document, ns_dict)`` as produced by ``prep_schema_doc``.
    """
    load_catalog(catalogpath)
    option_values = Values({
        'force': False,
        'fixtypenames': fixtypenames,
        'no_collect_includes': no_collect_includes,
        'no_redefine_groups': no_redefine_groups,
    })
    return prep_schema_doc(infile, outfile, inpath, option_values)
def get_all_root_file_paths(
        infile,
        inpath='',
        catalogpath=None,
        shallow=False):
    """Return the schema file paths reachable from *infile* via
    xs:include/xs:import, dependencies first, with *inpath* appended last.

    When *shallow* is true, referenced documents are not recursed into.
    """
    load_catalog(catalogpath)
    schema_root = etree.parse(infile).getroot()
    params = Params()
    params.parent_url = infile
    params.base_url = os.path.split(inpath)[0]
    paths = []
    get_root_file_paths(schema_root, params, paths, shallow)
    paths.append(inpath)
    return paths
#
# Classes
class Params(object):
    """State holder threaded through the include/import traversal.

    Assignment is restricted to the attributes listed in ``members``;
    anything else raises AttributeError to catch typos early.
    """

    members = ('base_url', 'already_processed', 'parent_url', )

    def __init__(self):
        self.base_url = None
        self.already_processed = set()
        self.parent_url = None

    def __setattr__(self, name, value):
        # Whitelist check: reject attributes not declared in `members`.
        if name in self.members:
            self.__dict__[name] = value
        else:
            raise AttributeError('Class %s has no set-able attribute "%s"' % (
                self.__class__.__name__, name, ))
#
# Functions for internal use and testing
def clear_includes_and_imports(node):
    """Replace every xs:include / xs:import child of *node* with an XML
    comment that preserves the original element's serialized text."""
    uri = node.nsmap[node.prefix]
    # Materialize the matches first so replacement doesn't disturb iteration.
    targets = list(itertools.chain(
        node.iterfind('{%s}include' % (uri, )),
        node.iterfind('{%s}import' % (uri, ))))
    for child in targets:
        marker = etree.Comment(etree.tostring(child))
        marker.tail = '\n'
        node.replace(child, marker)
def get_ref_info(node, params):
    """Determine where the schema referenced by *node* lives.

    Returns ``(locn, schema_name)``: the location to fetch from and the
    name used to track it, or ``(None, None)`` when no schemaLocation is
    available.  Catalog entries (CatalogDict) take precedence over the
    node's own schemaLocation attribute.
    """
    # first look for the schema location in the catalog, if not
    # there, then see if it's specified in the node
    namespace = node.get('namespace')
    url = None
    baseUrl = None
    if namespace in CatalogDict:
        url = CatalogDict[namespace]
        # setup the base url in case the path
        # in the catalog was a relative path
        baseUrl = CatalogBaseUrl
    if not url:
        url = node.get('schemaLocation')
        if not url:
            # Missing schemaLocation is a warning, not fatal.
            msg = '*** Warning: missing "schemaLocation" attribute in %s\n' % (
                params.parent_url, )
            sys.stderr.write(msg)
            return (None, None)
    # Uncomment the next lines to help track down missing schemaLocation etc.
    # print '(resolve_ref) url: %s\n    parent-url: %s' % (
    #     url, params.parent_url, )
    if not baseUrl:
        baseUrl = params.base_url
    # Relative (non-absolute, non-URL) locations are resolved against baseUrl.
    if baseUrl and not (
            url.startswith('/') or
            url.startswith('http:') or
            url.startswith('ftp:')):
        locn = '%s/%s' % (baseUrl, url, )
        schema_name = locn
    else:
        locn = url
        schema_name = url
    return locn, schema_name
def resolve_ref(node, params, options):
    """Fetch the schema document referenced by include/import *node*.

    Returns the raw document content (bytes on py3) or None when the
    location was already processed or could not be determined.  Updates
    ``params.parent_url`` / ``params.base_url`` to the fetched location
    and records it in ``params.already_processed`` to prevent re-reads.

    Raises SchemaIOError when the referenced file cannot be read.
    """
    content = None
    locn, schema_name = get_ref_info(node, params)
    # Canonicalize local relative paths so `already_processed` keys on a
    # single absolute name per file.
    if locn is not None and not (
            locn.startswith('/') or
            locn.startswith('http:') or
            locn.startswith('ftp:')):
        schema_name = os.path.abspath(locn)
    if locn is not None:
        if schema_name not in params.already_processed:
            params.already_processed.add(schema_name)
            if locn.startswith('http:') or locn.startswith('ftp:'):
                # Remote schema: fetch over HTTP/FTP (py2/py3 urllib shim).
                if sys.version_info.major == 2:
                    urllib_urlopen = urllib2.urlopen
                    urllib_httperror = urllib2.HTTPError
                else:
                    urllib_urlopen = urllib.request.urlopen
                    urllib_httperror = urllib.error.HTTPError
                try:
                    urlfile = urllib_urlopen(locn)
                    content = urlfile.read()
                    urlfile.close()
                    params.parent_url = locn
                    params.base_url = os.path.split(locn)[0]
                except urllib_httperror:
                    msg = "Can't find file %s referenced in %s." % (
                        locn, params.parent_url, )
                    raise SchemaIOError(msg)
            else:
                # Local file: read as text, return encoded bytes on py3 so
                # callers can feed etree.fromstring uniformly.
                if os.path.exists(locn):
                    infile = open(locn)
                    unencoded_content = infile.read()
                    if sys.version_info.major == 2:
                        content = unencoded_content
                    else:
                        content = unencoded_content.encode()
                    infile.close()
                    params.parent_url = locn
                    params.base_url = os.path.split(locn)[0]
                if content is None:
                    msg = "Can't find file %s referenced in %s." % (
                        locn, params.parent_url, )
                    raise SchemaIOError(msg)
    return content
def collect_inserts(node, params, inserts, ns_dict, options):
    """Walk every xs:include/xs:import under *node*, accumulating the
    definitions to be inlined into *inserts*.

    Returns the list of parsed root elements of all referenced documents.
    """
    uri = node.nsmap[node.prefix]
    referenced_roots = []
    references = itertools.chain(
        node.iterfind('{%s}include' % (uri, )),
        node.iterfind('{%s}import' % (uri, )))
    for reference in references:
        referenced_roots.extend(
            collect_inserts_aux(reference, params, inserts, ns_dict, options))
    return referenced_roots
def collect_inserts_aux(child, params, inserts, ns_dict, options):
    """Resolve one include/import element and gather its insertable children.

    Parses the referenced schema, appends its top-level definitions to
    *inserts* (each preceded by a comment recording the originating
    include/import element), then recurses into the included document's
    own includes/imports.  Returns the parsed root elements encountered.
    """
    roots = []
    save_base_url = params.base_url
    string_content = resolve_ref(child, params, options)
    if string_content is not None:
        root = etree.fromstring(string_content, base_url=params.base_url)
        roots.append(root)
        update_ns_dict(root, ns_dict, options)
        for child1 in root:
            if not isinstance(child1, etree._Comment):
                namespace = child1.nsmap[child1.prefix]
                # NOTE(review): the second comparison '{%s' looks like a
                # truncated form of '{%s}import' -- as written it can never
                # equal a real tag, so xs:import elements from included
                # documents are copied into *inserts* too.  Confirm the
                # intended filter before changing.
                if (child1.tag != '{%s}include' % (namespace, ) and
                        child1.tag != '{%s' % (namespace, )):
                    comment = etree.Comment(etree.tostring(child))
                    comment.tail = '\n'
                    inserts.append(comment)
                    inserts.append(child1)
        insert_roots = collect_inserts(root, params, inserts, ns_dict, options)
        roots.extend(insert_roots)
    # Restore so sibling references resolve relative to the right base.
    params.base_url = save_base_url
    return roots
def update_ns_dict(root, ns_dict, options):
    """Update the namespace dictionary with the target namespace prefix,
    if there is one, for each global xs:element and xs:complexType.
    """
    if 'targetNamespace' in root.attrib:
        namespace = root.get('targetNamespace')
        # Find the prefix bound to the target namespace in this document.
        defs = [nsdef for nsdef in root.nsmap.items() if nsdef[1] == namespace]
        if defs:
            prefix = defs[0][0]
            # Get top level xs:complexType and xs:element elements.
            nsmap = {'xs': 'http://www.w3.org/2001/XMLSchema'}
            items1 = root.xpath('./xs:complexType', namespaces=nsmap)
            items2 = root.xpath('./xs:element', namespaces=nsmap)
            names = ([item.get('name') for item in items1] +
                     [item.get('name') for item in items2])
            # Record (prefix, namespace) for each global definition name.
            for name in names:
                ns_dict[name] = (prefix, namespace)
def get_root_file_paths(node, params, rootPaths, shallow):
    """Append to *rootPaths* the path of every schema referenced from
    *node* via xs:include / xs:import."""
    uri = node.nsmap[node.prefix]
    references = itertools.chain(
        node.iterfind('{%s}include' % (uri, )),
        node.iterfind('{%s}import' % (uri, )))
    for reference in references:
        get_root_file_paths_aux(reference, params, rootPaths, shallow)
def get_root_file_paths_aux(child, params, rootPaths, shallow):
    """Record the file path referenced by one include/import element.

    When *shallow* is false, the referenced document is parsed and its
    own includes/imports are followed first, so dependencies precede
    dependents in *rootPaths*.  Duplicate paths are skipped.
    """
    save_base_url = params.base_url
    path, _ = get_ref_info(child, params)
    string_content = resolve_ref(child, params, None)
    if string_content is not None:
        if not shallow:
            root = etree.fromstring(string_content, base_url=params.base_url)
            get_root_file_paths(root, params, rootPaths, shallow)
    if path is not None and path not in rootPaths:
        rootPaths.append(path)
    # Restore so sibling references resolve relative to the right base.
    params.base_url = save_base_url
def make_file(outFileName, options):
    """Open *outFileName* for writing.

    Prompts before overwriting an existing file unless ``options.force``
    is set.  Returns the open file object, or None when the user declines.
    """
    if options.force or not os.path.exists(outFileName):
        return open(outFileName, 'w')
    # File exists and --force not given: ask the user.
    if sys.version_info.major == 3:
        raw_input = input
    reply = raw_input(
        'File %s exists.  Overwrite? (y/n): ' % outFileName)
    if reply == 'y':
        return open(outFileName, 'w')
    return None
def prep_schema_doc(infile, outfile, inpath, options):
    """Produce a single self-contained schema document.

    Reads *infile*; unless suppressed by options, inlines all
    include/import targets, redefines xs:group references, raises
    anonymous complexTypes to top level, applies --fix-type-names
    renames, and writes the result to *outfile*.

    Returns ``(ElementTree, ns_dict)``.
    """
    doc1 = etree.parse(infile)
    root1 = doc1.getroot()
    params = Params()
    params.parent_url = infile
    params.base_url = os.path.split(inpath)[0]
    inserts = []
    ns_dict = {}
    if not options.no_collect_includes:
        collect_inserts(root1, params, inserts, ns_dict, options)
        root2 = copy.copy(root1)
        # Replace include/import elements with comments, then append the
        # collected definitions at the end of the document.
        clear_includes_and_imports(root2)
        for insert_node in inserts:
            root2.append(insert_node)
    else:
        root2 = root1
    if not options.no_redefine_groups:
        process_groups(root2)
    raise_anon_complextypes(root2)
    fix_type_names(root2, options)
    doc2 = etree.ElementTree(root2)
    if sys.version_info.major == 2:
        doc2.write(outfile)
    else:
        # py3: etree.tostring returns bytes; decode before writing text.
        outfile.write(etree.tostring(root2).decode('utf-8'))
    return doc2, ns_dict
def prep_schema(inpath, outpath, options):
    """Prepare the schema at *inpath* and write it to *outpath*.

    Falls back to stdin/stdout when a path is not given.  Returns early
    (doing nothing) when the user declines to overwrite an existing
    output file.

    Bug fix: the original leaked the opened input file on the
    early-return path and on exceptions; files are now closed in all
    paths via try/finally (stdin/stdout are never closed).
    """
    infile = open(inpath, 'r') if inpath else sys.stdin
    try:
        outfile = make_file(outpath, options) if outpath else sys.stdout
        if outfile is None:
            # User declined to overwrite the existing output file.
            return
        try:
            prep_schema_doc(infile, outfile, inpath, options)
        finally:
            if outpath:
                outfile.close()
    finally:
        if inpath:
            infile.close()
def process_groups(root):
    """Inline every named xs:group model group at its reference sites."""
    # Get all the xs:group definitions at top level.
    if root.prefix:
        namespaces = {root.prefix: root.nsmap[root.prefix]}
        pattern = './%s:group' % (root.prefix, )
        defs = root.xpath(pattern, namespaces=namespaces)
    else:
        pattern = './group'
        defs = root.xpath(pattern)
    defs = [node for node in defs if node.get('name') is not None]
    # Get all the xs:group references (below top level).
    if root.prefix:
        namespaces = {root.prefix: root.nsmap[root.prefix]}
        pattern = './*//%s:group' % (root.prefix, )
        refs = root.xpath(pattern, namespaces=namespaces)
    else:
        pattern = './*//group'
        refs = root.xpath(pattern)
    refs = [node for node in refs if node.get('ref') is not None]
    # Create a dictionary of the named model groups (definitions).
    def_dict = {}
    for node in defs:
        def_dict[trim_prefix(node.get('name'))] = node
    replace_group_defs(def_dict, refs)
def fix_type_names(root, options):
    """Rename complexTypes per ``options.fixtypenames``.

    The spec is ``"old:new;old2:new2;..."``; a spec with no colon renames
    ``old`` to ``oldxx``.  For each rename, the complexType's ``name``
    attribute, all leaf xs:element ``type`` attributes, and all
    xs:extension ``base`` attributes that referred to the old name are
    updated.  Exits the process when a named complexType is not found.
    """
    fixnamespec = options.fixtypenames
    if fixnamespec:
        namespecs = fixnamespec.split(';')
    else:
        namespecs = []
    for namespec in namespecs:
        names = namespec.split(':')
        if len(names) == 2:
            oldname = names[0]
            newname = names[1]
        elif len(names) == 1:
            oldname = names[0]
            newname = '%sxx' % (oldname, )
        else:
            # Malformed spec (more than one colon): skip it.
            continue
        # Change the name (name attribute) of the complexType.
        pat = './/%s:complexType[@name="%s"]' % (
            root.prefix, oldname)
        elements = xpath_find(root, pat)
        if len(elements) < 1:
            sys.stderr.write(
                "\nWarning: fix-type-names can't find complexType '%s'. "
                "Exiting.\n\n" % (oldname, ))
            sys.exit(1)
        # Bug fix: this guard used to repeat "< 1" (unreachable after the
        # exit above); the warning is about *multiple* matches.
        if len(elements) > 1:
            sys.stderr.write(
                "Warning: fix-type-names found more than "
                "one complexType '%s'. "
                "Changing first." % (oldname, ))
        element = elements[0]
        element.set('name', newname)
        # Change the reference (type attribute) of child elements.
        pat = './/%s:element' % (root.prefix, )
        elements = xpath_find(root, pat)
        for element in elements:
            typename = element.get('type')
            if not typename:
                continue
            names = typename.split(':')
            if len(names) == 2:
                typename = names[1]
            elif len(names) == 1:
                typename = names[0]
            else:
                continue
            if typename != oldname:
                continue
            # Only retarget leaf elements (no inline type definition).
            if not element.getchildren():
                element.set('type', newname)
        # Change the extensions ('base' attribute) that refer to the old type.
        pat = './/%s:extension' % (root.prefix, )
        elements = xpath_find(root, pat)
        for element in elements:
            typename = element.get('base')
            if not typename:
                continue
            names = typename.split(':')
            if len(names) == 2:
                typename = names[1]
            elif len(names) == 1:
                typename = names[0]
            else:
                continue
            if typename != oldname:
                continue
            element.set('base', newname)
def xpath_find(node, pat):
    """Run XPath *pat* on *node* using the node's own prefix->uri binding."""
    nsmap = {node.prefix: node.nsmap[node.prefix]}
    return node.xpath(pat, namespaces=nsmap)
def replace_group_defs(def_dict, refs):
    """Replace each xs:group reference with a deep copy of the content
    (sequence/choice/all) of the corresponding named group definition.

    minOccurs/maxOccurs attributes on the reference are copied onto each
    inserted child.  References whose name is malformed or not present
    in *def_dict* are left in place.
    """
    for ref_node in refs:
        name = trim_prefix(ref_node.get('ref'))
        if name is None:
            continue
        def_node = def_dict.get(name)
        if def_node is None:
            # Bug fix: the namespace map was previously built from
            # def_node *before* this None check, raising AttributeError
            # for any unresolved group reference.
            continue
        namespaces = {def_node.prefix: def_node.nsmap[def_node.prefix]}
        pattern = './%s:sequence|./%s:choice|./%s:all' % (
            def_node.prefix, def_node.prefix, def_node.prefix, )
        content = def_node.xpath(
            pattern,
            namespaces=namespaces)
        if content:
            content = content[0]
            parent = ref_node.getparent()
            for node in content:
                if not isinstance(node, etree._Comment):
                    new_node = deepcopy(node)
                    # Copy minOccurs and maxOccurs attributes to new node.
                    value = ref_node.get('minOccurs')
                    if value is not None:
                        new_node.set('minOccurs', value)
                    value = ref_node.get('maxOccurs')
                    if value is not None:
                        new_node.set('maxOccurs', value)
                    ref_node.addprevious(new_node)
            parent.remove(ref_node)
def raise_anon_complextypes(root):
    """ Raise each anonymous complexType to top level and give it a name.
    Rename if necessary to prevent duplicates.
    """
    def_names = collect_type_names(root)
    def_count = 0
    # Find all complexTypes below top level.
    # Raise them to top level and name them.
    # Re-name if there is a duplicate (simpleType, complexType, or
    # previous renamed type).
    # Change the parent (xs:element) so the "type" attribute refers to
    # the raised and renamed type.
    # Collect the new types.
    el = etree.Comment(text="Raised anonymous complexType definitions")
    el.tail = "\n\n"
    root.append(el)
    prefix = root.prefix
    if prefix:
        pattern = './*/*//%s:complexType|./*/*//%s:simpleType' % (
            prefix, prefix, )
        element_tag = '{%s}element' % (root.nsmap[prefix], )
        namespaces = {prefix: root.nsmap[prefix]}
        defs = root.xpath(pattern, namespaces=namespaces)
    else:
        pattern = './*/*//complexType|./*/*//simpleType'
        element_tag = 'element'
        defs = root.xpath(pattern)
    for node in defs:
        parent = node.getparent()
        # Only raise types defined inline directly under a named xs:element.
        if parent.tag != element_tag:
            continue
        name = parent.get('name')
        if not name:
            continue
        # New type name is "<elementName>Type", uniquified or mapped.
        type_name = '%sType' % (name, )
        if Inner_name_map is None:
            type_name, def_count = unique_name(type_name, def_names, def_count)
        else:
            type_name = map_inner_name(node, Inner_name_map)
        def_names.add(type_name)
        parent.set('type', type_name)
        node.set('name', type_name)
        # Move the complexType node to top level.
        root.append(node)
def map_inner_name(node, inner_name_map):
    """Use a user-supplied mapping table to look up a name for this
    class/type.

    The key is ``(outer_name, inner_name)``: the names of the two nearest
    enclosing elements that carry a "name" attribute.  Raises
    InnerNameMapError when an ancestor chain or a map entry is missing.
    """
    def nearest_named(start):
        # Walk up from *start* to the closest node with a "name" attribute.
        current = start
        found = current.get('name')
        while found is None:
            current = current.getparent()
            if current is None:
                raise InnerNameMapError(
                    'cannot find parent with "name" attribute')
            found = current.get('name')
        return current, found

    inner_node, name2 = nearest_named(node)
    _, name1 = nearest_named(inner_node.getparent())
    new_name = inner_name_map.get((name1, name2))
    if new_name is None:
        msg1 = '("{}", "{}")'.format(
            name1, name2)
        sys.stderr.write('\n*** error.  Must add entry to inner_name_map:\n')
        sys.stderr.write('\n    {}: "xxxx",\n\n'.format(msg1))
        raise InnerNameMapError('mapping missing for {}'.format(msg1))
    return new_name
#
# Collect the names of all currently defined types (complexType,
# simpleType, element).
def collect_type_names(node):
    """Return the set of names of all named complexType / simpleType /
    element definitions (that have children) anywhere under *node*."""
    prefix = node.prefix
    if prefix is not None and prefix.strip():
        pattern = './/%s:complexType|.//%s:simpleType|.//%s:element' % (
            prefix, prefix, prefix)
        # The namespaces dict must not contain a None key.
        elements = node.xpath(
            pattern, namespaces={prefix: node.nsmap[prefix]})
    else:
        elements = node.xpath('.//complexType|.//simpleType|.//element')
    return {
        element.attrib['name']
        for element in elements
        if 'name' in element.attrib and element.getchildren()
    }
def unique_name(type_name, def_names, def_count):
    """Return ``(name, count)`` where *name* is *type_name* made unique
    against *def_names* by appending an increasing integer suffix."""
    candidate = type_name
    while candidate in def_names:
        def_count += 1
        candidate = '%s%d' % (type_name, def_count, )
    return candidate, def_count
def trim_prefix(name):
    """Strip a single namespace prefix from *name* (``"xs:foo" -> "foo"``).

    Returns *name* unchanged when it has no prefix, and None when it is
    malformed (more than one colon).
    """
    parts = name.split(':')
    if len(parts) <= 2:
        return parts[-1]
    return None
USAGE_TEXT = __doc__
def usage(parser):
    """Print the option help text and exit with status 1."""
    parser.print_help()
    sys.exit(1)
def main():
    """Command-line entry point: parse options and positional args,
    then run prep_schema."""
    parser = OptionParser(USAGE_TEXT)
    parser.add_option(
        "-f", "--force", action="store_true",
        dest="force", default=False,
        help="force overwrite without asking")
    parser.add_option(
        "--fix-type-names", action="store",
        dest="fixtypenames", default=None,
        help="Fix up (replace) complex type names.")
    parser.add_option(
        "--no-collect-includes", action="store_true",
        dest="no_collect_includes", default=False,
        help="do not process and insert schemas referenced by "
        "xs:include and xs:import elements")
    parser.add_option(
        "--no-redefine-groups", action="store_true",
        dest="no_redefine_groups", default=False,
        help="do not pre-process and redefine xs:group elements")
    (options, args) = parser.parse_args()
    # 2 args: infile outfile; 1 arg: infile -> stdout; 0 args: stdin -> stdout.
    if len(args) == 2:
        inpath = args[0]
        outpath = args[1]
    elif len(args) == 1:
        inpath = args[0]
        outpath = None
    elif len(args) == 0:
        inpath = None
        outpath = None
    else:
        usage(parser)
    prep_schema(inpath, outpath, options)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
| 33.497724 | 79 | 0.588086 |
import sys
import os
if sys.version_info.major == 2:
import urllib2
else:
import urllib.request
import urllib.error
import urllib.parse
import copy
from optparse import OptionParser, Values
import itertools
from copy import deepcopy
from lxml import etree
try:
from gds_inner_name_map import Inner_name_map
except ImportError:
Inner_name_map = None
.29.11'
= {}
CatalogBaseUrl = None
class SchemaIOError(IOError):
pass
class InnerNameMapError(Exception):
pass
def load_catalog(catalogpath):
global CatalogBaseUrl
if catalogpath:
CatalogBaseUrl = os.path.split(catalogpath)[0]
catalog = etree.parse(open(catalogpath))
for elements in catalog.getroot().findall(
"{urn:oasis:names:tc:entity:xmlns:xml:catalog}public"):
CatalogDict[elements.get("publicId")] = elements.get("uri")
def process_include_files(
infile, outfile, inpath='',
catalogpath=None,
fixtypenames=None,
no_collect_includes=False,
no_redefine_groups=False):
load_catalog(catalogpath)
options = Values({
'force': False,
'fixtypenames': fixtypenames,
'no_collect_includes': no_collect_includes,
'no_redefine_groups': no_redefine_groups,
})
doc, ns_dict = prep_schema_doc(infile, outfile, inpath, options)
return doc, ns_dict
def get_all_root_file_paths(
infile,
inpath='',
catalogpath=None,
shallow=False):
load_catalog(catalogpath)
doc1 = etree.parse(infile)
root1 = doc1.getroot()
rootPaths = []
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
get_root_file_paths(root1, params, rootPaths, shallow)
rootPaths.append(inpath)
return rootPaths
class Params(object):
members = ('base_url', 'already_processed', 'parent_url', )
def __init__(self):
self.base_url = None
self.already_processed = set()
self.parent_url = None
def __setattr__(self, name, value):
if name not in self.members:
raise AttributeError('Class %s has no set-able attribute "%s"' % (
self.__class__.__name__, name, ))
self.__dict__[name] = value
def clear_includes_and_imports(node):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
repl = etree.Comment(etree.tostring(child))
repl.tail = '\n'
node.replace(child, repl)
def get_ref_info(node, params):
namespace = node.get('namespace')
url = None
baseUrl = None
if namespace in CatalogDict:
url = CatalogDict[namespace]
# setup the base url in case the path
# in the catalog was a relative path
baseUrl = CatalogBaseUrl
if not url:
url = node.get('schemaLocation')
if not url:
msg = '*** Warning: missing "schemaLocation" attribute in %s\n' % (
params.parent_url, )
sys.stderr.write(msg)
return (None, None)
# Uncomment the next lines to help track down missing schemaLocation etc.
# print '(resolve_ref) url: %s\n parent-url: %s' % (
# url, params.parent_url, )
if not baseUrl:
baseUrl = params.base_url
if baseUrl and not (
url.startswith('/') or
url.startswith('http:') or
url.startswith('ftp:')):
locn = '%s/%s' % (baseUrl, url, )
schema_name = locn
else:
locn = url
schema_name = url
return locn, schema_name
def resolve_ref(node, params, options):
content = None
locn, schema_name = get_ref_info(node, params)
if locn is not None and not (
locn.startswith('/') or
locn.startswith('http:') or
locn.startswith('ftp:')):
schema_name = os.path.abspath(locn)
if locn is not None:
if schema_name not in params.already_processed:
params.already_processed.add(schema_name)
## print 'trace --'
## print ' url: : %s' % (url, )
## print ' base : %s' % (params.base_url, )
## print ' parent : %s' % (params.parent_url, )
## print ' locn : %s' % (locn, )
## print ' schema_name : %s\n' % (schema_name, )
if locn.startswith('http:') or locn.startswith('ftp:'):
if sys.version_info.major == 2:
urllib_urlopen = urllib2.urlopen
urllib_httperror = urllib2.HTTPError
else:
urllib_urlopen = urllib.request.urlopen
urllib_httperror = urllib.error.HTTPError
try:
urlfile = urllib_urlopen(locn)
content = urlfile.read()
urlfile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
except urllib_httperror:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
else:
if os.path.exists(locn):
infile = open(locn)
unencoded_content = infile.read()
if sys.version_info.major == 2:
content = unencoded_content
else:
content = unencoded_content.encode()
infile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
if content is None:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
## if content is None:
## msg = "Can't find file %s referenced in %s." % (
options):
namespace = node.nsmap[node.prefix]
roots = []
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
aux_roots = collect_inserts_aux(
child, params, inserts, ns_dict, options)
roots.extend(aux_roots)
return roots
def collect_inserts_aux(child, params, inserts, ns_dict, options):
roots = []
save_base_url = params.base_url
string_content = resolve_ref(child, params, options)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
roots.append(root)
update_ns_dict(root, ns_dict, options)
for child1 in root:
if not isinstance(child1, etree._Comment):
namespace = child1.nsmap[child1.prefix]
if (child1.tag != '{%s}include' % (namespace, ) and
child1.tag != '{%s' % (namespace, )):
comment = etree.Comment(etree.tostring(child))
comment.tail = '\n'
inserts.append(comment)
inserts.append(child1)
insert_roots = collect_inserts(root, params, inserts, ns_dict, options)
roots.extend(insert_roots)
params.base_url = save_base_url
return roots
def update_ns_dict(root, ns_dict, options):
if 'targetNamespace' in root.attrib:
namespace = root.get('targetNamespace')
defs = [nsdef for nsdef in root.nsmap.items() if nsdef[1] == namespace]
if defs:
prefix = defs[0][0]
nsmap = {'xs': 'http://www.w3.org/2001/XMLSchema'}
items1 = root.xpath('./xs:complexType', namespaces=nsmap)
items2 = root.xpath('./xs:element', namespaces=nsmap)
names = ([item.get('name') for item in items1] +
[item.get('name') for item in items2])
for name in names:
ns_dict[name] = (prefix, namespace)
def get_root_file_paths(node, params, rootPaths, shallow):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
get_root_file_paths_aux(child, params, rootPaths, shallow)
def get_root_file_paths_aux(child, params, rootPaths, shallow):
save_base_url = params.base_url
path, _ = get_ref_info(child, params)
string_content = resolve_ref(child, params, None)
if string_content is not None:
if not shallow:
root = etree.fromstring(string_content, base_url=params.base_url)
get_root_file_paths(root, params, rootPaths, shallow)
if path is not None and path not in rootPaths:
rootPaths.append(path)
params.base_url = save_base_url
def make_file(outFileName, options):
outFile = None
if (not options.force) and os.path.exists(outFileName):
if sys.version_info.major == 3:
raw_input = input
reply = raw_input(
'File %s exists. Overwrite? (y/n): ' % outFileName)
if reply == 'y':
outFile = open(outFileName, 'w')
else:
outFile = open(outFileName, 'w')
return outFile
def prep_schema_doc(infile, outfile, inpath, options):
doc1 = etree.parse(infile)
root1 = doc1.getroot()
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
inserts = []
ns_dict = {}
if not options.no_collect_includes:
collect_inserts(root1, params, inserts, ns_dict, options)
root2 = copy.copy(root1)
clear_includes_and_imports(root2)
for insert_node in inserts:
root2.append(insert_node)
else:
root2 = root1
if not options.no_redefine_groups:
process_groups(root2)
raise_anon_complextypes(root2)
fix_type_names(root2, options)
doc2 = etree.ElementTree(root2)
if sys.version_info.major == 2:
doc2.write(outfile)
else:
outfile.write(etree.tostring(root2).decode('utf-8'))
return doc2, ns_dict
def prep_schema(inpath, outpath, options):
if inpath:
infile = open(inpath, 'r')
else:
infile = sys.stdin
if outpath:
outfile = make_file(outpath, options)
else:
outfile = sys.stdout
if outfile is None:
return
prep_schema_doc(infile, outfile, inpath, options)
if inpath:
infile.close()
if outpath:
outfile.close()
def process_groups(root):
if root.prefix:
namespaces = {root.prefix: root.nsmap[root.prefix]}
pattern = './%s:group' % (root.prefix, )
defs = root.xpath(pattern, namespaces=namespaces)
else:
pattern = './group'
defs = root.xpath(pattern)
defs = [node for node in defs if node.get('name') is not None]
if root.prefix:
namespaces = {root.prefix: root.nsmap[root.prefix]}
pattern = './*//%s:group' % (root.prefix, )
refs = root.xpath(pattern, namespaces=namespaces)
else:
pattern = './*//group'
refs = root.xpath(pattern)
refs = [node for node in refs if node.get('ref') is not None]
def_dict = {}
for node in defs:
def_dict[trim_prefix(node.get('name'))] = node
replace_group_defs(def_dict, refs)
def fix_type_names(root, options):
fixnamespec = options.fixtypenames
if fixnamespec:
namespecs = fixnamespec.split(';')
else:
namespecs = []
for namespec in namespecs:
names = namespec.split(':')
if len(names) == 2:
oldname = names[0]
newname = names[1]
elif len(names) == 1:
oldname = names[0]
newname = '%sxx' % (oldname, )
else:
continue
pat = './/%s:complexType[@name="%s"]' % (
root.prefix, oldname)
elements = xpath_find(root, pat)
if len(elements) < 1:
sys.stderr.write(
"\nWarning: fix-type-names can't find complexType '%s'. "
"Exiting.\n\n" % (oldname, ))
sys.exit(1)
if len(elements) < 1:
sys.stderr.write(
"Warning: fix-type-names found more than "
"one complexType '%s'. "
"Changing first." % (oldname, ))
element = elements[0]
element.set('name', newname)
# Change the reference (type attribute) of child elements.
pat = './/%s:element' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('type')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
if not element.getchildren():
element.set('type', newname)
# Change the extensions ('base' attribute) that refer to the old type.
pat = './/%s:extension' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('base')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
element.set('base', newname)
def xpath_find(node, pat):
namespaces = {node.prefix: node.nsmap[node.prefix]}
elements = node.xpath(pat, namespaces=namespaces)
return elements
def replace_group_defs(def_dict, refs):
for ref_node in refs:
name = trim_prefix(ref_node.get('ref'))
if name is None:
continue
def_node = def_dict.get(name)
namespaces = {def_node.prefix: def_node.nsmap[def_node.prefix]}
if def_node is not None:
pattern = './%s:sequence|./%s:choice|./%s:all' % (
def_node.prefix, def_node.prefix, def_node.prefix, )
content = def_node.xpath(
pattern,
namespaces=namespaces)
if content:
content = content[0]
parent = ref_node.getparent()
for node in content:
if not isinstance(node, etree._Comment):
new_node = deepcopy(node)
# Copy minOccurs and maxOccurs attributes to new node.
value = ref_node.get('minOccurs')
if value is not None:
new_node.set('minOccurs', value)
value = ref_node.get('maxOccurs')
if value is not None:
new_node.set('maxOccurs', value)
ref_node.addprevious(new_node)
parent.remove(ref_node)
def raise_anon_complextypes(root):
def_names = collect_type_names(root)
def_count = 0
# Find all complexTypes below top level.
# Raise them to top level and name them.
# Re-name if there is a duplicate (simpleType, complexType, or
# previous renamed type).
# Change the parent (xs:element) so the "type" attribute refers to
# the raised and renamed type.
# Collect the new types.
el = etree.Comment(text="Raised anonymous complexType definitions")
el.tail = "\n\n"
root.append(el)
prefix = root.prefix
if prefix:
pattern = './*/*//%s:complexType|./*/*//%s:simpleType' % (
prefix, prefix, )
element_tag = '{%s}element' % (root.nsmap[prefix], )
namespaces = {prefix: root.nsmap[prefix]}
defs = root.xpath(pattern, namespaces=namespaces)
else:
pattern = './*/*//complexType|./*/*//simpleType'
element_tag = 'element'
defs = root.xpath(pattern)
for node in defs:
parent = node.getparent()
if parent.tag != element_tag:
continue
name = parent.get('name')
if not name:
continue
type_name = '%sType' % (name, )
if Inner_name_map is None:
type_name, def_count = unique_name(type_name, def_names, def_count)
else:
type_name = map_inner_name(node, Inner_name_map)
def_names.add(type_name)
parent.set('type', type_name)
node.set('name', type_name)
# Move the complexType node to top level.
root.append(node)
def map_inner_name(node, inner_name_map):
    """Look up the replacement name for an anonymous inner type.

    Climbs the tree to find the name of the enclosing type definition and
    the name of the definition enclosing that, then uses the pair as a key
    into *inner_name_map*.  Raises ``InnerNameMapError`` (with a hint on
    stderr) when the pair has no mapping or no named ancestor exists.
    """
    def nearest_named(start):
        # Walk upward until a node carrying a "name" attribute is found.
        current = start
        label = current.get('name')
        while label is None:
            current = current.getparent()
            if current is None:
                raise InnerNameMapError('cannot find parent with "name" attribute')
            label = current.get('name')
        return current, label

    inner_node, inner_name = nearest_named(node)
    _, outer_name = nearest_named(inner_node.getparent())
    new_name = inner_name_map.get((outer_name, inner_name))
    if new_name is None:
        msg1 = '("{}", "{}")'.format(
            outer_name, inner_name)
        sys.stderr.write('\n*** error. Must add entry to inner_name_map:\n')
        sys.stderr.write('\n    {}: "xxxx",\n\n'.format(msg1))
        raise InnerNameMapError('mapping missing for {}'.format(msg1))
    return new_name
#
# Collect the names of all currently defined types (complexType,
# simpleType, element).
def collect_type_names(node):
    """Return the set of names of all non-empty complexType, simpleType and
    element definitions anywhere under *node*.

    Definitions without a ``name`` attribute or without child nodes are
    ignored.
    """
    prefix = node.prefix
    if prefix is None or not prefix.strip():
        found = node.xpath('.//complexType|.//simpleType|.//element')
    else:
        tags = ('complexType', 'simpleType', 'element')
        query = '|'.join('.//%s:%s' % (prefix, tag) for tag in tags)
        # Must make sure that we have a namespace dictionary that does *not*
        # have a key None.
        found = node.xpath(query, namespaces={prefix: node.nsmap[prefix]})
    names = set()
    for element in found:
        if 'name' in element.attrib and element.getchildren():
            names.add(element.attrib['name'])
    return names
def unique_name(type_name, def_names, def_count):
    """Return ``(name, count)`` where *name* is not in *def_names*.

    If *type_name* already exists, numeric suffixes (continuing from
    *def_count*) are appended to the original name until a free one is
    found; the incremented counter is returned alongside it.
    """
    candidate = type_name
    while candidate in def_names:
        def_count += 1
        candidate = '%s%d' % (type_name, def_count, )
    return candidate, def_count
def trim_prefix(name):
    """Strip a single namespace prefix from *name*.

    ``'xs:element'`` -> ``'element'``; an unprefixed name is returned
    unchanged; more than one colon yields ``None``.
    """
    parts = name.split(':')
    if len(parts) > 2:
        return None
    return parts[-1]
# The module docstring doubles as the command-line usage/help text.
USAGE_TEXT = __doc__
def usage(parser):
    """Print the option parser's help text and exit with status 1."""
    parser.print_help()
    sys.exit(1)
def main():
    """Command-line entry point: parse options and run ``prep_schema``.

    Accepts zero, one or two positional arguments (input path, output
    path); any other count prints the usage text and exits.
    """
    parser = OptionParser(USAGE_TEXT)
    parser.add_option(
        "-f", "--force", action="store_true",
        dest="force", default=False,
        help="force overwrite without asking")
    parser.add_option(
        "--fix-type-names", action="store",
        dest="fixtypenames", default=None,
        help="Fix up (replace) complex type names.")
    parser.add_option(
        "--no-collect-includes", action="store_true",
        dest="no_collect_includes", default=False,
        help="do not process and insert schemas referenced by "
        "xs:include and xs:import elements")
    parser.add_option(
        "--no-redefine-groups", action="store_true",
        dest="no_redefine_groups", default=False,
        help="do not pre-process and redefine xs:group elements")
    options, args = parser.parse_args()
    if len(args) > 2:
        usage(parser)  # never returns: usage() calls sys.exit(1)
    # Pad with None so a missing input and/or output path defaults to None.
    inpath, outpath = (list(args) + [None, None])[:2]
    prep_schema(inpath, outpath, options)
if __name__ == "__main__":
    # Uncomment to drop into the debugger on startup:
    #import pdb; pdb.set_trace()
    main()
| true | true |
f7ffb5b45b477cf608a2b90e60031a8fa9501218 | 1,745 | py | Python | caffe2/python/operator_test/cast_op_test.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 40 | 2021-06-01T07:37:59.000Z | 2022-03-25T01:42:09.000Z | caffe2/python/operator_test/cast_op_test.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 14 | 2021-06-01T11:52:46.000Z | 2022-03-25T02:13:08.000Z | caffe2/python/operator_test/cast_op_test.py | wenhaopeter/read_pytorch_code | 491f989cd918cf08874dd4f671fb7f0142a0bc4f | [
"Intel",
"X11"
] | 7 | 2021-07-20T19:34:26.000Z | 2022-03-13T21:07:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import numpy as np
class TestCastOp(hu.HypothesisTestCase):
    """Device, gradient and reference checks for the Caffe2 Cast operator."""

    def _check_int_to_float(self, gc, dc, data):
        # Shared body for the int -> float cast tests below.
        # from int to float (TensorProto enums: to=1 is FLOAT, from_type=2 is INT32)
        op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
        self.assertDeviceChecks(dc, op, [data], [0])
        # The gradient of a cast from an integer input is actually 0.
        self.assertGradientChecks(gc, op, [data], 0, [0])

    @given(**hu.gcs)
    def test_cast_int_float(self, gc, dc):
        data = np.random.rand(5, 5).astype(np.int32)
        self._check_int_to_float(gc, dc, data)

    @given(**hu.gcs)
    def test_cast_int_float_empty(self, gc, dc):
        # Same check on a zero-element tensor (edge case).
        data = np.random.rand(0).astype(np.int32)
        self._check_int_to_float(gc, dc, data)

    @given(data=hu.tensor(dtype=np.int32), **hu.gcs_cpu_only)
    def test_cast_int_to_string(self, data, gc, dc):
        op = core.CreateOperator(
            'Cast', 'data', 'data_cast', to=core.DataType.STRING)

        def ref(data):
            # np.str was a deprecated alias for the builtin str and was
            # removed in NumPy 1.24; use str directly.
            ret = data.astype(dtype=str)
            # the string blob will be fetched as object, we feed and re-fetch
            # to mimic this.
            with hu.temp_workspace('tmp_ref_int_to_string'):
                workspace.FeedBlob('tmp_blob', ret)
                fetched_ret = workspace.FetchBlob('tmp_blob')
            return (fetched_ret, )

        self.assertReferenceChecks(gc, op, inputs=[data], reference=ref)
| 35.612245 | 80 | 0.645272 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import numpy as np
class TestCastOp(hu.HypothesisTestCase):
@given(**hu.gcs)
def test_cast_int_float(self, gc, dc):
data = np.random.rand(5, 5).astype(np.int32)
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
self.assertGradientChecks(gc, op, [data], 0, [0])
@given(**hu.gcs)
def test_cast_int_float_empty(self, gc, dc):
data = np.random.rand(0).astype(np.int32)
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
self.assertGradientChecks(gc, op, [data], 0, [0])
@given(data=hu.tensor(dtype=np.int32), **hu.gcs_cpu_only)
def test_cast_int_to_string(self, data, gc, dc):
op = core.CreateOperator(
'Cast', 'data', 'data_cast', to=core.DataType.STRING)
def ref(data):
ret = data.astype(dtype=np.str)
with hu.temp_workspace('tmp_ref_int_to_string'):
workspace.FeedBlob('tmp_blob', ret)
fetched_ret = workspace.FetchBlob('tmp_blob')
return (fetched_ret, )
self.assertReferenceChecks(gc, op, inputs=[data], reference=ref)
| true | true |
f7ffb7441a98c6a8d5531e1958bf654731931f9a | 3,170 | py | Python | client/gui/Login.py | Howest-AdvMathProg/project-2021-Joren-vanGoethem | 224d871951ef40d0448aa33cee9064f8e0f82af7 | [
"MIT"
] | null | null | null | client/gui/Login.py | Howest-AdvMathProg/project-2021-Joren-vanGoethem | 224d871951ef40d0448aa33cee9064f8e0f82af7 | [
"MIT"
] | null | null | null | client/gui/Login.py | Howest-AdvMathProg/project-2021-Joren-vanGoethem | 224d871951ef40d0448aa33cee9064f8e0f82af7 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import *
from PyQt5.uic import *
from PyQt5 import *
import sys # We need sys so that we can pass argv to QApplication
from gui.ui.mainwindowui import Ui_MainWindow
from User import User
from time import sleep
class Login(QDialog):
def __init__(self, MainWindow, parent=None):
super().__init__(parent)
loadUi('gui/ui/Login.ui', self)
self._MainWindow = MainWindow
self.Logged_in = False
self._input_fields = []
self.get_input_fields()
self.exec()
#override van originele functie
def accept(self):
empty = []
for field in self._input_fields:
if field.text() == "":
empty.insert(0,field) # insert to front of list so order is accurate in error message
elif "nickname" in field.objectName().lower():
NickName = field.text()
elif "name" in field.objectName().lower():
Name = field.text()
elif "email" in field.objectName().lower():
Email = field.text()
elif "password" in field.objectName().lower():
Password = field.text()
matching = False
if len(empty) > 0:
self.empty_fields(empty)
else:
matching = self.checkPasswords()
if matching:
user = User(Name, NickName, Email, Password)
self._MainWindow.User = user
self._MainWindow.backend.send_event("IDENTIFY", user.to_json())
for i in range(1000):
if self._MainWindow.backend.identified:
break
sleep(.01)
if not self._MainWindow.backend.identified:
return
self.Logged_in = True
self.close()
def checkPasswords(self):
passwords = []
for field in self._input_fields:
if "password" in field.objectName().lower():
passwords.append(field.text())
if passwords[0] == passwords[1]:
return True
else:
self.different_passwords()
return False
def empty_fields(self, fields):
html = "<ul>"
for field in fields:
html += f"<li> {field.objectName()[:-5]}</li>" # remove Field at the end of the string
html += "</ul>"
QMessageBox.about(
self,
"login error",
"<p>The following fields were not filled in correctly:</p>"
f"{html}",
)
def different_passwords(self):
QMessageBox.about(
self,
"login error",
"<p>The passwords don't match</p>",
)
def closeEvent(self, event):
if self.Logged_in:
pass
else:
sys.exit() # let the window close
def get_input_fields(self):
for widget in self.children():
if isinstance(widget, QPlainTextEdit) or isinstance(widget, QLineEdit):
if "password" in widget.objectName().lower():
widget.setEchoMode(QtGui.QLineEdit.Password)
self._input_fields.append(widget)
| 30.480769 | 101 | 0.552681 | from PyQt5.QtWidgets import *
from PyQt5.uic import *
from PyQt5 import *
import sys
from gui.ui.mainwindowui import Ui_MainWindow
from User import User
from time import sleep
class Login(QDialog):
def __init__(self, MainWindow, parent=None):
super().__init__(parent)
loadUi('gui/ui/Login.ui', self)
self._MainWindow = MainWindow
self.Logged_in = False
self._input_fields = []
self.get_input_fields()
self.exec()
def accept(self):
empty = []
for field in self._input_fields:
if field.text() == "":
empty.insert(0,field)
elif "nickname" in field.objectName().lower():
NickName = field.text()
elif "name" in field.objectName().lower():
Name = field.text()
elif "email" in field.objectName().lower():
Email = field.text()
elif "password" in field.objectName().lower():
Password = field.text()
matching = False
if len(empty) > 0:
self.empty_fields(empty)
else:
matching = self.checkPasswords()
if matching:
user = User(Name, NickName, Email, Password)
self._MainWindow.User = user
self._MainWindow.backend.send_event("IDENTIFY", user.to_json())
for i in range(1000):
if self._MainWindow.backend.identified:
break
sleep(.01)
if not self._MainWindow.backend.identified:
return
self.Logged_in = True
self.close()
def checkPasswords(self):
passwords = []
for field in self._input_fields:
if "password" in field.objectName().lower():
passwords.append(field.text())
if passwords[0] == passwords[1]:
return True
else:
self.different_passwords()
return False
def empty_fields(self, fields):
html = "<ul>"
for field in fields:
html += f"<li> {field.objectName()[:-5]}</li>"
html += "</ul>"
QMessageBox.about(
self,
"login error",
"<p>The following fields were not filled in correctly:</p>"
f"{html}",
)
def different_passwords(self):
QMessageBox.about(
self,
"login error",
"<p>The passwords don't match</p>",
)
def closeEvent(self, event):
if self.Logged_in:
pass
else:
sys.exit() # let the window close
def get_input_fields(self):
for widget in self.children():
if isinstance(widget, QPlainTextEdit) or isinstance(widget, QLineEdit):
if "password" in widget.objectName().lower():
widget.setEchoMode(QtGui.QLineEdit.Password)
self._input_fields.append(widget)
| true | true |
f7ffb84e82522ef6fe5b873eb6a334a4655116fc | 3,468 | py | Python | cp/max_subarray_sum.py | hauntarl/real-python | 6ffb535648bf5c79c90e2ed7def842078bc7807f | [
"MIT"
] | 2 | 2020-12-15T18:11:00.000Z | 2021-03-01T11:43:16.000Z | cp/max_subarray_sum.py | hauntarl/real_python | 6ffb535648bf5c79c90e2ed7def842078bc7807f | [
"MIT"
] | null | null | null | cp/max_subarray_sum.py | hauntarl/real_python | 6ffb535648bf5c79c90e2ed7def842078bc7807f | [
"MIT"
] | null | null | null | """
Given an array of n numbers, calculate the maximum subarray sum, i.e., the
largest possible sum of a sequence of consecutive values in the array. The
problem is interesting when there may be negative values in the array.
For example, in the array: [-1, 2, 4, -3, 5, 2, -5, 2]
the following subarray produces the maximum sum 10: [2, 4, -3, 5, 2]
We assume that an empty subarray is allowed, so the maximum subarray sum is
always at least 0.
"""
from util import timeit
@timeit
def algorithm_1(array: list) -> int:
    """
    Algorithm 1 - Brute Force

    Goes through all possible subarrays, sums the values in each one and
    maintains the maximum.  [i] and [j] fix the first and last index
    (inclusive) of the subarray, [curr] holds the current sum and [best]
    the maximum found.  The empty subarray is allowed, so the result is
    at least 0.

    Bug fix: the inner loop previously iterated over range(i, j), which
    never included array[j] — so no subarray containing the last element
    was ever summed (e.g. [1, 2, 10] returned 3 instead of 13).  It now
    iterates over range(i, j + 1).

    The time complexity is O(n^3): three nested loops over the input.
    """
    best = 0
    size = len(array)
    for i in range(0, size):
        for j in range(i, size):
            curr = 0  # sum of current subarray array[i..j]
            for k in range(i, j + 1):
                curr += array[k]
            best = max(best, curr)
    return best
@timeit
def algorithm_2(array: list) -> int:
    """
    Algorithm 2 - Brute Force Optimized

    Removes one loop from Algorithm 1 by accumulating the sum while the
    right end of the subarray advances.  The empty subarray is allowed,
    so the result is at least 0.

    Time complexity: O(n^2).
    """
    best = 0
    for start in range(len(array)):
        running = 0
        for value in array[start:]:
            running += value
            if running > best:
                best = running
    return best
@timeit
def algorithm_3(arr: list) -> int:
    """
    Algorithm 3 - Kadane's Algorithm

    For each position, tracks the maximum sum of a subarray ending exactly
    there: either the previous such subarray extended by the current
    element, or a fresh subarray starting at the current element.  The
    answer is the maximum of these running values (at least 0, since the
    empty subarray is allowed).

    Time complexity: O(n) — a single pass, which is optimal since every
    element must be examined at least once.
    """
    best = 0
    ending_here = 0
    for value in arr:
        extended = ending_here + value
        # Extend the previous subarray or restart at the current element.
        ending_here = extended if extended >= value else value
        if ending_here > best:
            best = ending_here
    return best
# Run all three implementations on the example array from the module
# docstring; the @timeit decorator prints each result and elapsed time.
arr = [-1, 2, 4, -3, 5, 2, -5, 2]
algorithm_1(arr)
algorithm_2(arr)
algorithm_3(arr)
''' terminal
run algorithm_1([-1, 2, 4, -3, 5, 2, -5, 2])
got '10' in 0.0001549000 secs.
run algorithm_2([-1, 2, 4, -3, 5, 2, -5, 2])
got '10' in 0.0001001000 secs.
run algorithm_3([-1, 2, 4, -3, 5, 2, -5, 2])
got '10' in 0.0000287000 secs.
'''
| 31.243243 | 80 | 0.659458 | from util import timeit
@timeit
def algorithm_1(array: list) -> int:
best = 0
size = len(array)
for i in range(0, size):
for j in range(i, size):
curr = 0
for k in range(i, j):
curr += array[k]
best = max(best, curr)
return best
@timeit
def algorithm_2(array: list) -> int:
best = 0
size = len(array)
for i in range(0, size):
curr = 0
for j in range(i, size):
curr += array[j]
best = max(best, curr)
return best
@timeit
def algorithm_3(arr: list) -> int:
best, curr = 0, 0
for elem in arr:
curr = max(elem, curr + elem)
best = max(best, curr)
return best
arr = [-1, 2, 4, -3, 5, 2, -5, 2]
algorithm_1(arr)
algorithm_2(arr)
algorithm_3(arr)
| true | true |
f7ffb892e8610fda995948c4c171e6e76e5d5004 | 9,578 | py | Python | DropDtw-Code/train.py | Crossmdl/Crossmdl | 49f245349cc32f750bc33ef891b2ee90f60317a6 | [
"MIT"
] | null | null | null | DropDtw-Code/train.py | Crossmdl/Crossmdl | 49f245349cc32f750bc33ef891b2ee90f60317a6 | [
"MIT"
] | null | null | null | DropDtw-Code/train.py | Crossmdl/Crossmdl | 49f245349cc32f750bc33ef891b2ee90f60317a6 | [
"MIT"
] | null | null | null | import os
import torch
import argparse
import random
import torch
import numpy as np
import pytorch_lightning as pl
import torchmetrics
from copy import deepcopy, copy
import pickle as pkl
from paths import PROJECT_PATH, WEIGHTS_PATH
from models.nets import EmbeddingsMapping
from models.losses import compute_clust_loss, compute_alignment_loss
from models.visualization import visualize_drop_dtw_matching, visualize_step_strength
from data.data_module import DataModule
from data.data_utils import sample_to_device
from data.batching import unflatten_batch
from evaluate import compute_all_metrics
from utils import Namespace, load_yaml
# Train on GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Enabling reproducibility: fix the python, numpy and torch RNG seeds.
random.seed(10)
np.random.seed(10)
torch.manual_seed(10)
# Command-line configuration. `args` is read throughout this module.
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, help="name of the experiment", default="myexp")
parser.add_argument('--dataset', type=str, default='COIN', choices=['COIN', 'CrossTask', 'YouCook2'], help="name of the dataset we are encoding")

# training hyper-parameters
parser.add_argument('--batch_size', type=int, default=24, help="batch size")
# fixed: help text previously said "batch size" (copy-paste error)
parser.add_argument('--epochs', type=int, default=10, help="number of training epochs")
parser.add_argument('--lr', type=float, default=3e-4, help="learning rate")
parser.add_argument('--wd', type=float, default=1e-4, help="weight decay")
parser.add_argument('--n_cls', type=int, default=3, help="Number of videos of one class in a batch. Must divide batch_size")

# model hyper-parameters
parser.add_argument('--video_layers', type=int, default=2, help="Number of layers in nonlinear mapping for video embeddings")
parser.add_argument('--text_layers', type=int, default=0, help="Number of layers in nonlinear mapping for text embeddings")
parser.add_argument('--batchnorm', type=int, default=0, help="Whether to use batchnorm in models")
parser.add_argument('--pretrained_drop', action='store_true', default=False, help='Start with pre-trained drop costs')

# loss hyper-parameters
parser.add_argument('--dp_algo', type=str, default='DropDTW', choices=['DropDTW', 'OTAM', 'NW', 'DTW'], help="DP algo used for matching")
parser.add_argument('--drop_cost', type=str, default='logit', choices=['logit', 'learn'], help="The way to define drop cost")
# fixed: help text previously duplicated --dp_algo's description
parser.add_argument('--dtw_softning', type=str, default='prob', choices=['prob', 'gamma', 'none'], help="Softening scheme used in the DTW matching")
parser.add_argument('--keep_percentile', type=float, default=0.3, help="If drop cost is defined as logit, computes the percentile of drops")
# NOTE(review): argparse's type=bool treats ANY non-empty string (even
# "False") as True; passing --contiguous_drop False does not disable it.
parser.add_argument('--contiguous_drop', type=bool, default=True, help="Whether to do contiguous drop in Drop-DTW")
parser.add_argument('--clust_loss_mult', type=float, default=4, help="Multiplier for the step loss")
parser.add_argument('--dtw_loss_mult', type=float, default=2.5, help="Multiplier for the dtw loss")
parser.add_argument('--dtw_xz_gamma', type=float, default=10, help="Softmax temperature for xz product, in dtw")
parser.add_argument('--dtw_min_gamma', type=float, default=1, help="Softmax temperature for softmin, in dtw")
parser.add_argument('--step_xz_gamma', type=float, default=30, help="Softmax temperature for xz product, in step loss")
parser.add_argument('--bg_scope', type=str, default='global', choices=['global', 'class', 'video'], help="The scope where the background prototype is considered the same")
args = parser.parse_args()
class VisualizationCallback(pl.callbacks.Callback):
    """Logs step-strength / Drop-DTW matching visualizations to TensorBoard
    for one random sample of the batch, every 10 training steps."""
    def on_train_batch_end(self, trainer, pl_module, outputs, flat_batch, batch_idx, dataloader_idx):
        step = trainer.global_step
        if step % 10 == 0:
            original_sample = sample_to_device(random.choice(unflatten_batch(flat_batch)), device)
            # sample = deepcopy(original_sample)
            # Shallow copy is enough: only the keys re-assigned below differ
            # from the original sample.
            sample = copy(original_sample)
            sample['frame_features'] = pl_module.model.map_video(sample['frame_features'].to(device)).detach()
            sample['step_features'] = pl_module.model.map_text(sample['step_features'].to(device)).detach()
            if args.drop_cost == 'learn':
                # Learned drop (distractor) embedding from the mean step feature.
                distractor = pl_module.model.compute_distractors(sample['step_features'].mean(0)).detach().cpu()
            else:
                distractor = None
            sample_gammas = (args.dtw_xz_gamma, 1)
            # Compare the trained mapping ('Ours') against the raw
            # HowTo100M features of the same sample.
            sample_dict = {'Ours': sample_to_device(sample, 'cpu'),
                           'HowTo100M': sample_to_device(original_sample, 'cpu')}
            dtw_image = visualize_drop_dtw_matching(
                sample_dict, distractor, gamma_f=sample_gammas,
                drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))
            steps_image = visualize_step_strength(
                sample_dict, distractor, gamma_f=sample_gammas,
                drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))
            matching_picture = np.concatenate([steps_image, dtw_image], 1)
            # HWC -> CHW, the layout add_image expects.
            trainer.logger.experiment.add_image(
                'matching_picture', matching_picture.transpose((2, 0, 1)), global_step=step)
class TrainModule(pl.LightningModule):
    """Lightning module that trains the embedding-mapping model with the
    clustering and (Drop-)DTW alignment losses, and evaluates recall /
    DTW accuracy / DTW IoU on the validation set after every epoch."""
    def __init__(self, model, data, name=None):
        super(TrainModule, self).__init__()
        self.name = name
        self.model = model
        self.data = data
        # Running mean of the total loss, reported once per epoch.
        self.avg_loss_metric = torchmetrics.MeanMetric()
    def configure_optimizers(self):
        """Adam over all model parameters with the CLI lr / weight decay."""
        optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.wd)
        return optimizer
    def training_step(self, flat_batch, batch_id):
        """Map frame/step features and return the weighted sum of the
        clustering loss and the DTW alignment loss."""
        flat_batch['frame_features'] = self.model.map_video(flat_batch['frame_features'])
        flat_batch['step_features'] = self.model.map_text(flat_batch['step_features'])
        samples = unflatten_batch(flat_batch)
        if args.drop_cost == 'learn':
            # One learned distractor ("drop") embedding per sample, computed
            # from the mean of that sample's step embeddings.
            mean_steps = torch.stack([s['step_features'].mean(0) for s in samples], 0)
            distractors = self.model.compute_distractors(mean_steps)
        else:
            distractors = None
        # Computing total loss
        total_loss = 0
        if args.clust_loss_mult > 0:
            clust_loss = compute_clust_loss(samples, distractors, xz_hard_ratio=1,
                                            xz_gamma=args.step_xz_gamma, frame_gamma=10,
                                            all_classes_distinct=(args.dataset == 'YouCook2'),
                                            bg_scope=args.bg_scope)
            self.log('train/clust_loss', clust_loss)
            total_loss += args.clust_loss_mult * clust_loss
        if args.dtw_loss_mult > 0:
            dtw_loss = args.dtw_loss_mult * compute_alignment_loss(
                samples, distractors, contiguous=args.contiguous_drop,
                gamma_xz=args.dtw_xz_gamma, gamma_min=args.dtw_min_gamma,
                drop_cost_type=args.drop_cost, dp_algo=args.dp_algo,
                keep_percentile=args.keep_percentile, softning=args.dtw_softning)
            self.log('train/dtw_loss', dtw_loss)
            total_loss += dtw_loss
        self.log('train/total_loss', self.avg_loss_metric(total_loss))
        return total_loss
    def training_epoch_end(self, training_step_outputs):
        """Print/log the epoch's mean loss and run full validation metrics."""
        self.model.eval()
        avg_total_loss = self.avg_loss_metric.compute()
        print('Train Total loss: {:.2f}'.format(avg_total_loss))
        self.avg_loss_metric.reset()
        # Evaluation always uses Drop-DTW matching, regardless of the
        # training dp_algo.
        eval_config = Namespace(dp_algo='DropDTW', drop_cost=args.drop_cost, keep_percentile=0.3,
                                use_unlabeled=True, distance='inner', dataset=args.dataset)
        _, _, accuracy_dtw, iou_dtw, recall = compute_all_metrics(
            self.data.val_dataset, self.model, gamma=30, config=eval_config)
        print("Recall is ",recall)
        print("DTW Accuracy is ",accuracy_dtw)
        print("DTW IoU is ",iou_dtw)
        self.log("Metrics/Recall", recall)
        self.log("Metrics/Accuracy", accuracy_dtw)
        self.log("Metrics/IoU", iou_dtw)
def main():
    """Load the dataset, build the model (optionally restoring pre-trained
    drop costs) and run training with checkpointing and visualization."""
    # NOTE(review): unpickling executes arbitrary code if the file is
    # untrusted; only load pickles produced by this project.
    with open('dataset.pickle', 'rb') as f:
        data = pkl.load(f)
    # presumably `data` is a DataModule (see the data_module import) with a
    # val_dataset attribute — TODO confirm against the pickling script.
    print(len(data))
    model = EmbeddingsMapping(
        d=512, learnable_drop=(args.drop_cost == 'learn'), video_layers=args.video_layers,
        text_layers=args.text_layers, normalization_dataset=None,
        batchnorm=args.batchnorm)
    # load drop costs from a pre-trained model
    if args.pretrained_drop:
        # assumes that the model with the same name has been already trained
        # this retrains the model, but uses drop_mapping initialization from the previous training
        from glob import glob
        weights_path = glob(os.path.join(WEIGHTS_PATH, args.name, "weights-epoch=*.ckpt"))[0]
        # k[6:] strips the leading 'model.' prefix added by TrainModule.
        state_dict = {k[6:]: v for k, v in torch.load(weights_path, map_location=device)['state_dict'].items()
                      if k.startswith('model.drop_mapping')}
        model.load_state_dict(state_dict, strict=False)
    train_module = TrainModule(model, data)
    # Keep only the checkpoint with the best validation recall.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        monitor='Metrics/Recall',
        dirpath=os.path.join(PROJECT_PATH, 'weights', args.name),
        filename='weights-{epoch:02d}',
        save_top_k=1,
        mode='max',
    )
    vis_callback = VisualizationCallback()
    logger = pl.loggers.TensorBoardLogger('tb_logs', args.name)
    trainer = pl.Trainer(gpus=1, callbacks=[checkpoint_callback, vis_callback],
                         max_epochs=args.epochs, logger=logger)
    trainer.fit(train_module, data)
if __name__ == '__main__':
    # Report which device (cuda/cpu) training will run on, then start.
    print(device)
    main()
| 48.619289 | 171 | 0.687409 | import os
import torch
import argparse
import random
import torch
import numpy as np
import pytorch_lightning as pl
import torchmetrics
from copy import deepcopy, copy
import pickle as pkl
from paths import PROJECT_PATH, WEIGHTS_PATH
from models.nets import EmbeddingsMapping
from models.losses import compute_clust_loss, compute_alignment_loss
from models.visualization import visualize_drop_dtw_matching, visualize_step_strength
from data.data_module import DataModule
from data.data_utils import sample_to_device
from data.batching import unflatten_batch
from evaluate import compute_all_metrics
from utils import Namespace, load_yaml
device = "cuda" if torch.cuda.is_available() else "cpu"
random.seed(10)
np.random.seed(10)
torch.manual_seed(10)
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, help="name of the experiment", default="myexp")
parser.add_argument('--dataset', type=str, default='COIN', choices=['COIN', 'CrossTask', 'YouCook2'], help="name of the dataset we are encoding")
parser.add_argument('--batch_size', type=int, default=24, help="batch size")
parser.add_argument('--epochs', type=int, default=10, help="batch size")
parser.add_argument('--lr', type=float, default=3e-4, help="learning rate")
parser.add_argument('--wd', type=float, default=1e-4, help="weight decay")
parser.add_argument('--n_cls', type=int, default=3, help="Number of video of one class in a batch. Must divide batch_size")
parser.add_argument('--video_layers', type=int, default=2, help="Number of layers in nonlinear mapping for video embeddings")
parser.add_argument('--text_layers', type=int, default=0, help="Number of layers in nonlinear mapping for text embeddings")
parser.add_argument('--batchnorm', type=int, default=0, help="Wheather to use batchnorm in models")
parser.add_argument('--pretrained_drop', action='store_true', default=False, help='Start with pre-trained drop costs')
parser.add_argument('--dp_algo', type=str, default='DropDTW', choices=['DropDTW', 'OTAM', 'NW', 'DTW'], help="DP algo used for matching")
parser.add_argument('--drop_cost', type=str, default='logit', choices=['logit', 'learn'], help="The way to define drop cost")
parser.add_argument('--dtw_softning', type=str, default='prob', choices=['prob', 'gamma', 'none'], help="DP algo used for matching")
parser.add_argument('--keep_percentile', type=float, default=0.3, help="If drop cost is defined as logit, computes the percentile of drops")
parser.add_argument('--contiguous_drop', type=bool, default=True, help="Wheather to do contiguous drop in Drop-DTW")
parser.add_argument('--clust_loss_mult', type=float, default=4, help="Multiplier for the step loss")
parser.add_argument('--dtw_loss_mult', type=float, default=2.5, help="Multiplier for the dtw loss")
parser.add_argument('--dtw_xz_gamma', type=float, default=10, help="Softmax temperature for xz product, in dtw")
parser.add_argument('--dtw_min_gamma', type=float, default=1, help="Softmax temperature for softmin, in dtw")
parser.add_argument('--step_xz_gamma', type=float, default=30, help="Softmax temperature for xz product, in step loss")
parser.add_argument('--bg_scope', type=str, default='global', choices=['global', 'class', 'video'], help="The scope where the background prototype is conisdered the same")
args = parser.parse_args()
class VisualizationCallback(pl.callbacks.Callback):
def on_train_batch_end(self, trainer, pl_module, outputs, flat_batch, batch_idx, dataloader_idx):
step = trainer.global_step
if step % 10 == 0:
original_sample = sample_to_device(random.choice(unflatten_batch(flat_batch)), device)
sample = copy(original_sample)
sample['frame_features'] = pl_module.model.map_video(sample['frame_features'].to(device)).detach()
sample['step_features'] = pl_module.model.map_text(sample['step_features'].to(device)).detach()
if args.drop_cost == 'learn':
distractor = pl_module.model.compute_distractors(sample['step_features'].mean(0)).detach().cpu()
else:
distractor = None
sample_gammas = (args.dtw_xz_gamma, 1)
sample_dict = {'Ours': sample_to_device(sample, 'cpu'),
'HowTo100M': sample_to_device(original_sample, 'cpu')}
dtw_image = visualize_drop_dtw_matching(
sample_dict, distractor, gamma_f=sample_gammas,
drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))
steps_image = visualize_step_strength(
sample_dict, distractor, gamma_f=sample_gammas,
drop_cost=args.drop_cost, keep_percentile=args.keep_percentile, shape=(10, 2))
matching_picture = np.concatenate([steps_image, dtw_image], 1)
trainer.logger.experiment.add_image(
'matching_picture', matching_picture.transpose((2, 0, 1)), global_step=step)
class TrainModule(pl.LightningModule):
def __init__(self, model, data, name=None):
super(TrainModule, self).__init__()
self.name = name
self.model = model
self.data = data
self.avg_loss_metric = torchmetrics.MeanMetric()
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.wd)
return optimizer
def training_step(self, flat_batch, batch_id):
flat_batch['frame_features'] = self.model.map_video(flat_batch['frame_features'])
flat_batch['step_features'] = self.model.map_text(flat_batch['step_features'])
samples = unflatten_batch(flat_batch)
if args.drop_cost == 'learn':
mean_steps = torch.stack([s['step_features'].mean(0) for s in samples], 0)
distractors = self.model.compute_distractors(mean_steps)
else:
distractors = None
total_loss = 0
if args.clust_loss_mult > 0:
clust_loss = compute_clust_loss(samples, distractors, xz_hard_ratio=1,
xz_gamma=args.step_xz_gamma, frame_gamma=10,
all_classes_distinct=(args.dataset == 'YouCook2'),
bg_scope=args.bg_scope)
self.log('train/clust_loss', clust_loss)
total_loss += args.clust_loss_mult * clust_loss
if args.dtw_loss_mult > 0:
dtw_loss = args.dtw_loss_mult * compute_alignment_loss(
samples, distractors, contiguous=args.contiguous_drop,
gamma_xz=args.dtw_xz_gamma, gamma_min=args.dtw_min_gamma,
drop_cost_type=args.drop_cost, dp_algo=args.dp_algo,
keep_percentile=args.keep_percentile, softning=args.dtw_softning)
self.log('train/dtw_loss', dtw_loss)
total_loss += dtw_loss
self.log('train/total_loss', self.avg_loss_metric(total_loss))
return total_loss
def training_epoch_end(self, training_step_outputs):
self.model.eval()
avg_total_loss = self.avg_loss_metric.compute()
print('Train Total loss: {:.2f}'.format(avg_total_loss))
self.avg_loss_metric.reset()
eval_config = Namespace(dp_algo='DropDTW', drop_cost=args.drop_cost, keep_percentile=0.3,
use_unlabeled=True, distance='inner', dataset=args.dataset)
_, _, accuracy_dtw, iou_dtw, recall = compute_all_metrics(
self.data.val_dataset, self.model, gamma=30, config=eval_config)
print("Recall is ",recall)
print("DTW Accuracy is ",accuracy_dtw)
print("DTW IoU is ",iou_dtw)
self.log("Metrics/Recall", recall)
self.log("Metrics/Accuracy", accuracy_dtw)
self.log("Metrics/IoU", iou_dtw)
def main():
    """Entry point: load the pickled dataset, build the embedding-mapping
    model (optionally warm-starting the drop branch from a checkpoint),
    and launch a PyTorch Lightning training run."""
    # NOTE(review): pickle is only safe on trusted, locally produced files.
    with open('dataset.pickle', 'rb') as dataset_file:
        data = pkl.load(dataset_file)
    print(len(data))

    model = EmbeddingsMapping(
        d=512, learnable_drop=(args.drop_cost == 'learn'),
        video_layers=args.video_layers, text_layers=args.text_layers,
        normalization_dataset=None, batchnorm=args.batchnorm)

    if args.pretrained_drop:
        # Warm-start only the drop-mapping sub-network from the latest
        # checkpoint saved under this run's name.
        from glob import glob
        ckpt_path = glob(os.path.join(WEIGHTS_PATH, args.name,
                                      "weights-epoch=*.ckpt"))[0]
        full_state = torch.load(ckpt_path, map_location=device)['state_dict']
        # Strip the leading "model." prefix Lightning adds to parameter names.
        drop_state = {key[6:]: value for key, value in full_state.items()
                      if key.startswith('model.drop_mapping')}
        model.load_state_dict(drop_state, strict=False)

    train_module = TrainModule(model, data)

    # Keep only the checkpoint with the best validation recall.
    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        monitor='Metrics/Recall',
        dirpath=os.path.join(PROJECT_PATH, 'weights', args.name),
        filename='weights-{epoch:02d}',
        save_top_k=1,
        mode='max',
    )
    vis_callback = VisualizationCallback()
    logger = pl.loggers.TensorBoardLogger('tb_logs', args.name)

    trainer = pl.Trainer(gpus=1, callbacks=[checkpoint_callback, vis_callback],
                         max_epochs=args.epochs, logger=logger)
    trainer.fit(train_module, data)
if __name__ == '__main__':
    # Announce the compute device (defined at module level) before training.
    print(device)
    main()
| true | true |
f7ffb91b351e62797bb1380ee16f6476bbcdd213 | 86,251 | py | Python | llvmlite/tests/test_ir.py | ax3l/llvmlite | 4acef2d5165768f43b315dd47ed99a5f8f63b3a8 | [
"BSD-2-Clause"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | llvmlite/tests/test_ir.py | ax3l/llvmlite | 4acef2d5165768f43b315dd47ed99a5f8f63b3a8 | [
"BSD-2-Clause"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | llvmlite/tests/test_ir.py | ax3l/llvmlite | 4acef2d5165768f43b315dd47ed99a5f8f63b3a8 | [
"BSD-2-Clause"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | """
IR Construction Tests
"""
import copy
import itertools
import pickle
import re
import textwrap
import unittest
from . import TestCase
from llvmlite import ir
from llvmlite import binding as llvm
int1 = ir.IntType(1)
int8 = ir.IntType(8)
int16 = ir.IntType(16)
int32 = ir.IntType(32)
int64 = ir.IntType(64)
hlf = ir.HalfType()
flt = ir.FloatType()
dbl = ir.DoubleType()
class TestBase(TestCase):
    """
    Utilities for IR tests.
    """

    def assertInText(self, pattern, text):
        """
        Assert *pattern* is in *text*, ignoring any whitespace differences
        (including newlines).
        """

        def escape(c):
            # Escape any regex metacharacter; whitespace is handled below.
            if not c.isalnum() and not c.isspace():
                return '\\' + c
            return c

        pattern = ''.join(map(escape, pattern))
        # Any whitespace run in the pattern matches any whitespace run
        # (possibly empty) in the text.
        regex = re.sub(r'\s+', r'\\s*', pattern)
        self.assertRegex(text, regex)

    def assert_ir_line(self, line, mod):
        """Assert *line* appears verbatim as a (stripped) line of the
        textual IR of *mod*."""
        lines = [line.strip() for line in str(mod).splitlines()]
        self.assertIn(line, lines)

    def assert_valid_ir(self, mod):
        """Assert *mod* parses as valid LLVM IR (parse_assembly raises
        otherwise)."""
        llvm.parse_assembly(str(mod))

    def assert_pickle_correctly(self, irobject):
        """Assert that the IR object pickles and unpickles correctly.

        The IR string is equal and that their type is equal
        """
        newobject = pickle.loads(pickle.dumps(irobject, protocol=-1))
        self.assertIs(irobject.__class__, newobject.__class__)
        self.assertEqual(str(irobject), str(newobject))
        return newobject

    def module(self):
        """Return a fresh, empty module."""
        return ir.Module()

    def function(self, module=None, name='my_func'):
        """Return a new function named *name* with signature
        i32(i32, i32, double, i32*), created in *module* (a fresh module
        by default)."""
        module = module or self.module()
        fnty = ir.FunctionType(int32, (int32, int32, dbl,
                                       ir.PointerType(int32)))
        # Bug fix: create the function in *module*, not in yet another
        # fresh module — otherwise an explicitly passed module never
        # actually receives the function.
        return ir.Function(module, fnty, name)

    def block(self, func=None, name=''):
        """Append and return a new basic block on *func* (a fresh function
        by default)."""
        func = func or self.function()
        return func.append_basic_block(name)

    def descr(self, thing):
        """Return the textual IR description of *thing*."""
        buf = []
        thing.descr(buf)
        return "".join(buf)

    def _normalize_asm(self, asm):
        asm = textwrap.dedent(asm)
        # Normalize indent
        asm = asm.replace("\n    ", "\n  ")
        return asm

    def check_descr(self, descr, asm):
        """Assert *descr* equals the normalized form of *asm*."""
        expected = self._normalize_asm(asm)
        self.assertEqual(descr, expected)

    def check_block(self, block, asm):
        """Assert *block*'s textual IR matches *asm* (after normalization)."""
        self.check_descr(self.descr(block), asm)

    def check_module_body(self, module, asm):
        """Assert the module body (without header/metadata) matches *asm*."""
        expected = self._normalize_asm(asm)
        actual = module._stringify_body()
        self.assertEqual(actual.strip(), expected.strip())

    def check_metadata(self, module, asm):
        """
        Check module metadata against *asm*.
        """
        expected = self._normalize_asm(asm)
        actual = module._stringify_metadata()
        self.assertEqual(actual.strip(), expected.strip())

    def check_func_body(self, func, asm):
        """Assert the body of *func* (between the outer braces) matches
        *asm*."""
        expected = self._normalize_asm(asm)
        actual = self.descr(func)
        actual = actual.partition('{')[2].rpartition('}')[0]
        self.assertEqual(actual.strip(), expected.strip())
class TestFunction(TestBase):
    """
    IR generation tests for function declarations/definitions, their
    attributes, metadata, and intrinsic declarations.
    """

    # Expected textual prototype of the function built by TestBase.function().
    proto = """i32 @"my_func"(i32 %".1", i32 %".2", double %".3", i32* %".4")"""

    def test_declare(self):
        # A simple declaration
        func = self.function()
        asm = self.descr(func).strip()
        self.assertEqual(asm.strip(), "declare %s" % self.proto)

    def test_declare_attributes(self):
        # Now with function attributes
        func = self.function()
        func.attributes.add("optsize")
        func.attributes.add("alwaysinline")
        func.attributes.alignstack = 16
        tp_pers = ir.FunctionType(int8, (), var_arg=True)
        pers = ir.Function(self.module(), tp_pers, '__gxx_personality_v0')
        func.attributes.personality = pers
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         ("declare %s alwaysinline optsize alignstack(16) "
                          "personality i8 (...)* @\"__gxx_personality_v0\"") %
                         self.proto)
        # Check pickling
        self.assert_pickle_correctly(func)

    def test_function_attributes(self):
        # Now with parameter attributes
        func = self.function()
        func.args[0].add_attribute("zeroext")
        func.args[1].attributes.dereferenceable = 5
        func.args[1].attributes.dereferenceable_or_null = 10
        func.args[3].attributes.align = 4
        func.args[3].add_attribute("nonnull")
        func.return_value.add_attribute("noalias")
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         """declare noalias i32 @"my_func"(i32 zeroext %".1", i32 dereferenceable(5) dereferenceable_or_null(10) %".2", double %".3", i32* nonnull align 4 %".4")"""  # noqa E501
                         )
        # Check pickling
        self.assert_pickle_correctly(func)

    def test_function_metadata(self):
        # Now with function metadata
        module = self.module()
        func = self.function(module)
        func.set_metadata('dbg', module.add_metadata([]))
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         """declare i32 @"my_func"(i32 %".1", i32 %".2", double %".3", i32* %".4") !dbg !0"""  # noqa E501
                         )
        # Check pickling
        self.assert_pickle_correctly(func)

    def test_define(self):
        # A simple definition
        func = self.function()
        func.attributes.add("alwaysinline")
        block = func.append_basic_block('my_block')
        builder = ir.IRBuilder(block)
        builder.ret_void()
        asm = self.descr(func)
        self.check_descr(asm, """\
            define {proto} alwaysinline
            {{
            my_block:
                ret void
            }}
            """.format(proto=self.proto))

    def test_declare_intrinsics(self):
        # Intrinsic names are mangled with the overloaded operand types.
        module = self.module()
        pint8 = int8.as_pointer()
        powi = module.declare_intrinsic('llvm.powi', [dbl])
        memset = module.declare_intrinsic('llvm.memset', [pint8, int32])
        memcpy = module.declare_intrinsic('llvm.memcpy', [pint8, pint8, int32])
        assume = module.declare_intrinsic('llvm.assume')
        self.check_descr(self.descr(powi).strip(), """\
            declare double @"llvm.powi.f64"(double %".1", i32 %".2")""")
        self.check_descr(self.descr(memset).strip(), """\
            declare void @"llvm.memset.p0i8.i32"(i8* %".1", i8 %".2", i32 %".3", i1 %".4")""")  # noqa E501
        self.check_descr(self.descr(memcpy).strip(), """\
            declare void @"llvm.memcpy.p0i8.p0i8.i32"(i8* %".1", i8* %".2", i32 %".3", i1 %".4")""")  # noqa E501
        self.check_descr(self.descr(assume).strip(), """\
            declare void @"llvm.assume"(i1 %".1")""")

    def test_redeclare_intrinsic(self):
        # Redeclaring the same intrinsic returns the existing function.
        module = self.module()
        powi = module.declare_intrinsic('llvm.powi', [dbl])
        powi2 = module.declare_intrinsic('llvm.powi', [dbl])
        self.assertIs(powi, powi2)

    def test_pickling(self):
        fn = self.function()
        self.assert_pickle_correctly(fn)
class TestIR(TestBase):
    """
    Tests for module-level IR constructs: metadata (unnamed, named,
    strings, debug info) and inline assembly.
    """

    def test_unnamed_metadata(self):
        # An unnamed metadata node
        mod = self.module()
        mod.add_metadata([int32(123), int8(42)])
        self.assert_ir_line("!0 = !{ i32 123, i8 42 }", mod)
        self.assert_valid_ir(mod)

    def test_unnamed_metadata_2(self):
        # Several unnamed metadata nodes
        mod = self.module()
        # First node has a literal metadata string
        m0 = mod.add_metadata([int32(123), "kernel"])
        # Second node refers to the first one
        m1 = mod.add_metadata([int64(456), m0])
        # Third node is the same as the second one
        m2 = mod.add_metadata([int64(456), m0])
        self.assertIs(m2, m1)
        # Fourth node refers to the first three
        mod.add_metadata([m0, m1, m2])
        self.assert_ir_line('!0 = !{ i32 123, !"kernel" }', mod)
        self.assert_ir_line('!1 = !{ i64 456, !0 }', mod)
        self.assert_ir_line('!2 = !{ !0, !1, !1 }', mod)

    def test_unnamed_metadata_3(self):
        # Passing nested metadata as a sequence
        mod = self.module()
        mod.add_metadata([int32(123), [int32(456)], [int32(789)], [int32(456)]])
        self.assert_ir_line('!0 = !{ i32 456 }', mod)
        self.assert_ir_line('!1 = !{ i32 789 }', mod)
        self.assert_ir_line('!2 = !{ i32 123, !0, !1, !0 }', mod)

    def test_metadata_string(self):
        # Escaping contents of a metadata string
        mod = self.module()
        mod.add_metadata(["\"\\$"])
        self.assert_ir_line('!0 = !{ !"\\22\\5c$" }', mod)

    def test_named_metadata(self):
        # Add a named metadata node and add metadata values to it
        mod = self.module()
        m0 = mod.add_metadata([int32(123)])
        m1 = mod.add_metadata([int64(456)])
        nmd = mod.add_named_metadata("foo")
        nmd.add(m0)
        nmd.add(m1)
        nmd.add(m0)
        self.assert_ir_line("!foo = !{ !0, !1, !0 }", mod)
        self.assert_valid_ir(mod)
        # Check get_named_metadata()
        self.assertIs(nmd, mod.get_named_metadata("foo"))
        with self.assertRaises(KeyError):
            mod.get_named_metadata("bar")

    def test_named_metadata_2(self):
        # Add and set named metadata through a single add_named_metadata() call
        mod = self.module()
        m0 = mod.add_metadata([int32(123)])
        mod.add_named_metadata("foo", m0)
        mod.add_named_metadata("foo", [int64(456)])
        mod.add_named_metadata("foo", ["kernel"])
        mod.add_named_metadata("bar", [])
        self.assert_ir_line("!foo = !{ !0, !1, !2 }", mod)
        self.assert_ir_line("!0 = !{ i32 123 }", mod)
        self.assert_ir_line("!1 = !{ i64 456 }", mod)
        self.assert_ir_line('!2 = !{ !"kernel" }', mod)
        self.assert_ir_line("!bar = !{ !3 }", mod)
        self.assert_ir_line('!3 = !{ }', mod)
        self.assert_valid_ir(mod)

    def test_metadata_null(self):
        # A null metadata (typed) value
        mod = self.module()
        mod.add_metadata([int32.as_pointer()(None)])
        self.assert_ir_line("!0 = !{ i32* null }", mod)
        self.assert_valid_ir(mod)
        # A null metadata (untyped) value
        mod = self.module()
        mod.add_metadata([None, int32(123)])
        self.assert_ir_line("!0 = !{ null, i32 123 }", mod)
        self.assert_valid_ir(mod)

    def test_debug_info(self):
        # Add real world-looking debug information to a module
        # (with various value types)
        mod = self.module()
        di_file = mod.add_debug_info("DIFile", {
            "filename": "foo",
            "directory": "bar",
        })
        di_func_type = mod.add_debug_info("DISubroutineType", {
            # None as `null`
            "types": mod.add_metadata([None]),
        })
        di_compileunit = mod.add_debug_info("DICompileUnit", {
            "language": ir.DIToken("DW_LANG_Python"),
            "file": di_file,
            "producer": "ARTIQ",
            "runtimeVersion": 0,
            "isOptimized": True,
        }, is_distinct=True)
        mod.add_debug_info("DISubprogram", {
            "name": "my_func",
            "file": di_file,
            "line": 11,
            "type": di_func_type,
            "isLocal": False,
            "unit": di_compileunit,
        }, is_distinct=True)
        # Check output
        strmod = str(mod)
        self.assert_ir_line('!0 = !DIFile(directory: "bar", filename: "foo")',
                            strmod)
        self.assert_ir_line('!1 = !{ null }', strmod)
        self.assert_ir_line('!2 = !DISubroutineType(types: !1)', strmod)
        # self.assert_ir_line('!4 = !{ !3 }', strmod)
        self.assert_ir_line('!3 = distinct !DICompileUnit(file: !0, '
                            'isOptimized: true, language: DW_LANG_Python, '
                            'producer: "ARTIQ", runtimeVersion: 0)',
                            strmod)
        self.assert_ir_line('!4 = distinct !DISubprogram(file: !0, isLocal: '
                            'false, line: 11, name: "my_func", type: !2, unit: '
                            '!3)',
                            strmod)
        self.assert_valid_ir(mod)

    def test_debug_info_2(self):
        # Identical debug info nodes should be merged
        mod = self.module()
        di1 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  })
        di2 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  })
        di3 = mod.add_debug_info("DIFile",
                                 {"filename": "bar",
                                  "directory": "foo",
                                  })
        di4 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  }, is_distinct=True)
        self.assertIs(di1, di2)
        self.assertEqual(len({di1, di2, di3, di4}), 3)
        # Check output
        strmod = str(mod)
        self.assert_ir_line('!0 = !DIFile(directory: "bar", filename: "foo")',
                            strmod)
        self.assert_ir_line('!1 = !DIFile(directory: "foo", filename: "bar")',
                            strmod)
        self.assert_ir_line('!2 = distinct !DIFile(directory: "bar", filename: '
                            '"foo")', strmod)
        self.assert_valid_ir(mod)

    def test_inline_assembly(self):
        # Inline asm constructed directly via ir.InlineAsm.
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        asmty = ir.FunctionType(int32, [int32])
        asm = ir.InlineAsm(asmty, "mov $1, $2", "=r,r", side_effect=True)
        builder.call(asm, [int32(123)])
        builder.ret_void()
        pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)

    def test_builder_asm(self):
        # Same as above but through the IRBuilder.asm() convenience method.
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        asmty = ir.FunctionType(int32, [int32])
        builder.asm(asmty, "mov $1, $2", "=r,r", [int32(123)], side_effect=True)
        builder.ret_void()
        pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)

    def test_builder_load_reg(self):
        # Reading a machine register via inline asm.
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        builder.load_reg(ir.IntType(64), "rax")
        builder.ret_void()
        pat = 'call i64 asm "", "={rax}"'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)

    def test_builder_store_reg(self):
        # Writing a machine register via inline asm.
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        builder.store_reg(int64(123), ir.IntType(64), "rax")
        builder.ret_void()
        pat = 'call void asm sideeffect "", "{rax}" ( i64 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)
class TestGlobalValues(TestBase):
    """
    Tests for global variables and global-value access on a module.
    """

    def test_globals_access(self):
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'bar')
        globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble')
        self.assertEqual(mod.get_global('foo'), foo)
        self.assertEqual(mod.get_global('globdouble'), globdouble)
        with self.assertRaises(KeyError):
            mod.get_global('kkk')
        # Globals should have a useful repr()
        self.assertEqual(repr(globdouble),
                         "<ir.GlobalVariable 'globdouble' of type 'double*'>")

    def test_functions_global_values_access(self):
        """
        Accessing functions and global values through Module.functions
        and Module.global_values.
        """
        mod = self.module()
        fty = ir.FunctionType(ir.VoidType(), [])
        foo = ir.Function(mod, fty, 'foo')
        bar = ir.Function(mod, fty, 'bar')
        globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble')
        self.assertEqual(set(mod.functions), set((foo, bar)))
        self.assertEqual(set(mod.global_values), set((foo, bar, globdouble)))

    def test_global_variables_ir(self):
        """
        IR serialization of global variables.
        """
        mod = self.module()
        # the following have side effects and write to self.module()
        a = ir.GlobalVariable(mod, int8, 'a')                  # noqa F841
        b = ir.GlobalVariable(mod, int8, 'b', addrspace=42)    # noqa F841
        # Initialized global variable doesn't default to "external"
        c = ir.GlobalVariable(mod, int32, 'c')
        c.initializer = int32(123)
        d = ir.GlobalVariable(mod, int32, 'd')
        d.global_constant = True
        # Non-external linkage implies default "undef" initializer
        e = ir.GlobalVariable(mod, int32, 'e')
        e.linkage = "internal"
        f = ir.GlobalVariable(mod, int32, 'f', addrspace=456)
        f.unnamed_addr = True
        g = ir.GlobalVariable(mod, int32, 'g')
        g.linkage = "internal"
        g.initializer = int32(123)
        g.align = 16
        self.check_module_body(mod, """\
            @"a" = external global i8
            @"b" = external addrspace(42) global i8
            @"c" = global i32 123
            @"d" = external constant i32
            @"e" = internal global i32 undef
            @"f" = external unnamed_addr addrspace(456) global i32
            @"g" = internal global i32 123, align 16
            """)

    def test_pickle(self):
        mod = self.module()
        self.assert_pickle_correctly(mod)
class TestBlock(TestBase):
    """
    Tests for basic-block attributes, description and instruction
    replacement.
    """

    def test_attributes(self):
        func = self.function()
        block = ir.Block(parent=func, name='start')
        self.assertIs(block.parent, func)
        self.assertFalse(block.is_terminated)

    def test_descr(self):
        # Block description is "<label>:" followed by its instructions.
        block = self.block(name='my_block')
        self.assertEqual(self.descr(block), "my_block:\n")
        block.instructions.extend(['a', 'b'])
        self.assertEqual(self.descr(block), "my_block:\n  a\n  b\n")

    def test_replace(self):
        # Block.replace() swaps an instruction and rewires its users.
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.add(a, b, 'c')
        d = builder.sub(a, b, 'd')
        builder.mul(d, b, 'e')
        f = ir.Instruction(block, a.type, 'sdiv', (c, b), 'f')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"d" = sub i32 %".1", %".2"
                %"e" = mul i32 %"d", %".2"
            """)
        block.replace(d, f)
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"f" = sdiv i32 %"c", %".2"
                %"e" = mul i32 %"f", %".2"
            """)

    def test_repr(self):
        """
        Blocks should have a useful repr()
        """
        func = self.function()
        block = ir.Block(parent=func, name='start')
        self.assertEqual(repr(block), "<ir.Block 'start' of type 'label'>")
class TestBuildInstructions(TestBase):
"""
Test IR generation of LLVM instructions through the IRBuilder class.
"""
maxDiff = 4000
    def test_simple(self):
        """A single `add` instruction, and the instruction repr()."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        inst = builder.add(a, b, 'res')
        self.check_block(block, """\
            my_block:
                %"res" = add i32 %".1", %".2"
            """)
        # Instructions should have a useful repr()
        self.assertEqual(repr(inst),
                         "<ir.Instruction 'res' of type 'i32', opname 'add', "
                         "operands (<ir.Argument '.1' of type i32>, "
                         "<ir.Argument '.2' of type i32>)>")
    def test_binops(self):
        """All integer/float binary operations, plus the type-mismatch error."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b, ff = builder.function.args[:3]
        builder.add(a, b, 'c')
        builder.fadd(a, b, 'd')
        builder.sub(a, b, 'e')
        builder.fsub(a, b, 'f')
        builder.mul(a, b, 'g')
        builder.fmul(a, b, 'h')
        builder.udiv(a, b, 'i')
        builder.sdiv(a, b, 'j')
        builder.fdiv(a, b, 'k')
        builder.urem(a, b, 'l')
        builder.srem(a, b, 'm')
        builder.frem(a, b, 'n')
        builder.or_(a, b, 'o')
        builder.and_(a, b, 'p')
        builder.xor(a, b, 'q')
        builder.shl(a, b, 'r')
        builder.ashr(a, b, 's')
        builder.lshr(a, b, 't')
        # Mixing operand types must be rejected.
        with self.assertRaises(ValueError) as cm:
            builder.add(a, ff)
        self.assertEqual(str(cm.exception),
                         "Operands must be the same type, got (i32, double)")
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"d" = fadd i32 %".1", %".2"
                %"e" = sub i32 %".1", %".2"
                %"f" = fsub i32 %".1", %".2"
                %"g" = mul i32 %".1", %".2"
                %"h" = fmul i32 %".1", %".2"
                %"i" = udiv i32 %".1", %".2"
                %"j" = sdiv i32 %".1", %".2"
                %"k" = fdiv i32 %".1", %".2"
                %"l" = urem i32 %".1", %".2"
                %"m" = srem i32 %".1", %".2"
                %"n" = frem i32 %".1", %".2"
                %"o" = or i32 %".1", %".2"
                %"p" = and i32 %".1", %".2"
                %"q" = xor i32 %".1", %".2"
                %"r" = shl i32 %".1", %".2"
                %"s" = ashr i32 %".1", %".2"
                %"t" = lshr i32 %".1", %".2"
            """)
    def test_binop_flags(self):
        """Integer binop wrap flags (nuw/nsw), passed as tuple or list."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        # As tuple
        builder.add(a, b, 'c', flags=('nuw',))
        # and as list
        builder.sub(a, b, 'd', flags=['nuw', 'nsw'])
        self.check_block(block, """\
            my_block:
                %"c" = add nuw i32 %".1", %".2"
                %"d" = sub nuw nsw i32 %".1", %".2"
            """)
    def test_binop_fastmath_flags(self):
        """Fast-math flags on floating binops, passed as tuple or list."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        # As tuple
        builder.fadd(a, b, 'c', flags=('fast',))
        # and as list
        builder.fsub(a, b, 'd', flags=['ninf', 'nsz'])
        self.check_block(block, """\
            my_block:
                %"c" = fadd fast i32 %".1", %".2"
                %"d" = fsub ninf nsz i32 %".1", %".2"
            """)
    def test_binops_with_overflow(self):
        """Overflow-detecting arithmetic maps to llvm.*.with.overflow calls."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.sadd_with_overflow(a, b, 'c')
        builder.smul_with_overflow(a, b, 'd')
        builder.ssub_with_overflow(a, b, 'e')
        builder.uadd_with_overflow(a, b, 'f')
        builder.umul_with_overflow(a, b, 'g')
        builder.usub_with_overflow(a, b, 'h')
        self.check_block(block, """\
            my_block:
                %"c" = call {i32, i1} @"llvm.sadd.with.overflow.i32"(i32 %".1", i32 %".2")
                %"d" = call {i32, i1} @"llvm.smul.with.overflow.i32"(i32 %".1", i32 %".2")
                %"e" = call {i32, i1} @"llvm.ssub.with.overflow.i32"(i32 %".1", i32 %".2")
                %"f" = call {i32, i1} @"llvm.uadd.with.overflow.i32"(i32 %".1", i32 %".2")
                %"g" = call {i32, i1} @"llvm.umul.with.overflow.i32"(i32 %".1", i32 %".2")
                %"h" = call {i32, i1} @"llvm.usub.with.overflow.i32"(i32 %".1", i32 %".2")
            """)
    def test_unary_ops(self):
        """neg lowers to `sub 0, x`; not lowers to `xor x, -1`."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.neg(a, 'c')
        builder.not_(b, 'd')
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = sub i32 0, %".1"
                %"d" = xor i32 %".2", -1
            """)
    def test_replace_operand(self):
        """Instruction.replace_usage() swaps one operand for another."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        undef1 = ir.Constant(ir.IntType(32), ir.Undefined)
        undef2 = ir.Constant(ir.IntType(32), ir.Undefined)
        c = builder.add(undef1, undef2, 'c')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 undef, undef
            """)
        c.replace_usage(undef1, a)
        c.replace_usage(undef2, b)
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
            """)
    def test_integer_comparisons(self):
        """icmp_unsigned/icmp_signed for all operators, plus bad operators."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.icmp_unsigned('==', a, b, 'c')
        builder.icmp_unsigned('!=', a, b, 'd')
        builder.icmp_unsigned('<', a, b, 'e')
        builder.icmp_unsigned('<=', a, b, 'f')
        builder.icmp_unsigned('>', a, b, 'g')
        builder.icmp_unsigned('>=', a, b, 'h')
        builder.icmp_signed('==', a, b, 'i')
        builder.icmp_signed('!=', a, b, 'j')
        builder.icmp_signed('<', a, b, 'k')
        builder.icmp_signed('<=', a, b, 'l')
        builder.icmp_signed('>', a, b, 'm')
        builder.icmp_signed('>=', a, b, 'n')
        # Float-only and unknown operators are rejected for icmp.
        with self.assertRaises(ValueError):
            builder.icmp_signed('uno', a, b, 'zz')
        with self.assertRaises(ValueError):
            builder.icmp_signed('foo', a, b, 'zz')
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = icmp eq i32 %".1", %".2"
                %"d" = icmp ne i32 %".1", %".2"
                %"e" = icmp ult i32 %".1", %".2"
                %"f" = icmp ule i32 %".1", %".2"
                %"g" = icmp ugt i32 %".1", %".2"
                %"h" = icmp uge i32 %".1", %".2"
                %"i" = icmp eq i32 %".1", %".2"
                %"j" = icmp ne i32 %".1", %".2"
                %"k" = icmp slt i32 %".1", %".2"
                %"l" = icmp sle i32 %".1", %".2"
                %"m" = icmp sgt i32 %".1", %".2"
                %"n" = icmp sge i32 %".1", %".2"
            """)
    def test_float_comparisons(self):
        """fcmp_ordered/fcmp_unordered for all operators and fastmath flags."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.fcmp_ordered('==', a, b, 'c')
        builder.fcmp_ordered('!=', a, b, 'd')
        builder.fcmp_ordered('<', a, b, 'e')
        builder.fcmp_ordered('<=', a, b, 'f')
        builder.fcmp_ordered('>', a, b, 'g')
        builder.fcmp_ordered('>=', a, b, 'h')
        builder.fcmp_unordered('==', a, b, 'i')
        builder.fcmp_unordered('!=', a, b, 'j')
        builder.fcmp_unordered('<', a, b, 'k')
        builder.fcmp_unordered('<=', a, b, 'l')
        builder.fcmp_unordered('>', a, b, 'm')
        builder.fcmp_unordered('>=', a, b, 'n')
        # fcmp_ordered and fcmp_unordered are the same for these cases
        builder.fcmp_ordered('ord', a, b, 'u')
        builder.fcmp_ordered('uno', a, b, 'v')
        builder.fcmp_unordered('ord', a, b, 'w')
        builder.fcmp_unordered('uno', a, b, 'x')
        builder.fcmp_unordered('olt', a, b, 'y',
                               flags=['nnan', 'ninf', 'nsz', 'arcp', 'fast'])
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = fcmp oeq i32 %".1", %".2"
                %"d" = fcmp one i32 %".1", %".2"
                %"e" = fcmp olt i32 %".1", %".2"
                %"f" = fcmp ole i32 %".1", %".2"
                %"g" = fcmp ogt i32 %".1", %".2"
                %"h" = fcmp oge i32 %".1", %".2"
                %"i" = fcmp ueq i32 %".1", %".2"
                %"j" = fcmp une i32 %".1", %".2"
                %"k" = fcmp ult i32 %".1", %".2"
                %"l" = fcmp ule i32 %".1", %".2"
                %"m" = fcmp ugt i32 %".1", %".2"
                %"n" = fcmp uge i32 %".1", %".2"
                %"u" = fcmp ord i32 %".1", %".2"
                %"v" = fcmp uno i32 %".1", %".2"
                %"w" = fcmp ord i32 %".1", %".2"
                %"x" = fcmp uno i32 %".1", %".2"
                %"y" = fcmp nnan ninf nsz arcp fast olt i32 %".1", %".2"
            """)
    def test_misc_ops(self):
        """select and unreachable; the latter terminates the block."""
        block = self.block(name='my_block')
        t = ir.Constant(int1, True)
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.select(t, a, b, 'c')
        self.assertFalse(block.is_terminated)
        builder.unreachable()
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = select i1 true, i32 %".1", i32 %".2"
                unreachable
            """)
    def test_phi(self):
        """phi node with incoming values from two other blocks."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        bb2 = builder.function.append_basic_block('b2')
        bb3 = builder.function.append_basic_block('b3')
        phi = builder.phi(int32, 'my_phi')
        phi.add_incoming(a, bb2)
        phi.add_incoming(b, bb3)
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"my_phi" = phi i32 [%".1", %"b2"], [%".2", %"b3"]
            """)
    def test_mem_ops(self):
        """alloca/load/store (plain, aligned, atomic) and their type checks."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b, z = builder.function.args[:3]
        c = builder.alloca(int32, name='c')
        d = builder.alloca(int32, size=42, name='d')    # noqa F841
        e = builder.alloca(dbl, size=a, name='e')
        e.align = 8
        self.assertEqual(e.type, ir.PointerType(dbl))
        ee = builder.store(z, e)
        self.assertEqual(ee.type, ir.VoidType())
        f = builder.store(b, c)
        self.assertEqual(f.type, ir.VoidType())
        g = builder.load(c, 'g')
        self.assertEqual(g.type, int32)
        # With alignment
        h = builder.store(b, c, align=1)
        self.assertEqual(h.type, ir.VoidType())
        i = builder.load(c, 'i', align=1)
        self.assertEqual(i.type, int32)
        # Atomics
        j = builder.store_atomic(b, c, ordering="seq_cst", align=4)
        self.assertEqual(j.type, ir.VoidType())
        k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k')
        self.assertEqual(k.type, int32)
        # Not pointer types
        with self.assertRaises(TypeError):
            builder.store(b, a)
        with self.assertRaises(TypeError):
            builder.load(b)
        # Mismatching pointer type
        with self.assertRaises(TypeError) as cm:
            builder.store(b, e)
        self.assertEqual(str(cm.exception),
                         "cannot store i32 to double*: mismatching types")
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32
                %"d" = alloca i32, i32 42
                %"e" = alloca double, i32 %".1", align 8
                store double %".3", double* %"e"
                store i32 %".2", i32* %"c"
                %"g" = load i32, i32* %"c"
                store i32 %".2", i32* %"c", align 1
                %"i" = load i32, i32* %"c", align 1
                store atomic i32 %".2", i32* %"c" seq_cst, align 4
                %"k" = load atomic i32, i32* %"c" seq_cst, align 4
            """)
    def test_gep(self):
        """Basic getelementptr with constant and variable indices."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.alloca(ir.PointerType(int32), name='c')
        d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
        self.assertEqual(d.type, ir.PointerType(int32))
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32*
                %"d" = getelementptr i32*, i32** %"c", i32 5, i32 %".1"
            """)
        # XXX test with more complex types
    def test_gep_castinstr(self):
        """getelementptr through a bitcast-to-struct-pointer result."""
        # similar to:
        # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        int8ptr = int8.as_pointer()
        ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
        d = builder.bitcast(a, ls.as_pointer(), name='d')
        e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
        self.assertEqual(e.type, ir.PointerType(int8ptr))
        self.check_block(block, """\
            my_block:
                %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64}*
                %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64}* %"d", i32 0, i32 3
            """)  # noqa E501
    def test_gep_castinstr_addrspace(self):
        """The GEP result keeps the address space of the bitcast pointer."""
        # similar to:
        # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        addrspace = 4
        int8ptr = int8.as_pointer()
        ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
        d = builder.bitcast(a, ls.as_pointer(addrspace=addrspace), name='d')
        e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
        self.assertEqual(e.type.addrspace, addrspace)
        self.assertEqual(e.type, ir.PointerType(int8ptr, addrspace=addrspace))
        self.check_block(block, """\
            my_block:
                %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64} addrspace(4)*
                %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64} addrspace(4)* %"d", i32 0, i32 3
            """)  # noqa E501
    def test_gep_addrspace(self):
        """GEP on a pointer in a non-default address space preserves it."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        addrspace = 4
        c = builder.alloca(ir.PointerType(int32, addrspace=addrspace), name='c')
        self.assertEqual(str(c.type), 'i32 addrspace(4)**')
        self.assertEqual(c.type.pointee.addrspace, addrspace)
        d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
        self.assertEqual(d.type.addrspace, addrspace)
        e = builder.gep(d, [ir.Constant(int32, 10)], name='e')
        self.assertEqual(e.type.addrspace, addrspace)
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32 addrspace(4)*
                %"d" = getelementptr i32 addrspace(4)*, i32 addrspace(4)** %"c", i32 5, i32 %".1"
                %"e" = getelementptr i32, i32 addrspace(4)* %"d", i32 10
            """)  # noqa E501
    def test_extract_insert_value(self):
        """extractvalue/insertvalue on flat and nested struct values,
        including all the error cases."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        tp_inner = ir.LiteralStructType([int32, int1])
        tp_outer = ir.LiteralStructType([int8, tp_inner])
        c_inner = ir.Constant(tp_inner, (ir.Constant(int32, 4),
                                         ir.Constant(int1, True)))
        # Flat structure
        c = builder.extract_value(c_inner, 0, name='c')                 # noqa F841
        d = builder.insert_value(c_inner, a, 0, name='d')               # noqa F841
        e = builder.insert_value(d, ir.Constant(int1, False), 1, name='e')  # noqa F841 E501
        self.assertEqual(d.type, tp_inner)
        self.assertEqual(e.type, tp_inner)
        # Nested structure
        p_outer = builder.alloca(tp_outer, name='ptr')
        j = builder.load(p_outer, name='j')
        k = builder.extract_value(j, 0, name='k')
        l = builder.extract_value(j, 1, name='l')
        m = builder.extract_value(j, (1, 0), name='m')
        n = builder.extract_value(j, (1, 1), name='n')
        o = builder.insert_value(j, l, 1, name='o')
        p = builder.insert_value(j, a, (1, 0), name='p')
        self.assertEqual(k.type, int8)
        self.assertEqual(l.type, tp_inner)
        self.assertEqual(m.type, int32)
        self.assertEqual(n.type, int1)
        self.assertEqual(o.type, tp_outer)
        self.assertEqual(p.type, tp_outer)
        with self.assertRaises(TypeError):
            # Not an aggregate
            builder.extract_value(p_outer, 0)
        with self.assertRaises(TypeError):
            # Indexing too deep
            builder.extract_value(c_inner, (0, 0))
        with self.assertRaises(TypeError):
            # Index out of structure bounds
            builder.extract_value(c_inner, 5)
        with self.assertRaises(TypeError):
            # Not an aggregate
            builder.insert_value(a, b, 0)
        with self.assertRaises(TypeError):
            # Replacement value has the wrong type
            builder.insert_value(c_inner, a, 1)
        self.check_block(block, """\
            my_block:
                %"c" = extractvalue {i32, i1} {i32 4, i1 true}, 0
                %"d" = insertvalue {i32, i1} {i32 4, i1 true}, i32 %".1", 0
                %"e" = insertvalue {i32, i1} %"d", i1 false, 1
                %"ptr" = alloca {i8, {i32, i1}}
                %"j" = load {i8, {i32, i1}}, {i8, {i32, i1}}* %"ptr"
                %"k" = extractvalue {i8, {i32, i1}} %"j", 0
                %"l" = extractvalue {i8, {i32, i1}} %"j", 1
                %"m" = extractvalue {i8, {i32, i1}} %"j", 1, 0
                %"n" = extractvalue {i8, {i32, i1}} %"j", 1, 1
                %"o" = insertvalue {i8, {i32, i1}} %"j", {i32, i1} %"l", 1
                %"p" = insertvalue {i8, {i32, i1}} %"j", i32 %".1", 1, 0
            """)
    def test_cast_ops(self):
        """All cast instructions: trunc/ext, fp conversions, ptr/int, bitcast."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b, fa, ptr = builder.function.args[:4]
        c = builder.trunc(a, int8, name='c')
        d = builder.zext(c, int32, name='d')        # noqa F841
        e = builder.sext(c, int32, name='e')        # noqa F841
        fb = builder.fptrunc(fa, flt, 'fb')
        fc = builder.fpext(fb, dbl, 'fc')           # noqa F841
        g = builder.fptoui(fa, int32, 'g')
        h = builder.fptosi(fa, int8, 'h')
        fd = builder.uitofp(g, flt, 'fd')           # noqa F841
        fe = builder.sitofp(h, dbl, 'fe')           # noqa F841
        i = builder.ptrtoint(ptr, int32, 'i')
        j = builder.inttoptr(i, ir.PointerType(int8), 'j')  # noqa F841
        k = builder.bitcast(a, flt, "k")            # noqa F841
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = trunc i32 %".1" to i8
                %"d" = zext i8 %"c" to i32
                %"e" = sext i8 %"c" to i32
                %"fb" = fptrunc double %".3" to float
                %"fc" = fpext float %"fb" to double
                %"g" = fptoui double %".3" to i32
                %"h" = fptosi double %".3" to i8
                %"fd" = uitofp i32 %"g" to float
                %"fe" = sitofp i8 %"h" to double
                %"i" = ptrtoint i32* %".4" to i32
                %"j" = inttoptr i32 %"i" to i8*
                %"k" = bitcast i32 %".1" to float
            """)
def test_atomicrmw(self):
    """atomic_rmw produces an atomicrmw instruction with the operand type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    c = builder.alloca(int32, name='c')
    d = builder.atomic_rmw('add', c, a, 'monotonic', 'd')
    self.assertEqual(d.type, int32)
    self.check_block(block, """\
        my_block:
            %"c" = alloca i32
            %"d" = atomicrmw add i32* %"c", i32 %".1" monotonic
        """)
def test_branch(self):
    """An unconditional branch terminates the block."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    bb_target = builder.function.append_basic_block(name='target')
    builder.branch(bb_target)
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
        my_block:
            br label %"target"
        """)
def test_cbranch(self):
    """A conditional branch terminates the block."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    bb_true = builder.function.append_basic_block(name='b_true')
    bb_false = builder.function.append_basic_block(name='b_false')
    builder.cbranch(ir.Constant(int1, False), bb_true, bb_false)
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
        my_block:
            br i1 false, label %"b_true", label %"b_false"
        """)
def test_cbranch_weights(self):
    """set_weights() attaches !prof branch_weights metadata to a cbranch."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    bb_true = builder.function.append_basic_block(name='b_true')
    bb_false = builder.function.append_basic_block(name='b_false')
    br = builder.cbranch(ir.Constant(int1, False), bb_true, bb_false)
    br.set_weights([5, 42])
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
        my_block:
            br i1 false, label %"b_true", label %"b_false", !prof !0
        """)
    self.check_metadata(builder.module, """\
        !0 = !{ !"branch_weights", i32 5, i32 42 }
        """)
def test_branch_indirect(self):
    """branch_indirect emits indirectbr with its registered destinations."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    bb_1 = builder.function.append_basic_block(name='b_1')
    bb_2 = builder.function.append_basic_block(name='b_2')
    indirectbr = builder.branch_indirect(
        ir.BlockAddress(builder.function, bb_1))
    indirectbr.add_destination(bb_1)
    indirectbr.add_destination(bb_2)
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
        my_block:
            indirectbr i8* blockaddress(@"my_func", %"b_1"), [label %"b_1", label %"b_2"]
        """)  # noqa E501
def test_returns(self):
    """ret / ret_void terminate the block; metadata may be attached."""
    def check(block, expected_ir):
        # Every return variant must leave the block terminated.
        self.assertTrue(block.is_terminated)
        self.check_block(block, expected_ir)

    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    builder.ret_void()
    check(block, """\
        my_block:
            ret void
        """)
    block = self.block(name='other_block')
    builder = ir.IRBuilder(block)
    builder.ret(int32(5))
    check(block, """\
        other_block:
            ret i32 5
        """)
    # With metadata
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    inst = builder.ret_void()
    inst.set_metadata("dbg", block.module.add_metadata(()))
    check(block, """\
        my_block:
            ret void, !dbg !0
        """)
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    inst = builder.ret(int32(6))
    inst.set_metadata("dbg", block.module.add_metadata(()))
    check(block, """\
        my_block:
            ret i32 6, !dbg !0
        """)
def test_switch(self):
    """switch collects its cases; plain Python case values are converted."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    bb_onzero = builder.function.append_basic_block(name='onzero')
    bb_onone = builder.function.append_basic_block(name='onone')
    bb_ontwo = builder.function.append_basic_block(name='ontwo')
    bb_else = builder.function.append_basic_block(name='otherwise')
    sw = builder.switch(a, bb_else)
    sw.add_case(ir.Constant(int32, 0), bb_onzero)
    sw.add_case(ir.Constant(int32, 1), bb_onone)
    # A plain Python value gets converted into the right IR constant
    sw.add_case(2, bb_ontwo)
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
        my_block:
            switch i32 %".1", label %"otherwise" [i32 0, label %"onzero" i32 1, label %"onone" i32 2, label %"ontwo"]
        """)  # noqa E501
def test_call(self):
    """call supports vararg callees, cconv, attributes and fastmath flags."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    tp_f = ir.FunctionType(flt, (int32, int32))
    tp_g = ir.FunctionType(dbl, (int32,), var_arg=True)
    tp_h = ir.FunctionType(hlf, (int32, int32))
    f = ir.Function(builder.function.module, tp_f, 'f')
    g = ir.Function(builder.function.module, tp_g, 'g')
    h = ir.Function(builder.function.module, tp_h, 'h')
    builder.call(f, (a, b), 'res_f')
    builder.call(g, (b, a), 'res_g')
    builder.call(h, (a, b), 'res_h')
    builder.call(f, (a, b), 'res_f_fast', cconv='fastcc')
    res_f_readonly = builder.call(f, (a, b), 'res_f_readonly')
    res_f_readonly.attributes.add('readonly')
    builder.call(f, (a, b), 'res_fast', fastmath='fast')
    builder.call(f, (a, b), 'res_nnan_ninf', fastmath=('nnan', 'ninf'))
    self.check_block(block, """\
        my_block:
            %"res_f" = call float @"f"(i32 %".1", i32 %".2")
            %"res_g" = call double (i32, ...) @"g"(i32 %".2", i32 %".1")
            %"res_h" = call half @"h"(i32 %".1", i32 %".2")
            %"res_f_fast" = call fastcc float @"f"(i32 %".1", i32 %".2")
            %"res_f_readonly" = call float @"f"(i32 %".1", i32 %".2") readonly
            %"res_fast" = call fast float @"f"(i32 %".1", i32 %".2")
            %"res_nnan_ninf" = call ninf nnan float @"f"(i32 %".1", i32 %".2")
        """)
def test_call_metadata(self):
    """
    Function calls with metadata arguments.
    """
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    dbg_declare_ty = ir.FunctionType(ir.VoidType(), [ir.MetaDataType()] * 3)
    dbg_declare = ir.Function(
        builder.module,
        dbg_declare_ty,
        'llvm.dbg.declare')
    a = builder.alloca(int32, name="a")
    b = builder.module.add_metadata(())
    # A non-metadata value (the alloca) is auto-wrapped as "metadata <val>".
    builder.call(dbg_declare, (a, b, b))
    self.check_block(block, """\
        my_block:
            %"a" = alloca i32
            call void @"llvm.dbg.declare"(metadata i32* %"a", metadata !0, metadata !0)
        """)  # noqa E501
def test_invoke(self):
    """invoke emits the two-target (normal / unwind) call form."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    tp_f = ir.FunctionType(flt, (int32, int32))
    f = ir.Function(builder.function.module, tp_f, 'f')
    bb_normal = builder.function.append_basic_block(name='normal')
    bb_unwind = builder.function.append_basic_block(name='unwind')
    builder.invoke(f, (a, b), bb_normal, bb_unwind, 'res_f')
    self.check_block(block, """\
        my_block:
            %"res_f" = invoke float @"f"(i32 %".1", i32 %".2")
                to label %"normal" unwind label %"unwind"
        """)
def test_landingpad(self):
    """landingpad renders its catch and filter clauses, then resume."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    lp = builder.landingpad(ir.LiteralStructType([int32,
                                                  int8.as_pointer()]), 'lp')
    int_typeinfo = ir.GlobalVariable(builder.function.module,
                                     int8.as_pointer(), "_ZTIi")
    int_typeinfo.global_constant = True
    lp.add_clause(ir.CatchClause(int_typeinfo))
    lp.add_clause(ir.FilterClause(ir.Constant(ir.ArrayType(
        int_typeinfo.type, 1), [int_typeinfo])))
    builder.resume(lp)
    self.check_block(block, """\
        my_block:
            %"lp" = landingpad {i32, i8*}
                catch i8** @"_ZTIi"
                filter [1 x i8**] [i8** @"_ZTIi"]
            resume {i32, i8*} %"lp"
        """)
def test_assume(self):
    """assume lowers to a call to the llvm.assume intrinsic."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    c = builder.icmp_signed('>', a, b, name='c')
    builder.assume(c)
    self.check_block(block, """\
        my_block:
            %"c" = icmp sgt i32 %".1", %".2"
            call void @"llvm.assume"(i1 %"c")
        """)
def test_vector_ops(self):
    """insert/extract/shuffle element and arithmetic on vector values."""
    block = self.block(name='insert_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    a.name = 'a'
    b.name = 'b'
    vecty = ir.VectorType(a.type, 2)
    vec = ir.Constant(vecty, ir.Undefined)
    idxty = ir.IntType(32)
    vec = builder.insert_element(vec, a, idxty(0), name='vec1')
    vec = builder.insert_element(vec, b, idxty(1), name='vec2')
    self.check_block(block, """\
        insert_block:
            %"vec1" = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %"a", i32 0
            %"vec2" = insertelement <2 x i32> %"vec1", i32 %"b", i32 1
        """)  # noqa E501
    block = builder.append_basic_block("shuffle_block")
    builder.branch(block)
    builder.position_at_end(block)
    mask = ir.Constant(vecty, [1, 0])
    builder.shuffle_vector(vec, vec, mask, name='shuf')
    self.check_block(block, """\
        shuffle_block:
            %"shuf" = shufflevector <2 x i32> %"vec2", <2 x i32> %"vec2", <2 x i32> <i32 1, i32 0>
        """)  # noqa E501
    block = builder.append_basic_block("add_block")
    builder.branch(block)
    builder.position_at_end(block)
    builder.add(vec, vec, name='sum')
    self.check_block(block, """\
        add_block:
            %"sum" = add <2 x i32> %"vec2", %"vec2"
        """)
    block = builder.append_basic_block("extract_block")
    builder.branch(block)
    builder.position_at_end(block)
    c = builder.extract_element(vec, idxty(0), name='ex1')
    d = builder.extract_element(vec, idxty(1), name='ex2')
    self.check_block(block, """\
        extract_block:
            %"ex1" = extractelement <2 x i32> %"vec2", i32 0
            %"ex2" = extractelement <2 x i32> %"vec2", i32 1
        """)
    builder.ret(builder.add(c, d))
    # Round-trip the whole module through the LLVM verifier.
    self.assert_valid_ir(builder.module)
def test_bitreverse(self):
    """bitreverse lowers to the width-suffixed llvm.bitreverse intrinsic."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int64, 5)
    c = builder.bitreverse(a, name='c')
    builder.ret(c)
    self.check_block(block, """\
        my_block:
            %"c" = call i64 @"llvm.bitreverse.i64"(i64 5)
            ret i64 %"c"
        """)
def test_bitreverse_wrongtype(self):
    """bitreverse rejects non-integer operands with a TypeError."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5)
    with self.assertRaises(TypeError) as raises:
        builder.bitreverse(a, name='c')
    self.assertIn(
        "expected an integer type, got float",
        str(raises.exception))
def test_fence(self):
    """fence validates orderings and renders the optional syncscope."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    # "monotonic" is not a valid *fence* ordering (though it is a valid
    # atomic ordering elsewhere), and a missing ordering is also rejected.
    with self.assertRaises(ValueError) as raises:
        builder.fence("monotonic", None)
    self.assertIn(
        "Invalid fence ordering \"monotonic\"!",
        str(raises.exception))
    with self.assertRaises(ValueError) as raises:
        builder.fence(None, "monotonic")
    self.assertIn(
        "Invalid fence ordering \"None\"!",
        str(raises.exception))
    builder.fence("acquire", None)
    builder.fence("release", "singlethread")
    builder.fence("acq_rel", "singlethread")
    builder.fence("seq_cst")
    builder.ret_void()
    self.check_block(block, """\
        my_block:
            fence acquire
            fence syncscope("singlethread") release
            fence syncscope("singlethread") acq_rel
            fence seq_cst
            ret void
        """)
def test_bswap(self):
    """bswap lowers to the width-suffixed llvm.bswap intrinsic."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int32, 5)
    c = builder.bswap(a, name='c')
    builder.ret(c)
    self.check_block(block, """\
        my_block:
            %"c" = call i32 @"llvm.bswap.i32"(i32 5)
            ret i32 %"c"
        """)
def test_ctpop(self):
    """ctpop lowers to the width-suffixed llvm.ctpop intrinsic."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int16, 5)
    c = builder.ctpop(a, name='c')
    builder.ret(c)
    self.check_block(block, """\
        my_block:
            %"c" = call i16 @"llvm.ctpop.i16"(i16 5)
            ret i16 %"c"
        """)
def test_ctlz(self):
    """ctlz lowers to llvm.ctlz with the i1 is-zero-undef flag."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int16, 5)
    b = ir.Constant(int1, 1)
    c = builder.ctlz(a, b, name='c')
    builder.ret(c)
    self.check_block(block, """\
        my_block:
            %"c" = call i16 @"llvm.ctlz.i16"(i16 5, i1 1)
            ret i16 %"c"
        """)
def test_convert_to_fp16_f32(self):
    """convert_to_fp16 lowers a float to the llvm.convert.to.fp16 call."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5.0)
    b = builder.convert_to_fp16(a, name='b')
    builder.ret(b)
    self.check_block(block, """\
        my_block:
            %"b" = call i16 @"llvm.convert.to.fp16.f32"(float 0x4014000000000000)
            ret i16 %"b"
        """)  # noqa E501
def test_convert_to_fp16_f32_wrongtype(self):
    """convert_to_fp16 rejects non-float operands."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int16, 5)
    with self.assertRaises(TypeError) as raises:
        builder.convert_to_fp16(a, name='b')
    self.assertIn(
        "expected a float type, got i16",
        str(raises.exception))
def test_convert_from_fp16_f32(self):
    """convert_from_fp16 expands an i16 into the requested float type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int16, 5)
    b = builder.convert_from_fp16(a, name='b', to=flt)
    builder.ret(b)
    self.check_block(block, """\
        my_block:
            %"b" = call float @"llvm.convert.from.fp16.f32"(i16 5)
            ret float %"b"
        """)
def test_convert_from_fp16_f32_notype(self):
    """convert_from_fp16 requires an explicit float return type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        builder.convert_from_fp16(a, name='b')
    self.assertIn(
        "expected a float return type",
        str(raises.exception))
def test_convert_from_fp16_f32_wrongtype(self):
    """convert_from_fp16 rejects a non-i16 operand."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        builder.convert_from_fp16(a, name='b', to=flt)
    self.assertIn(
        "expected an i16 type, got float",
        str(raises.exception))
def test_convert_from_fp16_f32_wrongtype2(self):
    """convert_from_fp16 rejects a non-float *return* type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        builder.convert_from_fp16(a, name='b', to=int16)
    self.assertIn(
        "expected a float type, got i16",
        str(raises.exception))
def test_cttz(self):
    """cttz lowers to llvm.cttz with the i1 is-zero-undef flag."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int64, 5)
    b = ir.Constant(int1, 1)
    c = builder.cttz(a, b, name='c')
    builder.ret(c)
    self.check_block(block, """\
        my_block:
            %"c" = call i64 @"llvm.cttz.i64"(i64 5, i1 1)
            ret i64 %"c"
        """)
def test_cttz_wrongflag(self):
    """cttz rejects a non-i1 is-zero-undef flag."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int64, 5)
    b = ir.Constant(int32, 3)
    with self.assertRaises(TypeError) as raises:
        builder.cttz(a, b, name='c')
    self.assertIn(
        "expected an i1 type, got i32",
        str(raises.exception))
def test_cttz_wrongtype(self):
    """cttz rejects non-integer operands."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5)
    b = ir.Constant(int1, 1)
    with self.assertRaises(TypeError) as raises:
        builder.cttz(a, b, name='c')
    self.assertIn(
        "expected an integer type, got float",
        str(raises.exception))
def test_fma(self):
    """fma lowers to the type-suffixed llvm.fma intrinsic."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5)
    b = ir.Constant(flt, 1)
    c = ir.Constant(flt, 2)
    fma = builder.fma(a, b, c, name='fma')
    builder.ret(fma)
    self.check_block(block, """\
        my_block:
            %"fma" = call float @"llvm.fma.f32"(float 0x4014000000000000, float 0x3ff0000000000000, float 0x4000000000000000)
            ret float %"fma"
        """)  # noqa E501
def test_fma_wrongtype(self):
    """fma rejects integer operands (message text is llvmlite's own)."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int32, 5)
    b = ir.Constant(int32, 1)
    c = ir.Constant(int32, 2)
    with self.assertRaises(TypeError) as raises:
        builder.fma(a, b, c, name='fma')
    self.assertIn(
        "expected an floating point type, got i32",
        str(raises.exception))
def test_fma_mixedtypes(self):
    """fma requires all three operands to share one float type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5)
    b = ir.Constant(dbl, 1)
    c = ir.Constant(flt, 2)
    with self.assertRaises(TypeError) as raises:
        builder.fma(a, b, c, name='fma')
    self.assertIn(
        "expected types to be the same, got float, double, float",
        str(raises.exception))
class TestBuilderMisc(TestBase):
    """
    Test various other features of the IRBuilder class.
    """

    def test_attributes(self):
        """A builder exposes its owning function and module."""
        block = self.block(name='start')
        builder = ir.IRBuilder(block)
        self.assertIs(builder.function, block.parent)
        self.assertIsInstance(builder.function, ir.Function)
        self.assertIs(builder.module, block.parent.module)
        self.assertIsInstance(builder.module, ir.Module)

    def test_goto_block(self):
        """goto_block / goto_entry_block temporarily reposition the builder,
        inserting before an existing terminator when there is one."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.add(a, b, 'c')
        bb_new = builder.append_basic_block(name='foo')
        with builder.goto_block(bb_new):
            builder.fadd(a, b, 'd')
            with builder.goto_entry_block():
                builder.sub(a, b, 'e')
            builder.fsub(a, b, 'f')
            builder.branch(bb_new)
        builder.mul(a, b, 'g')
        with builder.goto_block(bb_new):
            # 'h' lands *before* the branch terminator already in bb_new.
            builder.fmul(a, b, 'h')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"e" = sub i32 %".1", %".2"
                %"g" = mul i32 %".1", %".2"
            """)
        self.check_block(bb_new, """\
            foo:
                %"d" = fadd i32 %".1", %".2"
                %"f" = fsub i32 %".1", %".2"
                %"h" = fmul i32 %".1", %".2"
                br label %"foo"
            """)

    def test_if_then(self):
        """if_then creates the .if / .endif blocks and terminates the .if
        block implicitly unless it is already terminated."""
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a) as bbend:
            builder.add(z, z, 'b')
            # Block will be terminated implicitly
        self.assertIs(builder.block, bbend)
        c = builder.add(z, z, 'c')
        with builder.if_then(c):
            builder.add(z, z, 'd')
            builder.branch(block)
            # No implicit termination
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.endif"
            one.if:
                %"b" = add i1 0, 0
                br label %"one.endif"
            one.endif:
                %"c" = add i1 0, 0
                br i1 %"c", label %"one.endif.if", label %"one.endif.endif"
            one.endif.if:
                %"d" = add i1 0, 0
                br label %"one"
            one.endif.endif:
            """)

    def test_if_then_nested(self):
        # Implicit termination in a nested if/then
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a):
            b = builder.add(z, z, 'b')
            with builder.if_then(b):
                builder.add(z, z, 'c')
        builder.ret_void()
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.endif"
            one.if:
                %"b" = add i1 0, 0
                br i1 %"b", label %"one.if.if", label %"one.if.endif"
            one.endif:
                ret void
            one.if.if:
                %"c" = add i1 0, 0
                br label %"one.if.endif"
            one.if.endif:
                br label %"one.endif"
            """)

    def test_if_then_long_label(self):
        """Auto-generated labels are truncated to a 25-char prefix + '..'."""
        full_label = 'Long' * 20
        block = self.block(name=full_label)
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a):
            b = builder.add(z, z, 'b')
            with builder.if_then(b):
                builder.add(z, z, 'c')
        builder.ret_void()
        self.check_func_body(builder.function, """\
            {full_label}:
                %"a" = add i1 0, 0
                br i1 %"a", label %"{label}.if", label %"{label}.endif"
            {label}.if:
                %"b" = add i1 0, 0
                br i1 %"b", label %"{label}.if.if", label %"{label}.if.endif"
            {label}.endif:
                ret void
            {label}.if.if:
                %"c" = add i1 0, 0
                br label %"{label}.if.endif"
            {label}.if.endif:
                br label %"{label}.endif"
            """.format(full_label=full_label, label=full_label[:25] + '..'))

    def test_if_then_likely(self):
        """likely=True/False attaches 99:1 / 1:99 branch weights."""
        def check(likely):
            block = self.block(name='one')
            builder = ir.IRBuilder(block)
            z = ir.Constant(int1, 0)
            with builder.if_then(z, likely=likely):
                pass
            self.check_block(block, """\
                one:
                    br i1 0, label %"one.if", label %"one.endif", !prof !0
                """)
            return builder

        builder = check(True)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 99, i32 1 }
            """)
        builder = check(False)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 1, i32 99 }
            """)

    def test_if_else(self):
        """if_else yields (then, otherwise) context managers and terminates
        each arm implicitly unless already terminated."""
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_else(a) as (then, otherwise):
            with then:
                builder.add(z, z, 'b')
            with otherwise:
                builder.add(z, z, 'c')
            # Each block will be terminated implicitly
        with builder.if_else(a) as (then, otherwise):
            with then:
                builder.branch(block)
            with otherwise:
                builder.ret_void()
            # No implicit termination
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.else"
            one.if:
                %"b" = add i1 0, 0
                br label %"one.endif"
            one.else:
                %"c" = add i1 0, 0
                br label %"one.endif"
            one.endif:
                br i1 %"a", label %"one.endif.if", label %"one.endif.else"
            one.endif.if:
                br label %"one"
            one.endif.else:
                ret void
            one.endif.endif:
            """)

    def test_if_else_likely(self):
        """likely flag on if_else attaches the same !prof weights."""
        def check(likely):
            block = self.block(name='one')
            builder = ir.IRBuilder(block)
            z = ir.Constant(int1, 0)
            with builder.if_else(z, likely=likely) as (then, otherwise):
                with then:
                    builder.branch(block)
                with otherwise:
                    builder.ret_void()
            self.check_func_body(builder.function, """\
                one:
                    br i1 0, label %"one.if", label %"one.else", !prof !0
                one.if:
                    br label %"one"
                one.else:
                    ret void
                one.endif:
                """)
            return builder

        builder = check(True)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 99, i32 1 }
            """)
        builder = check(False)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 1, i32 99 }
            """)

    def test_positioning(self):
        """
        Test IRBuilder.position_{before,after,at_start,at_end}.
        """
        func = self.function()
        builder = ir.IRBuilder()
        z = ir.Constant(int32, 0)
        bb_one = func.append_basic_block(name='one')
        bb_two = func.append_basic_block(name='two')
        bb_three = func.append_basic_block(name='three')
        # .at_start(empty block)
        builder.position_at_start(bb_one)
        builder.add(z, z, 'a')
        # .at_end(empty block)
        builder.position_at_end(bb_two)
        builder.add(z, z, 'm')
        builder.add(z, z, 'n')
        # .at_start(block)
        builder.position_at_start(bb_two)
        o = builder.add(z, z, 'o')
        builder.add(z, z, 'p')
        # .at_end(block)
        builder.position_at_end(bb_one)
        b = builder.add(z, z, 'b')
        # .after(instr)
        builder.position_after(o)
        builder.add(z, z, 'q')
        # .before(instr)
        builder.position_before(b)
        builder.add(z, z, 'c')
        self.check_block(bb_one, """\
            one:
                %"a" = add i32 0, 0
                %"c" = add i32 0, 0
                %"b" = add i32 0, 0
            """)
        self.check_block(bb_two, """\
            two:
                %"o" = add i32 0, 0
                %"q" = add i32 0, 0
                %"p" = add i32 0, 0
                %"m" = add i32 0, 0
                %"n" = add i32 0, 0
            """)
        self.check_block(bb_three, """\
            three:
            """)

    def test_instruction_removal(self):
        """remove() deletes an instruction and updates is_terminated."""
        func = self.function()
        builder = ir.IRBuilder()
        blk = func.append_basic_block(name='entry')
        builder.position_at_end(blk)
        k = ir.Constant(int32, 1234)
        a = builder.add(k, k, 'a')
        retvoid = builder.ret_void()
        self.assertTrue(blk.is_terminated)
        builder.remove(retvoid)
        self.assertFalse(blk.is_terminated)
        b = builder.mul(a, a, 'b')
        c = builder.add(b, b, 'c')
        builder.remove(c)
        builder.ret_void()
        self.assertTrue(blk.is_terminated)
        self.check_block(blk, """\
            entry:
                %"a" = add i32 1234, 1234
                %"b" = mul i32 %"a", %"a"
                ret void
            """)

    def test_metadata(self):
        """debug_metadata is attached as !dbg on emitted instructions."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        builder.debug_metadata = builder.module.add_metadata([])
        builder.alloca(ir.PointerType(int32), name='c')
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32*, !dbg !0
            """)
class TestTypes(TestBase):
    """Tests for the ir.Type hierarchy: equality, hashing, stringification,
    GEP typing and ABI size/alignment queries."""

    def has_logical_equality(self, ty):
        # Label types compare by identity only; everything else (looking
        # through pointers) supports structural equality.
        while isinstance(ty, ir.PointerType):
            ty = ty.pointee
        return not isinstance(ty, ir.LabelType)

    def assorted_types(self):
        """
        A bunch of mutually unequal types
        """
        # Avoid polluting the namespace
        context = ir.Context()
        types = [
            ir.LabelType(), ir.VoidType(),
            ir.FunctionType(int1, (int8, int8)), ir.FunctionType(int1, (int8,)),
            ir.FunctionType(int1, (int8,), var_arg=True),
            ir.FunctionType(int8, (int8,)),
            int1, int8, int32, flt, dbl,
            ir.ArrayType(flt, 5), ir.ArrayType(dbl, 5), ir.ArrayType(dbl, 4),
            ir.LiteralStructType((int1, int8)), ir.LiteralStructType((int8,
                                                                      int1)),
            context.get_identified_type("MyType1"),
            context.get_identified_type("MyType2"),
        ]
        types += [ir.PointerType(tp) for tp in types
                  if not isinstance(tp, (ir.VoidType, ir.LabelType))]
        return types

    def test_pickling(self):
        """Pickling round-trips every type; equality survives where defined."""
        types = self.assorted_types()
        for ty in types:
            newty = self.assert_pickle_correctly(ty)
            if self.has_logical_equality(ty):
                self.assertEqual(newty, ty)

    def test_comparisons(self):
        """Distinct types are mutually unequal; copies compare as expected."""
        types = self.assorted_types()
        for a, b in itertools.product(types, types):
            if a is not b:
                self.assertFalse(a == b, (a, b))
                self.assertTrue(a != b, (a, b))
        # We assume copy.copy() works fine here...
        for tp in types:
            other = copy.copy(tp)
            if self.has_logical_equality(tp):
                self.assertTrue(tp == other, (tp, other))
                self.assertFalse(tp != other, (tp, other))
            else:
                self.assertFalse(tp == other, (tp, other))
                self.assertTrue(tp != other, (tp, other))

    def test_str(self):
        """
        Test the string representation of types.
        """
        self.assertEqual(str(int1), 'i1')
        self.assertEqual(str(ir.IntType(29)), 'i29')
        self.assertEqual(str(flt), 'float')
        self.assertEqual(str(dbl), 'double')
        self.assertEqual(str(ir.VoidType()), 'void')
        self.assertEqual(str(ir.FunctionType(int1, ())), 'i1 ()')
        self.assertEqual(str(ir.FunctionType(int1, (flt,))), 'i1 (float)')
        self.assertEqual(str(ir.FunctionType(int1, (flt, dbl))),
                         'i1 (float, double)')
        self.assertEqual(str(ir.FunctionType(int1, (), var_arg=True)),
                         'i1 (...)')
        self.assertEqual(str(ir.FunctionType(int1, (flt,), var_arg=True)),
                         'i1 (float, ...)')
        self.assertEqual(str(ir.FunctionType(int1, (flt, dbl), var_arg=True)),
                         'i1 (float, double, ...)')
        self.assertEqual(str(ir.PointerType(int32)), 'i32*')
        self.assertEqual(str(ir.PointerType(ir.PointerType(int32))), 'i32**')
        self.assertEqual(str(ir.ArrayType(int1, 5)), '[5 x i1]')
        self.assertEqual(str(ir.ArrayType(ir.PointerType(int1), 5)),
                         '[5 x i1*]')
        self.assertEqual(str(ir.PointerType(ir.ArrayType(int1, 5))),
                         '[5 x i1]*')
        self.assertEqual(str(ir.LiteralStructType((int1,))), '{i1}')
        self.assertEqual(str(ir.LiteralStructType((int1, flt))), '{i1, float}')
        self.assertEqual(str(ir.LiteralStructType((
            ir.PointerType(int1), ir.LiteralStructType((int32, int8))))),
            '{i1*, {i32, i8}}')
        self.assertEqual(str(ir.LiteralStructType((int1,), packed=True)),
                         '<{i1}>')
        self.assertEqual(str(ir.LiteralStructType((int1, flt), packed=True)),
                         '<{i1, float}>')
        # Avoid polluting the namespace
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        self.assertEqual(str(mytype), "%\"MyType\"")
        # Special characters in identified names are hex-escaped.
        mytype1 = context.get_identified_type("MyType\\")
        self.assertEqual(str(mytype1), "%\"MyType\\5c\"")
        mytype2 = context.get_identified_type("MyType\"")
        self.assertEqual(str(mytype2), "%\"MyType\\22\"")

    def test_hash(self):
        """Equal types hash equally (only meaningful where equality exists)."""
        for typ in filter(self.has_logical_equality, self.assorted_types()):
            self.assertEqual(hash(typ), hash(copy.copy(typ)))

    def test_gep(self):
        """Type.gep() resolves element types and validates index types."""
        def check_constant(tp, i, expected):
            actual = tp.gep(ir.Constant(int32, i))
            self.assertEqual(actual, expected)

        def check_index_type(tp):
            # Float indices are never valid for GEP.
            index = ir.Constant(dbl, 1.0)
            with self.assertRaises(TypeError):
                tp.gep(index)

        tp = ir.PointerType(dbl)
        for i in range(5):
            check_constant(tp, i, dbl)
        check_index_type(tp)
        tp = ir.ArrayType(int1, 3)
        for i in range(3):
            check_constant(tp, i, int1)
        check_index_type(tp)
        tp = ir.LiteralStructType((dbl, ir.LiteralStructType((int1, int8))))
        check_constant(tp, 0, dbl)
        check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
        with self.assertRaises(IndexError):
            tp.gep(ir.Constant(int32, 2))
        check_index_type(tp)
        context = ir.Context()
        tp = ir.IdentifiedStructType(context, "MyType")
        tp.set_body(dbl, ir.LiteralStructType((int1, int8)))
        check_constant(tp, 0, dbl)
        check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
        with self.assertRaises(IndexError):
            tp.gep(ir.Constant(int32, 2))
        check_index_type(tp)

    def test_abi_size(self):
        """get_abi_size agrees with a known x86-64 data layout."""
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

        def check(tp, expected):
            self.assertEqual(tp.get_abi_size(td), expected)

        check(int8, 1)
        check(int32, 4)
        check(int64, 8)
        check(ir.ArrayType(int8, 5), 5)
        check(ir.ArrayType(int32, 5), 20)
        check(ir.LiteralStructType((dbl, flt, flt)), 16)

    def test_abi_alignment(self):
        """get_abi_alignment falls in the expected set for the layout."""
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

        def check(tp, expected):
            self.assertIn(tp.get_abi_alignment(td), expected)

        check(int8, (1, 2, 4))
        check(int32, (4,))
        check(int64, (8,))
        check(ir.ArrayType(int8, 5), (1, 2, 4))
        check(ir.ArrayType(int32, 5), (4,))
        check(ir.LiteralStructType((dbl, flt, flt)), (8,))

    def test_identified_struct(self):
        """An identified struct is opaque until set_body() is called."""
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        module = ir.Module(context=context)
        self.assertTrue(mytype.is_opaque)
        self.assert_valid_ir(module)
        oldstr = str(module)
        mytype.set_body(ir.IntType(32), ir.IntType(64), ir.FloatType())
        self.assertFalse(mytype.is_opaque)
        self.assert_valid_ir(module)
        self.assertNotEqual(oldstr, str(module))

    def test_target_data_non_default_context(self):
        """ABI queries work for types from a non-default ir.Context."""
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        mytype.elements = [ir.IntType(32)]
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
        self.assertEqual(mytype.get_abi_size(td, context=context), 4)

    def test_vector(self):
        """Vector types stringify as '<N x elem>'."""
        vecty = ir.VectorType(ir.IntType(32), 8)
        self.assertEqual(str(vecty), "<8 x i32>")
def c32(i):
    """Shorthand: build an i32 ir.Constant from a Python integer."""
    return ir.Constant(int32, i)
class TestConstant(TestBase):
def test_integers(self):
    """Integer constants render their value; i1 accepts bool/None/Undefined."""
    c = ir.Constant(int32, 42)
    self.assertEqual(str(c), 'i32 42')
    c = ir.Constant(int1, 1)
    self.assertEqual(str(c), 'i1 1')
    c = ir.Constant(int1, 0)
    self.assertEqual(str(c), 'i1 0')
    c = ir.Constant(int1, True)
    self.assertEqual(str(c), 'i1 true')
    c = ir.Constant(int1, False)
    self.assertEqual(str(c), 'i1 false')
    c = ir.Constant(int1, ir.Undefined)
    self.assertEqual(str(c), 'i1 undef')
    # None means the zero value.
    c = ir.Constant(int1, None)
    self.assertEqual(str(c), 'i1 0')
def test_reals(self):
    """Float/double constants render as hex IEEE-754 bit patterns."""
    # XXX Test NaNs and infs
    c = ir.Constant(flt, 1.5)
    self.assertEqual(str(c), 'float 0x3ff8000000000000')
    c = ir.Constant(flt, -1.5)
    self.assertEqual(str(c), 'float 0xbff8000000000000')
    c = ir.Constant(dbl, 1.5)
    self.assertEqual(str(c), 'double 0x3ff8000000000000')
    c = ir.Constant(dbl, -1.5)
    self.assertEqual(str(c), 'double 0xbff8000000000000')
    c = ir.Constant(dbl, ir.Undefined)
    self.assertEqual(str(c), 'double undef')
    c = ir.Constant(dbl, None)
    self.assertEqual(str(c), 'double 0.0')
def test_arrays(self):
    """Array constants: element lists, undef, zeroinitializer, raw bytes."""
    c = ir.Constant(ir.ArrayType(int32, 3), (c32(5), c32(6), c32(4)))
    self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 4]')
    c = ir.Constant(ir.ArrayType(int32, 2), (c32(5), c32(ir.Undefined)))
    self.assertEqual(str(c), '[2 x i32] [i32 5, i32 undef]')
    c = ir.Constant.literal_array((c32(5), c32(6), c32(ir.Undefined)))
    self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 undef]')
    with self.assertRaises(TypeError) as raises:
        ir.Constant.literal_array((c32(5), ir.Constant(flt, 1.5)))
    self.assertEqual(str(raises.exception),
                     "all elements must have the same type")
    c = ir.Constant(ir.ArrayType(int32, 2), ir.Undefined)
    self.assertEqual(str(c), '[2 x i32] undef')
    c = ir.Constant(ir.ArrayType(int32, 2), None)
    self.assertEqual(str(c), '[2 x i32] zeroinitializer')
    # Raw array syntax
    c = ir.Constant(ir.ArrayType(int8, 11), bytearray(b"foobar_123\x80"))
    self.assertEqual(str(c), r'[11 x i8] c"foobar_123\80"')
    c = ir.Constant(ir.ArrayType(int8, 4), bytearray(b"\x00\x01\x04\xff"))
    self.assertEqual(str(c), r'[4 x i8] c"\00\01\04\ff"')
    # Recursive instantiation of inner constants
    c = ir.Constant(ir.ArrayType(int32, 3), (5, ir.Undefined, 6))
    self.assertEqual(str(c), '[3 x i32] [i32 5, i32 undef, i32 6]')
    # Invalid number of args
    with self.assertRaises(ValueError):
        ir.Constant(ir.ArrayType(int32, 3), (5, 6))
def test_vector(self):
    """Vector constants render '<N x ty> <elems>'."""
    vecty = ir.VectorType(ir.IntType(32), 8)
    vals = [1, 2, 4, 3, 8, 6, 9, 7]
    vec = ir.Constant(vecty, vals)
    vec_repr = "<8 x i32> <{}>".format(
        ', '.join(map('i32 {}'.format, vals)))
    self.assertEqual(str(vec), vec_repr)
def test_structs(self):
    """Struct constants: literal_struct, undef, zeroinitializer, nesting."""
    st1 = ir.LiteralStructType((flt, int1))
    st2 = ir.LiteralStructType((int32, st1))
    c = ir.Constant(st1, (ir.Constant(ir.FloatType(), 1.5),
                          ir.Constant(int1, True)))
    self.assertEqual(str(c),
                     '{float, i1} {float 0x3ff8000000000000, i1 true}')
    c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
                                    ir.Constant(int1, True)))
    self.assertEqual(c.type, st1)
    self.assertEqual(str(c),
                     '{float, i1} {float 0x3ff8000000000000, i1 true}')
    c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
                                    ir.Constant(int1, ir.Undefined)))
    self.assertEqual(c.type, st1)
    self.assertEqual(str(c),
                     '{float, i1} {float 0x3ff8000000000000, i1 undef}')
    c = ir.Constant(st1, ir.Undefined)
    self.assertEqual(str(c), '{float, i1} undef')
    c = ir.Constant(st1, None)
    self.assertEqual(str(c), '{float, i1} zeroinitializer')
    # Recursive instantiation of inner constants
    c1 = ir.Constant(st1, (1.5, True))
    self.assertEqual(str(c1),
                     '{float, i1} {float 0x3ff8000000000000, i1 true}')
    c2 = ir.Constant(st2, (42, c1))
    self.assertEqual(str(c2), ('{i32, {float, i1}} {i32 42, {float, i1} '
                               '{float 0x3ff8000000000000, i1 true}}'))
    c3 = ir.Constant(st2, (42, (1.5, True)))
    self.assertEqual(str(c3), str(c2))
    # Invalid number of args
    with self.assertRaises(ValueError):
        ir.Constant(st2, (4, 5, 6))
def test_undefined_literal_struct_pickling(self):
    """An undef literal-struct constant survives pickling."""
    i8 = ir.IntType(8)
    st = ir.Constant(ir.LiteralStructType([i8, i8]), ir.Undefined)
    self.assert_pickle_correctly(st)
def test_type_instantiaton(self):
    """
    Instantiating a type should create a constant.
    """
    c = int8(42)
    self.assertIsInstance(c, ir.Constant)
    self.assertEqual(str(c), 'i8 42')
    c = int1(True)
    self.assertIsInstance(c, ir.Constant)
    self.assertEqual(str(c), 'i1 true')
    # Arrays
    at = ir.ArrayType(int32, 3)
    c = at([c32(4), c32(5), c32(6)])
    self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
    c = at([4, 5, 6])
    self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
    c = at(None)
    self.assertEqual(str(c), '[3 x i32] zeroinitializer')
    with self.assertRaises(ValueError):
        at([4, 5, 6, 7])
    # Structs
    st1 = ir.LiteralStructType((flt, int1))
    st2 = ir.LiteralStructType((int32, st1))
    c = st1((1.5, True))
    self.assertEqual(str(c), ('{float, i1} {float 0x3ff8000000000000, i1 '
                              'true}'))
    c = st2((42, (1.5, True)))
    self.assertEqual(str(c), ('{i32, {float, i1}} {i32 42, {float, i1} '
                              '{float 0x3ff8000000000000, i1 true}}'))
def test_repr(self):
"""
Constants should have a useful repr().
"""
c = int32(42)
self.assertEqual(repr(c), "<ir.Constant type='i32' value=42>")
    def test_encoding_problem(self):
        """A constant containing all 256 byte values round-trips through the
        LLVM parser without the encoding mangling the IR text.
        """
        c = ir.Constant(ir.ArrayType(ir.IntType(8), 256),
                        bytearray(range(256)))
        m = self.module()
        gv = ir.GlobalVariable(m, c.type, "myconstant")
        gv.global_constant = True
        gv.initializer = c
        # With utf-8, the following will cause:
        # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe0 in position
        # 136: invalid continuation byte
        parsed = llvm.parse_assembly(str(m))
        # Make sure the encoding does not modify the IR
        reparsed = llvm.parse_assembly(str(parsed))
        self.assertEqual(str(parsed), str(reparsed))
def test_gep(self):
m = self.module()
tp = ir.LiteralStructType((flt, int1))
gv = ir.GlobalVariable(m, tp, "myconstant")
c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
self.assertEqual(str(c),
'getelementptr ({float, i1}, {float, i1}* @"myconstant", i32 0, i32 1)') # noqa E501
self.assertEqual(c.type, ir.PointerType(int1))
const = ir.Constant(tp, None)
with self.assertRaises(TypeError):
const.gep([ir.Constant(int32, 0)])
const_ptr = ir.Constant(tp.as_pointer(), None)
c2 = const_ptr.gep([ir.Constant(int32, 0)])
self.assertEqual(str(c2),
'getelementptr ({float, i1}, {float, i1}* null, i32 0)') # noqa E501
self.assertEqual(c.type, ir.PointerType(int1))
    def test_gep_addrspace_globalvar(self):
        """gep on a global in a non-default address space preserves the
        address space in the resulting pointer type.
        """
        m = self.module()
        tp = ir.LiteralStructType((flt, int1))
        addrspace = 4
        gv = ir.GlobalVariable(m, tp, "myconstant", addrspace=addrspace)
        self.assertEqual(gv.addrspace, addrspace)
        c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
        # The resulting pointer lives in the same address space as the global.
        self.assertEqual(c.type.addrspace, addrspace)
        self.assertEqual(str(c),
                         ('getelementptr ({float, i1}, {float, i1} '
                          'addrspace(4)* @"myconstant", i32 0, i32 1)'))
        self.assertEqual(c.type, ir.PointerType(int1, addrspace=addrspace))
def test_bitcast(self):
m = self.module()
gv = ir.GlobalVariable(m, int32, "myconstant")
c = gv.bitcast(int64.as_pointer())
self.assertEqual(str(c), 'bitcast (i32* @"myconstant" to i64*)')
def test_inttoptr(self):
c = ir.Constant(int32, 0).inttoptr(int64.as_pointer())
self.assertEqual(str(c), 'inttoptr (i32 0 to i64*)')
class TestTransforms(TestBase):
    """Tests for module-level IR rewriting helpers."""

    def test_call_transform(self):
        """replace_all_calls() retargets every call from one callee to
        another and reports the modified call instructions.
        """
        module = ir.Module()
        old_fn = ir.Function(module, ir.FunctionType(ir.VoidType(), ()), "foo")
        new_fn = ir.Function(module, ir.FunctionType(ir.VoidType(), ()), "bar")
        builder = ir.IRBuilder()
        builder.position_at_end(old_fn.append_basic_block())
        call = builder.call(old_fn, ())
        self.assertEqual(call.callee, old_fn)
        rewritten = ir.replace_all_calls(module, old_fn, new_fn)
        self.assertIn(call, rewritten)
        self.assertNotEqual(call.callee, old_fn)
        self.assertEqual(call.callee, new_fn)
class TestSingleton(TestBase):
    """Tests for singleton IR objects."""
    def test_undefined(self):
        """ir.Undefined stays the same object under re-construction, copying,
        deep-copying and pickling.
        """
        self.assertIs(ir.Undefined, ir.values._Undefined())
        self.assertIs(ir.Undefined, copy.copy(ir.Undefined))
        self.assertIs(ir.Undefined, copy.deepcopy(ir.Undefined))
        self.assert_pickle_correctly(ir.Undefined)
# Allow running this test file directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.729681 | 193 | 0.531263 |
import copy
import itertools
import pickle
import re
import textwrap
import unittest
from . import TestCase
from llvmlite import ir
from llvmlite import binding as llvm
# Shorthand scalar type instances shared by all tests below.
int1 = ir.IntType(1)
int8 = ir.IntType(8)
int16 = ir.IntType(16)
int32 = ir.IntType(32)
int64 = ir.IntType(64)
hlf = ir.HalfType()
flt = ir.FloatType()
dbl = ir.DoubleType()
class TestBase(TestCase):
    """
    Utilities common to all the IR tests: assertion helpers plus factories
    for modules, functions and basic blocks.
    """

    def assertInText(self, pattern, text):
        """Assert that *pattern* occurs in *text*, ignoring whitespace
        differences between the two.
        """
        def escape(c):
            if not c.isalnum() and not c.isspace():
                return '\\' + c
            return c

        pattern = ''.join(map(escape, pattern))
        regex = re.sub(r'\s+', r'\\s*', pattern)
        self.assertRegex(text, regex)

    def assert_ir_line(self, line, mod):
        """Assert that *line* appears (stripped) in the module's textual IR."""
        lines = [ln.strip() for ln in str(mod).splitlines()]
        self.assertIn(line, lines)

    def assert_valid_ir(self, mod):
        """Assert that the module parses cleanly through the LLVM binding."""
        llvm.parse_assembly(str(mod))

    def assert_pickle_correctly(self, irobject):
        """Round-trip *irobject* through pickle and check class and textual
        representation are preserved.  Returns the unpickled object.
        """
        newobject = pickle.loads(pickle.dumps(irobject, protocol=-1))
        self.assertIs(irobject.__class__, newobject.__class__)
        self.assertEqual(str(irobject), str(newobject))
        return newobject

    def module(self):
        """Create a fresh, empty module."""
        return ir.Module()

    def function(self, module=None, name='my_func'):
        """Create a test function (i32(i32, i32, double, i32*)) in *module*,
        creating a fresh module when none is given.
        """
        module = module or self.module()
        fnty = ir.FunctionType(int32, (int32, int32, dbl,
                                       ir.PointerType(int32)))
        # Fixed: previously this created the function in a brand-new module
        # (ir.Function(self.module(), ...)), silently ignoring the *module*
        # argument passed by callers such as test_function_metadata().
        return ir.Function(module, fnty, name)

    def block(self, func=None, name=''):
        """Append a new basic block to *func* (a fresh function by default)."""
        func = func or self.function()
        return func.append_basic_block(name)

    def descr(self, thing):
        """Return the textual IR description of *thing*."""
        buf = []
        thing.descr(buf)
        return "".join(buf)

    def _normalize_asm(self, asm):
        """Dedent an expected IR snippet written inline in a test, and map
        its 4-space instruction indent to the 2-space indent that descr()
        emits.
        """
        asm = textwrap.dedent(asm)
        # Normalize indent
        asm = asm.replace("\n    ", "\n  ")
        return asm

    def check_descr(self, descr, asm):
        """Assert that *descr* equals the normalized expected *asm*."""
        expected = self._normalize_asm(asm)
        self.assertEqual(descr, expected)

    def check_block(self, block, asm):
        """Assert that a block's description matches the expected *asm*."""
        self.check_descr(self.descr(block), asm)

    def check_module_body(self, module, asm):
        """Assert that the module body (globals, functions) matches *asm*."""
        expected = self._normalize_asm(asm)
        actual = module._stringify_body()
        self.assertEqual(actual.strip(), expected.strip())

    def check_metadata(self, module, asm):
        """Assert that the module's metadata section matches *asm*."""
        expected = self._normalize_asm(asm)
        actual = module._stringify_metadata()
        self.assertEqual(actual.strip(), expected.strip())

    def check_func_body(self, func, asm):
        """Assert that the body of *func* (the text between braces) matches
        *asm*.
        """
        expected = self._normalize_asm(asm)
        actual = self.descr(func)
        actual = actual.partition('{')[2].rpartition('}')[0]
        self.assertEqual(actual.strip(), expected.strip())
class TestFunction(TestBase):
    """Tests for declaring/defining functions and their textual output."""
    # Prototype string of the helper function built by TestBase.function().
    proto = """i32 @"my_func"(i32 %".1", i32 %".2", double %".3", i32* %".4")"""
    def test_declare(self):
        """A function without a body renders as a plain declaration."""
        func = self.function()
        asm = self.descr(func).strip()
        self.assertEqual(asm.strip(), "declare %s" % self.proto)
    def test_declare_attributes(self):
        """Function attributes, stack alignment and personality are
        rendered on the declaration.
        """
        func = self.function()
        func.attributes.add("optsize")
        func.attributes.add("alwaysinline")
        func.attributes.alignstack = 16
        tp_pers = ir.FunctionType(int8, (), var_arg=True)
        # NOTE(review): the personality function is created in a separate
        # fresh module — confirm this is intentional.
        pers = ir.Function(self.module(), tp_pers, '__gxx_personality_v0')
        func.attributes.personality = pers
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         ("declare %s alwaysinline optsize alignstack(16) "
                          "personality i8 (...)* @\"__gxx_personality_v0\"") %
                         self.proto)
        self.assert_pickle_correctly(func)
    def test_function_attributes(self):
        """Per-argument and return-value attributes are rendered."""
        func = self.function()
        func.args[0].add_attribute("zeroext")
        func.args[1].attributes.dereferenceable = 5
        func.args[1].attributes.dereferenceable_or_null = 10
        func.args[3].attributes.align = 4
        func.args[3].add_attribute("nonnull")
        func.return_value.add_attribute("noalias")
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         """declare noalias i32 @"my_func"(i32 zeroext %".1", i32 dereferenceable(5) dereferenceable_or_null(10) %".2", double %".3", i32* nonnull align 4 %".4")"""
                         )
        self.assert_pickle_correctly(func)
    def test_function_metadata(self):
        """set_metadata() attaches a !dbg node to the declaration."""
        module = self.module()
        func = self.function(module)
        func.set_metadata('dbg', module.add_metadata([]))
        asm = self.descr(func).strip()
        self.assertEqual(asm,
                         """declare i32 @"my_func"(i32 %".1", i32 %".2", double %".3", i32* %".4") !dbg !0"""
                         )
        self.assert_pickle_correctly(func)
    def test_define(self):
        """A function with a body renders as a define with its blocks."""
        func = self.function()
        func.attributes.add("alwaysinline")
        block = func.append_basic_block('my_block')
        builder = ir.IRBuilder(block)
        builder.ret_void()
        asm = self.descr(func)
        self.check_descr(asm, """\
            define {proto} alwaysinline
            {{
            my_block:
                ret void
            }}
            """.format(proto=self.proto))
    def test_declare_intrinsics(self):
        """declare_intrinsic() mangles names from the overloaded types."""
        module = self.module()
        pint8 = int8.as_pointer()
        powi = module.declare_intrinsic('llvm.powi', [dbl])
        memset = module.declare_intrinsic('llvm.memset', [pint8, int32])
        memcpy = module.declare_intrinsic('llvm.memcpy', [pint8, pint8, int32])
        assume = module.declare_intrinsic('llvm.assume')
        self.check_descr(self.descr(powi).strip(), """\
            declare double @"llvm.powi.f64"(double %".1", i32 %".2")""")
        self.check_descr(self.descr(memset).strip(), """\
            declare void @"llvm.memset.p0i8.i32"(i8* %".1", i8 %".2", i32 %".3", i1 %".4")""")
        self.check_descr(self.descr(memcpy).strip(), """\
            declare void @"llvm.memcpy.p0i8.p0i8.i32"(i8* %".1", i8* %".2", i32 %".3", i1 %".4")""")
        self.check_descr(self.descr(assume).strip(), """\
            declare void @"llvm.assume"(i1 %".1")""")
    def test_redeclare_intrinsic(self):
        """Declaring the same intrinsic twice returns the same object."""
        module = self.module()
        powi = module.declare_intrinsic('llvm.powi', [dbl])
        powi2 = module.declare_intrinsic('llvm.powi', [dbl])
        self.assertIs(powi, powi2)
    def test_pickling(self):
        """A freshly created function pickles correctly."""
        fn = self.function()
        self.assert_pickle_correctly(fn)
class TestIR(TestBase):
    """Tests for metadata, named metadata, debug info and inline assembly."""
    def test_unnamed_metadata(self):
        """add_metadata() creates numbered unnamed metadata nodes."""
        mod = self.module()
        mod.add_metadata([int32(123), int8(42)])
        self.assert_ir_line("!0 = !{ i32 123, i8 42 }", mod)
        self.assert_valid_ir(mod)
    def test_unnamed_metadata_2(self):
        """Identical metadata nodes are deduplicated; nodes can nest."""
        mod = self.module()
        m0 = mod.add_metadata([int32(123), "kernel"])
        m1 = mod.add_metadata([int64(456), m0])
        m2 = mod.add_metadata([int64(456), m0])
        self.assertIs(m2, m1)
        mod.add_metadata([m0, m1, m2])
        self.assert_ir_line('!0 = !{ i32 123, !"kernel" }', mod)
        self.assert_ir_line('!1 = !{ i64 456, !0 }', mod)
        self.assert_ir_line('!2 = !{ !0, !1, !1 }', mod)
    def test_unnamed_metadata_3(self):
        """Nested lists are converted to metadata nodes recursively, with
        deduplication of identical sub-nodes.
        """
        mod = self.module()
        mod.add_metadata([int32(123), [int32(456)], [int32(789)], [int32(456)]])
        self.assert_ir_line('!0 = !{ i32 456 }', mod)
        self.assert_ir_line('!1 = !{ i32 789 }', mod)
        self.assert_ir_line('!2 = !{ i32 123, !0, !1, !0 }', mod)
    def test_metadata_string(self):
        """Special characters in metadata strings are hex-escaped."""
        mod = self.module()
        mod.add_metadata(["\"\\$"])
        self.assert_ir_line('!0 = !{ !"\\22\\5c$" }', mod)
    def test_named_metadata(self):
        """Named metadata collects unnamed nodes, possibly repeated."""
        # Add a named metadata node and add metadata values to it
        mod = self.module()
        m0 = mod.add_metadata([int32(123)])
        m1 = mod.add_metadata([int64(456)])
        nmd = mod.add_named_metadata("foo")
        nmd.add(m0)
        nmd.add(m1)
        nmd.add(m0)
        self.assert_ir_line("!foo = !{ !0, !1, !0 }", mod)
        self.assert_valid_ir(mod)
        # Check get_named_metadata()
        self.assertIs(nmd, mod.get_named_metadata("foo"))
        with self.assertRaises(KeyError):
            mod.get_named_metadata("bar")
    def test_named_metadata_2(self):
        """add_named_metadata() can create and populate in one call, and
        accepts raw element lists as well as existing nodes.
        """
        # Add and set named metadata through a single add_named_metadata() call
        mod = self.module()
        m0 = mod.add_metadata([int32(123)])
        mod.add_named_metadata("foo", m0)
        mod.add_named_metadata("foo", [int64(456)])
        mod.add_named_metadata("foo", ["kernel"])
        mod.add_named_metadata("bar", [])
        self.assert_ir_line("!foo = !{ !0, !1, !2 }", mod)
        self.assert_ir_line("!0 = !{ i32 123 }", mod)
        self.assert_ir_line("!1 = !{ i64 456 }", mod)
        self.assert_ir_line('!2 = !{ !"kernel" }', mod)
        self.assert_ir_line("!bar = !{ !3 }", mod)
        self.assert_ir_line('!3 = !{ }', mod)
        self.assert_valid_ir(mod)
    def test_metadata_null(self):
        """Typed null constants and untyped None both render in metadata."""
        # A null metadata (typed) value
        mod = self.module()
        mod.add_metadata([int32.as_pointer()(None)])
        self.assert_ir_line("!0 = !{ i32* null }", mod)
        self.assert_valid_ir(mod)
        # A null metadata (untyped) value
        mod = self.module()
        mod.add_metadata([None, int32(123)])
        self.assert_ir_line("!0 = !{ null, i32 123 }", mod)
        self.assert_valid_ir(mod)
    def test_debug_info(self):
        """add_debug_info() renders specialized debug nodes with sorted
        keys, distinct-ness, and DIToken enum values.
        """
        # Add real world-looking debug information to a module
        # (with various value types)
        mod = self.module()
        di_file = mod.add_debug_info("DIFile", {
            "filename": "foo",
            "directory": "bar",
        })
        di_func_type = mod.add_debug_info("DISubroutineType", {
            # None as `null`
            "types": mod.add_metadata([None]),
        })
        di_compileunit = mod.add_debug_info("DICompileUnit", {
            "language": ir.DIToken("DW_LANG_Python"),
            "file": di_file,
            "producer": "ARTIQ",
            "runtimeVersion": 0,
            "isOptimized": True,
        }, is_distinct=True)
        mod.add_debug_info("DISubprogram", {
            "name": "my_func",
            "file": di_file,
            "line": 11,
            "type": di_func_type,
            "isLocal": False,
            "unit": di_compileunit,
        }, is_distinct=True)
        # Check output
        strmod = str(mod)
        self.assert_ir_line('!0 = !DIFile(directory: "bar", filename: "foo")',
                            strmod)
        self.assert_ir_line('!1 = !{ null }', strmod)
        self.assert_ir_line('!2 = !DISubroutineType(types: !1)', strmod)
        # self.assert_ir_line('!4 = !{ !3 }', strmod)
        self.assert_ir_line('!3 = distinct !DICompileUnit(file: !0, '
                            'isOptimized: true, language: DW_LANG_Python, '
                            'producer: "ARTIQ", runtimeVersion: 0)',
                            strmod)
        self.assert_ir_line('!4 = distinct !DISubprogram(file: !0, isLocal: '
                            'false, line: 11, name: "my_func", type: !2, unit: '
                            '!3)',
                            strmod)
        self.assert_valid_ir(mod)
    def test_debug_info_2(self):
        """Identical (non-distinct) debug info nodes are merged; distinct
        nodes are always kept separate.
        """
        # Identical debug info nodes should be merged
        mod = self.module()
        di1 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  })
        di2 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  })
        di3 = mod.add_debug_info("DIFile",
                                 {"filename": "bar",
                                  "directory": "foo",
                                  })
        di4 = mod.add_debug_info("DIFile",
                                 {"filename": "foo",
                                  "directory": "bar",
                                  }, is_distinct=True)
        self.assertIs(di1, di2)
        self.assertEqual(len({di1, di2, di3, di4}), 3)
        # Check output
        strmod = str(mod)
        self.assert_ir_line('!0 = !DIFile(directory: "bar", filename: "foo")',
                            strmod)
        self.assert_ir_line('!1 = !DIFile(directory: "foo", filename: "bar")',
                            strmod)
        self.assert_ir_line('!2 = distinct !DIFile(directory: "bar", filename: '
                            '"foo")', strmod)
        self.assert_valid_ir(mod)
    def test_inline_assembly(self):
        """ir.InlineAsm objects can be called like functions."""
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        asmty = ir.FunctionType(int32, [int32])
        asm = ir.InlineAsm(asmty, "mov $1, $2", "=r,r", side_effect=True)
        builder.call(asm, [int32(123)])
        builder.ret_void()
        pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)
    def test_builder_asm(self):
        """IRBuilder.asm() emits an inline-asm call directly."""
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        asmty = ir.FunctionType(int32, [int32])
        builder.asm(asmty, "mov $1, $2", "=r,r", [int32(123)], side_effect=True)
        builder.ret_void()
        pat = 'call i32 asm sideeffect "mov $1, $2", "=r,r" ( i32 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)
    def test_builder_load_reg(self):
        """load_reg() reads a named machine register via inline asm."""
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        builder.load_reg(ir.IntType(64), "rax")
        builder.ret_void()
        pat = 'call i64 asm "", "={rax}"'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)
    def test_builder_store_reg(self):
        """store_reg() writes a named machine register via inline asm."""
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        builder = ir.IRBuilder(foo.append_basic_block(''))
        builder.store_reg(int64(123), ir.IntType(64), "rax")
        builder.ret_void()
        pat = 'call void asm sideeffect "", "{rax}" ( i64 123 )'
        self.assertInText(pat, str(mod))
        self.assert_valid_ir(mod)
class TestGlobalValues(TestBase):
    """Tests for global variables and functions at module scope."""
    def test_globals_access(self):
        """get_global() retrieves functions and globals by name."""
        mod = self.module()
        foo = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'foo')
        ir.Function(mod, ir.FunctionType(ir.VoidType(), []), 'bar')
        globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble')
        self.assertEqual(mod.get_global('foo'), foo)
        self.assertEqual(mod.get_global('globdouble'), globdouble)
        with self.assertRaises(KeyError):
            mod.get_global('kkk')
        # Globals should have a useful repr()
        self.assertEqual(repr(globdouble),
                         "<ir.GlobalVariable 'globdouble' of type 'double*'>")
    def test_functions_global_values_access(self):
        """mod.functions lists functions; mod.global_values lists both
        functions and global variables.
        """
        mod = self.module()
        fty = ir.FunctionType(ir.VoidType(), [])
        foo = ir.Function(mod, fty, 'foo')
        bar = ir.Function(mod, fty, 'bar')
        globdouble = ir.GlobalVariable(mod, ir.DoubleType(), 'globdouble')
        self.assertEqual(set(mod.functions), set((foo, bar)))
        self.assertEqual(set(mod.global_values), set((foo, bar, globdouble)))
    def test_global_variables_ir(self):
        """Linkage, addrspace, initializers, constants, unnamed_addr and
        alignment all render correctly in the module body.
        """
        mod = self.module()
        # the following have side effects and write to self.module()
        a = ir.GlobalVariable(mod, int8, 'a')   # noqa F841
        b = ir.GlobalVariable(mod, int8, 'b', addrspace=42)   # noqa F841
        # Initialized global variable doesn't default to "external"
        c = ir.GlobalVariable(mod, int32, 'c')
        c.initializer = int32(123)
        d = ir.GlobalVariable(mod, int32, 'd')
        d.global_constant = True
        # Non-external linkage implies default "undef" initializer
        e = ir.GlobalVariable(mod, int32, 'e')
        e.linkage = "internal"
        f = ir.GlobalVariable(mod, int32, 'f', addrspace=456)
        f.unnamed_addr = True
        g = ir.GlobalVariable(mod, int32, 'g')
        g.linkage = "internal"
        g.initializer = int32(123)
        g.align = 16
        self.check_module_body(mod, """\
            @"a" = external global i8
            @"b" = external addrspace(42) global i8
            @"c" = global i32 123
            @"d" = external constant i32
            @"e" = internal global i32 undef
            @"f" = external unnamed_addr addrspace(456) global i32
            @"g" = internal global i32 123, align 16
            """)
    def test_pickle(self):
        """An (empty) module pickles correctly."""
        mod = self.module()
        self.assert_pickle_correctly(mod)
class TestBlock(TestBase):
    """Tests for basic-block creation, printing and instruction replacement."""
    def test_attributes(self):
        """A new block knows its parent and starts unterminated."""
        func = self.function()
        block = ir.Block(parent=func, name='start')
        self.assertIs(block.parent, func)
        self.assertFalse(block.is_terminated)
    def test_descr(self):
        """descr() prints the label, then each instruction indented."""
        block = self.block(name='my_block')
        self.assertEqual(self.descr(block), "my_block:\n")
        block.instructions.extend(['a', 'b'])
        self.assertEqual(self.descr(block), "my_block:\n  a\n  b\n")
    def test_replace(self):
        """Block.replace() swaps an instruction in place and rewires the
        instructions that used the old value.
        """
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.add(a, b, 'c')
        d = builder.sub(a, b, 'd')
        builder.mul(d, b, 'e')
        # Built manually, not inserted into the block yet
        f = ir.Instruction(block, a.type, 'sdiv', (c, b), 'f')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"d" = sub i32 %".1", %".2"
                %"e" = mul i32 %"d", %".2"
            """)
        block.replace(d, f)
        # 'e' now consumes %"f" instead of %"d"
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"f" = sdiv i32 %"c", %".2"
                %"e" = mul i32 %"f", %".2"
            """)
    def test_repr(self):
        """Blocks have an informative repr()."""
        func = self.function()
        block = ir.Block(parent=func, name='start')
        self.assertEqual(repr(block), "<ir.Block 'start' of type 'label'>")
class TestBuildInstructions(TestBase):
maxDiff = 4000
    def test_simple(self):
        """add() emits an instruction, returns it, and repr() is useful."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        inst = builder.add(a, b, 'res')
        self.check_block(block, """\
            my_block:
                %"res" = add i32 %".1", %".2"
            """)
        # Instructions should have a useful repr()
        self.assertEqual(repr(inst),
                         "<ir.Instruction 'res' of type 'i32', opname 'add', "
                         "operands (<ir.Argument '.1' of type i32>, "
                         "<ir.Argument '.2' of type i32>)>")
def test_binops(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b, ff = builder.function.args[:3]
builder.add(a, b, 'c')
builder.fadd(a, b, 'd')
builder.sub(a, b, 'e')
builder.fsub(a, b, 'f')
builder.mul(a, b, 'g')
builder.fmul(a, b, 'h')
builder.udiv(a, b, 'i')
builder.sdiv(a, b, 'j')
builder.fdiv(a, b, 'k')
builder.urem(a, b, 'l')
builder.srem(a, b, 'm')
builder.frem(a, b, 'n')
builder.or_(a, b, 'o')
builder.and_(a, b, 'p')
builder.xor(a, b, 'q')
builder.shl(a, b, 'r')
builder.ashr(a, b, 's')
builder.lshr(a, b, 't')
with self.assertRaises(ValueError) as cm:
builder.add(a, ff)
self.assertEqual(str(cm.exception),
"Operands must be the same type, got (i32, double)")
self.assertFalse(block.is_terminated)
self.check_block(block, """\
my_block:
%"c" = add i32 %".1", %".2"
%"d" = fadd i32 %".1", %".2"
%"e" = sub i32 %".1", %".2"
%"f" = fsub i32 %".1", %".2"
%"g" = mul i32 %".1", %".2"
%"h" = fmul i32 %".1", %".2"
%"i" = udiv i32 %".1", %".2"
%"j" = sdiv i32 %".1", %".2"
%"k" = fdiv i32 %".1", %".2"
%"l" = urem i32 %".1", %".2"
%"m" = srem i32 %".1", %".2"
%"n" = frem i32 %".1", %".2"
%"o" = or i32 %".1", %".2"
%"p" = and i32 %".1", %".2"
%"q" = xor i32 %".1", %".2"
%"r" = shl i32 %".1", %".2"
%"s" = ashr i32 %".1", %".2"
%"t" = lshr i32 %".1", %".2"
""")
    def test_binop_flags(self):
        """Integer binops accept wrapping flags as a tuple or a list."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        # As tuple
        builder.add(a, b, 'c', flags=('nuw',))
        # and as list
        builder.sub(a, b, 'd', flags=['nuw', 'nsw'])
        self.check_block(block, """\
            my_block:
                %"c" = add nuw i32 %".1", %".2"
                %"d" = sub nuw nsw i32 %".1", %".2"
            """)
    def test_binop_fastmath_flags(self):
        """Floating-point binops accept fast-math flags as tuple or list."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        # As tuple
        builder.fadd(a, b, 'c', flags=('fast',))
        # and as list
        builder.fsub(a, b, 'd', flags=['ninf', 'nsz'])
        self.check_block(block, """\
            my_block:
                %"c" = fadd fast i32 %".1", %".2"
                %"d" = fsub ninf nsz i32 %".1", %".2"
            """)
def test_binops_with_overflow(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
builder.sadd_with_overflow(a, b, 'c')
builder.smul_with_overflow(a, b, 'd')
builder.ssub_with_overflow(a, b, 'e')
builder.uadd_with_overflow(a, b, 'f')
builder.umul_with_overflow(a, b, 'g')
builder.usub_with_overflow(a, b, 'h')
self.check_block(block, """\
my_block:
%"c" = call {i32, i1} @"llvm.sadd.with.overflow.i32"(i32 %".1", i32 %".2")
%"d" = call {i32, i1} @"llvm.smul.with.overflow.i32"(i32 %".1", i32 %".2")
%"e" = call {i32, i1} @"llvm.ssub.with.overflow.i32"(i32 %".1", i32 %".2")
%"f" = call {i32, i1} @"llvm.uadd.with.overflow.i32"(i32 %".1", i32 %".2")
%"g" = call {i32, i1} @"llvm.umul.with.overflow.i32"(i32 %".1", i32 %".2")
%"h" = call {i32, i1} @"llvm.usub.with.overflow.i32"(i32 %".1", i32 %".2")
""")
    def test_unary_ops(self):
        """neg/not_ lower to sub-from-zero and xor-with-minus-one."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.neg(a, 'c')
        builder.not_(b, 'd')
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = sub i32 0, %".1"
                %"d" = xor i32 %".2", -1
            """)
    def test_replace_operand(self):
        """replace_usage() swaps a single operand of an instruction."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        undef1 = ir.Constant(ir.IntType(32), ir.Undefined)
        undef2 = ir.Constant(ir.IntType(32), ir.Undefined)
        c = builder.add(undef1, undef2, 'c')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 undef, undef
            """)
        # Replace each undef placeholder with a real argument
        c.replace_usage(undef1, a)
        c.replace_usage(undef2, b)
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
            """)
def test_integer_comparisons(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
builder.icmp_unsigned('==', a, b, 'c')
builder.icmp_unsigned('!=', a, b, 'd')
builder.icmp_unsigned('<', a, b, 'e')
builder.icmp_unsigned('<=', a, b, 'f')
builder.icmp_unsigned('>', a, b, 'g')
builder.icmp_unsigned('>=', a, b, 'h')
builder.icmp_signed('==', a, b, 'i')
builder.icmp_signed('!=', a, b, 'j')
builder.icmp_signed('<', a, b, 'k')
builder.icmp_signed('<=', a, b, 'l')
builder.icmp_signed('>', a, b, 'm')
builder.icmp_signed('>=', a, b, 'n')
with self.assertRaises(ValueError):
builder.icmp_signed('uno', a, b, 'zz')
with self.assertRaises(ValueError):
builder.icmp_signed('foo', a, b, 'zz')
self.assertFalse(block.is_terminated)
self.check_block(block, """\
my_block:
%"c" = icmp eq i32 %".1", %".2"
%"d" = icmp ne i32 %".1", %".2"
%"e" = icmp ult i32 %".1", %".2"
%"f" = icmp ule i32 %".1", %".2"
%"g" = icmp ugt i32 %".1", %".2"
%"h" = icmp uge i32 %".1", %".2"
%"i" = icmp eq i32 %".1", %".2"
%"j" = icmp ne i32 %".1", %".2"
%"k" = icmp slt i32 %".1", %".2"
%"l" = icmp sle i32 %".1", %".2"
%"m" = icmp sgt i32 %".1", %".2"
%"n" = icmp sge i32 %".1", %".2"
""")
def test_float_comparisons(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
builder.fcmp_ordered('==', a, b, 'c')
builder.fcmp_ordered('!=', a, b, 'd')
builder.fcmp_ordered('<', a, b, 'e')
builder.fcmp_ordered('<=', a, b, 'f')
builder.fcmp_ordered('>', a, b, 'g')
builder.fcmp_ordered('>=', a, b, 'h')
builder.fcmp_unordered('==', a, b, 'i')
builder.fcmp_unordered('!=', a, b, 'j')
builder.fcmp_unordered('<', a, b, 'k')
builder.fcmp_unordered('<=', a, b, 'l')
builder.fcmp_unordered('>', a, b, 'm')
builder.fcmp_unordered('>=', a, b, 'n')
# fcmp_ordered and fcmp_unordered are the same for these cases
builder.fcmp_ordered('ord', a, b, 'u')
builder.fcmp_ordered('uno', a, b, 'v')
builder.fcmp_unordered('ord', a, b, 'w')
builder.fcmp_unordered('uno', a, b, 'x')
builder.fcmp_unordered('olt', a, b, 'y',
flags=['nnan', 'ninf', 'nsz', 'arcp', 'fast'])
self.assertFalse(block.is_terminated)
self.check_block(block, """\
my_block:
%"c" = fcmp oeq i32 %".1", %".2"
%"d" = fcmp one i32 %".1", %".2"
%"e" = fcmp olt i32 %".1", %".2"
%"f" = fcmp ole i32 %".1", %".2"
%"g" = fcmp ogt i32 %".1", %".2"
%"h" = fcmp oge i32 %".1", %".2"
%"i" = fcmp ueq i32 %".1", %".2"
%"j" = fcmp une i32 %".1", %".2"
%"k" = fcmp ult i32 %".1", %".2"
%"l" = fcmp ule i32 %".1", %".2"
%"m" = fcmp ugt i32 %".1", %".2"
%"n" = fcmp uge i32 %".1", %".2"
%"u" = fcmp ord i32 %".1", %".2"
%"v" = fcmp uno i32 %".1", %".2"
%"w" = fcmp ord i32 %".1", %".2"
%"x" = fcmp uno i32 %".1", %".2"
%"y" = fcmp nnan ninf nsz arcp fast olt i32 %".1", %".2"
""")
    def test_misc_ops(self):
        """select and unreachable; unreachable terminates the block."""
        block = self.block(name='my_block')
        t = ir.Constant(int1, True)
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.select(t, a, b, 'c')
        self.assertFalse(block.is_terminated)
        builder.unreachable()
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = select i1 true, i32 %".1", i32 %".2"
                unreachable
            """)
    def test_phi(self):
        """phi() collects (value, predecessor-block) incoming pairs."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        bb2 = builder.function.append_basic_block('b2')
        bb3 = builder.function.append_basic_block('b3')
        phi = builder.phi(int32, 'my_phi')
        phi.add_incoming(a, bb2)
        phi.add_incoming(b, bb3)
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"my_phi" = phi i32 [%".1", %"b2"], [%".2", %"b3"]
            """)
    def test_mem_ops(self):
        """alloca/store/load, with alignment and atomic variants, plus the
        type errors raised on invalid pointer operands.
        """
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b, z = builder.function.args[:3]
        c = builder.alloca(int32, name='c')
        d = builder.alloca(int32, size=42, name='d')  # noqa F841
        e = builder.alloca(dbl, size=a, name='e')
        e.align = 8
        self.assertEqual(e.type, ir.PointerType(dbl))
        # store/load return void / the pointee type respectively
        ee = builder.store(z, e)
        self.assertEqual(ee.type, ir.VoidType())
        f = builder.store(b, c)
        self.assertEqual(f.type, ir.VoidType())
        g = builder.load(c, 'g')
        self.assertEqual(g.type, int32)
        # With alignment
        h = builder.store(b, c, align=1)
        self.assertEqual(h.type, ir.VoidType())
        i = builder.load(c, 'i', align=1)
        self.assertEqual(i.type, int32)
        # Atomics
        j = builder.store_atomic(b, c, ordering="seq_cst", align=4)
        self.assertEqual(j.type, ir.VoidType())
        k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k')
        self.assertEqual(k.type, int32)
        # Not pointer types
        with self.assertRaises(TypeError):
            builder.store(b, a)
        with self.assertRaises(TypeError):
            builder.load(b)
        # Mismatching pointer type
        with self.assertRaises(TypeError) as cm:
            builder.store(b, e)
        self.assertEqual(str(cm.exception),
                         "cannot store i32 to double*: mismatching types")
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32
                %"d" = alloca i32, i32 42
                %"e" = alloca double, i32 %".1", align 8
                store double %".3", double* %"e"
                store i32 %".2", i32* %"c"
                %"g" = load i32, i32* %"c"
                store i32 %".2", i32* %"c", align 1
                %"i" = load i32, i32* %"c", align 1
                store atomic i32 %".2", i32* %"c" seq_cst, align 4
                %"k" = load atomic i32, i32* %"c" seq_cst, align 4
            """)
    def test_gep(self):
        """gep() emits getelementptr and computes the result pointer type."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.alloca(ir.PointerType(int32), name='c')
        d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
        self.assertEqual(d.type, ir.PointerType(int32))
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32*
                %"d" = getelementptr i32*, i32** %"c", i32 5, i32 %".1"
            """)
        # XXX test with more complex types
    def test_gep_castinstr(self):
        """gep() works on the result of a bitcast instruction."""
        # similar to:
        # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        int8ptr = int8.as_pointer()
        ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
        d = builder.bitcast(a, ls.as_pointer(), name='d')
        e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
        # Field 3 of the struct is an i8*, so the gep yields an i8**.
        self.assertEqual(e.type, ir.PointerType(int8ptr))
        self.check_block(block, """\
            my_block:
                %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64}*
                %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64}* %"d", i32 0, i32 3
            """)  # noqa E501
    def test_gep_castinstr_addrspace(self):
        """gep() through a bitcast to a non-default address space keeps the
        address space on the resulting pointer.
        """
        # similar to:
        # numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        addrspace = 4
        int8ptr = int8.as_pointer()
        ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
        d = builder.bitcast(a, ls.as_pointer(addrspace=addrspace), name='d')
        e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
        self.assertEqual(e.type.addrspace, addrspace)
        self.assertEqual(e.type, ir.PointerType(int8ptr, addrspace=addrspace))
        self.check_block(block, """\
            my_block:
                %"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64} addrspace(4)*
                %"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64} addrspace(4)* %"d", i32 0, i32 3
            """)  # noqa E501
    def test_gep_addrspace(self):
        """Chained geps through addrspace pointers keep the address space."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        addrspace = 4
        c = builder.alloca(ir.PointerType(int32, addrspace=addrspace), name='c')
        self.assertEqual(str(c.type), 'i32 addrspace(4)**')
        self.assertEqual(c.type.pointee.addrspace, addrspace)
        d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
        self.assertEqual(d.type.addrspace, addrspace)
        e = builder.gep(d, [ir.Constant(int32, 10)], name='e')
        self.assertEqual(e.type.addrspace, addrspace)
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32 addrspace(4)*
                %"d" = getelementptr i32 addrspace(4)*, i32 addrspace(4)** %"c", i32 5, i32 %".1"
                %"e" = getelementptr i32, i32 addrspace(4)* %"d", i32 10
            """)  # noqa E501
    def test_extract_insert_value(self):
        """extractvalue/insertvalue on flat and nested literal structs,
        including the TypeErrors raised on invalid aggregates and indices.
        """
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        tp_inner = ir.LiteralStructType([int32, int1])
        tp_outer = ir.LiteralStructType([int8, tp_inner])
        c_inner = ir.Constant(tp_inner, (ir.Constant(int32, 4),
                                         ir.Constant(int1, True)))
        # Flat structure
        c = builder.extract_value(c_inner, 0, name='c')  # noqa F841
        d = builder.insert_value(c_inner, a, 0, name='d')  # noqa F841
        e = builder.insert_value(d, ir.Constant(int1, False), 1, name='e')  # noqa F841 E501
        self.assertEqual(d.type, tp_inner)
        self.assertEqual(e.type, tp_inner)
        # Nested structure
        p_outer = builder.alloca(tp_outer, name='ptr')
        j = builder.load(p_outer, name='j')
        k = builder.extract_value(j, 0, name='k')
        l = builder.extract_value(j, 1, name='l')
        m = builder.extract_value(j, (1, 0), name='m')
        n = builder.extract_value(j, (1, 1), name='n')
        o = builder.insert_value(j, l, 1, name='o')
        p = builder.insert_value(j, a, (1, 0), name='p')
        self.assertEqual(k.type, int8)
        self.assertEqual(l.type, tp_inner)
        self.assertEqual(m.type, int32)
        self.assertEqual(n.type, int1)
        self.assertEqual(o.type, tp_outer)
        self.assertEqual(p.type, tp_outer)
        with self.assertRaises(TypeError):
            # Not an aggregate
            builder.extract_value(p_outer, 0)
        with self.assertRaises(TypeError):
            # Indexing too deep
            builder.extract_value(c_inner, (0, 0))
        with self.assertRaises(TypeError):
            # Index out of structure bounds
            builder.extract_value(c_inner, 5)
        with self.assertRaises(TypeError):
            # Not an aggregate
            builder.insert_value(a, b, 0)
        with self.assertRaises(TypeError):
            # Replacement value has the wrong type
            builder.insert_value(c_inner, a, 1)
        self.check_block(block, """\
            my_block:
                %"c" = extractvalue {i32, i1} {i32 4, i1 true}, 0
                %"d" = insertvalue {i32, i1} {i32 4, i1 true}, i32 %".1", 0
                %"e" = insertvalue {i32, i1} %"d", i1 false, 1
                %"ptr" = alloca {i8, {i32, i1}}
                %"j" = load {i8, {i32, i1}}, {i8, {i32, i1}}* %"ptr"
                %"k" = extractvalue {i8, {i32, i1}} %"j", 0
                %"l" = extractvalue {i8, {i32, i1}} %"j", 1
                %"m" = extractvalue {i8, {i32, i1}} %"j", 1, 0
                %"n" = extractvalue {i8, {i32, i1}} %"j", 1, 1
                %"o" = insertvalue {i8, {i32, i1}} %"j", {i32, i1} %"l", 1
                %"p" = insertvalue {i8, {i32, i1}} %"j", i32 %".1", 1, 0
            """)
    def test_cast_ops(self):
        """Exercise every cast instruction (trunc/zext/sext, fptrunc/fpext,
        fptoui/fptosi, uitofp/sitofp, ptrtoint/inttoptr, bitcast) and check
        the exact IR text emitted for each."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        # Args: two i32, one double, one i32* (per the test function signature)
        a, b, fa, ptr = builder.function.args[:4]
        c = builder.trunc(a, int8, name='c')
        d = builder.zext(c, int32, name='d')  # noqa F841
        e = builder.sext(c, int32, name='e')  # noqa F841
        fb = builder.fptrunc(fa, flt, 'fb')
        fc = builder.fpext(fb, dbl, 'fc')  # noqa F841
        g = builder.fptoui(fa, int32, 'g')
        h = builder.fptosi(fa, int8, 'h')
        fd = builder.uitofp(g, flt, 'fd')  # noqa F841
        fe = builder.sitofp(h, dbl, 'fe')  # noqa F841
        i = builder.ptrtoint(ptr, int32, 'i')
        j = builder.inttoptr(i, ir.PointerType(int8), 'j')  # noqa F841
        k = builder.bitcast(a, flt, "k")  # noqa F841
        # Casts are not terminators
        self.assertFalse(block.is_terminated)
        self.check_block(block, """\
            my_block:
                %"c" = trunc i32 %".1" to i8
                %"d" = zext i8 %"c" to i32
                %"e" = sext i8 %"c" to i32
                %"fb" = fptrunc double %".3" to float
                %"fc" = fpext float %"fb" to double
                %"g" = fptoui double %".3" to i32
                %"h" = fptosi double %".3" to i8
                %"fd" = uitofp i32 %"g" to float
                %"fe" = sitofp i8 %"h" to double
                %"i" = ptrtoint i32* %".4" to i32
                %"j" = inttoptr i32 %"i" to i8*
                %"k" = bitcast i32 %".1" to float
            """)
    def test_atomicrmw(self):
        """atomic_rmw('add', ...) emits an atomicrmw instruction and yields a
        value of the operand's type (the old memory contents)."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.alloca(int32, name='c')
        d = builder.atomic_rmw('add', c, a, 'monotonic', 'd')
        self.assertEqual(d.type, int32)
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32
                %"d" = atomicrmw add i32* %"c", i32 %".1" monotonic
            """)
    def test_branch(self):
        """An unconditional branch terminates the current block."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        bb_target = builder.function.append_basic_block(name='target')
        builder.branch(bb_target)
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                br label %"target"
            """)
    def test_cbranch(self):
        """A conditional branch terminates the current block."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        bb_true = builder.function.append_basic_block(name='b_true')
        bb_false = builder.function.append_basic_block(name='b_false')
        builder.cbranch(ir.Constant(int1, False), bb_true, bb_false)
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                br i1 false, label %"b_true", label %"b_false"
            """)
    def test_cbranch_weights(self):
        """set_weights() attaches !prof branch_weights metadata to a cbranch."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        bb_true = builder.function.append_basic_block(name='b_true')
        bb_false = builder.function.append_basic_block(name='b_false')
        br = builder.cbranch(ir.Constant(int1, False), bb_true, bb_false)
        br.set_weights([5, 42])
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                br i1 false, label %"b_true", label %"b_false", !prof !0
            """)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 5, i32 42 }
            """)
    def test_branch_indirect(self):
        """indirectbr takes a block address plus explicit possible
        destinations, and terminates the block."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        bb_1 = builder.function.append_basic_block(name='b_1')
        bb_2 = builder.function.append_basic_block(name='b_2')
        indirectbr = builder.branch_indirect(
            ir.BlockAddress(builder.function, bb_1))
        indirectbr.add_destination(bb_1)
        indirectbr.add_destination(bb_2)
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                indirectbr i8* blockaddress(@"my_func", %"b_1"), [label %"b_1", label %"b_2"]
            """)  # noqa E501
    def test_returns(self):
        """ret / ret void terminate the block; instruction metadata (!dbg)
        can be attached to the return instruction."""
        def check(block, expected_ir):
            # Returns must terminate their block
            self.assertTrue(block.is_terminated)
            self.check_block(block, expected_ir)

        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        builder.ret_void()
        check(block, """\
            my_block:
                ret void
            """)
        block = self.block(name='other_block')
        builder = ir.IRBuilder(block)
        builder.ret(int32(5))
        check(block, """\
            other_block:
                ret i32 5
            """)
        # With metadata
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        inst = builder.ret_void()
        inst.set_metadata("dbg", block.module.add_metadata(()))
        check(block, """\
            my_block:
                ret void, !dbg !0
            """)
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        inst = builder.ret(int32(6))
        inst.set_metadata("dbg", block.module.add_metadata(()))
        check(block, """\
            my_block:
                ret i32 6, !dbg !0
            """)
    def test_switch(self):
        """switch supports explicit Constant cases and auto-wrapped plain
        Python values, and terminates the block."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        bb_onzero = builder.function.append_basic_block(name='onzero')
        bb_onone = builder.function.append_basic_block(name='onone')
        bb_ontwo = builder.function.append_basic_block(name='ontwo')
        bb_else = builder.function.append_basic_block(name='otherwise')
        sw = builder.switch(a, bb_else)
        sw.add_case(ir.Constant(int32, 0), bb_onzero)
        sw.add_case(ir.Constant(int32, 1), bb_onone)
        # A plain Python value gets converted into the right IR constant
        sw.add_case(2, bb_ontwo)
        self.assertTrue(block.is_terminated)
        self.check_block(block, """\
            my_block:
                switch i32 %".1", label %"otherwise" [i32 0, label %"onzero" i32 1, label %"onone" i32 2, label %"ontwo"]
            """)  # noqa E501
    def test_call(self):
        """call supports plain, vararg and half-returning callees, plus
        calling convention, per-call attributes and fast-math flags."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        tp_f = ir.FunctionType(flt, (int32, int32))
        tp_g = ir.FunctionType(dbl, (int32,), var_arg=True)
        tp_h = ir.FunctionType(hlf, (int32, int32))
        f = ir.Function(builder.function.module, tp_f, 'f')
        g = ir.Function(builder.function.module, tp_g, 'g')
        h = ir.Function(builder.function.module, tp_h, 'h')
        builder.call(f, (a, b), 'res_f')
        # Vararg callee: the full function type is printed at the call site
        builder.call(g, (b, a), 'res_g')
        builder.call(h, (a, b), 'res_h')
        builder.call(f, (a, b), 'res_f_fast', cconv='fastcc')
        res_f_readonly = builder.call(f, (a, b), 'res_f_readonly')
        res_f_readonly.attributes.add('readonly')
        builder.call(f, (a, b), 'res_fast', fastmath='fast')
        builder.call(f, (a, b), 'res_nnan_ninf', fastmath=('nnan', 'ninf'))
        self.check_block(block, """\
            my_block:
                %"res_f" = call float @"f"(i32 %".1", i32 %".2")
                %"res_g" = call double (i32, ...) @"g"(i32 %".2", i32 %".1")
                %"res_h" = call half @"h"(i32 %".1", i32 %".2")
                %"res_f_fast" = call fastcc float @"f"(i32 %".1", i32 %".2")
                %"res_f_readonly" = call float @"f"(i32 %".1", i32 %".2") readonly
                %"res_fast" = call fast float @"f"(i32 %".1", i32 %".2")
                %"res_nnan_ninf" = call ninf nnan float @"f"(i32 %".1", i32 %".2")
            """)
    def test_call_metadata(self):
        """Metadata values (e.g. for llvm.dbg.declare) can be passed as call
        arguments; pointer arguments are wrapped as 'metadata i32* ...'."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        dbg_declare_ty = ir.FunctionType(ir.VoidType(), [ir.MetaDataType()] * 3)
        dbg_declare = ir.Function(
            builder.module,
            dbg_declare_ty,
            'llvm.dbg.declare')
        a = builder.alloca(int32, name="a")
        b = builder.module.add_metadata(())
        builder.call(dbg_declare, (a, b, b))
        self.check_block(block, """\
            my_block:
                %"a" = alloca i32
                call void @"llvm.dbg.declare"(metadata i32* %"a", metadata !0, metadata !0)
            """)  # noqa E501
    def test_invoke(self):
        """invoke emits the call plus normal/unwind continuation labels."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        tp_f = ir.FunctionType(flt, (int32, int32))
        f = ir.Function(builder.function.module, tp_f, 'f')
        bb_normal = builder.function.append_basic_block(name='normal')
        bb_unwind = builder.function.append_basic_block(name='unwind')
        builder.invoke(f, (a, b), bb_normal, bb_unwind, 'res_f')
        self.check_block(block, """\
            my_block:
                %"res_f" = invoke float @"f"(i32 %".1", i32 %".2")
                    to label %"normal" unwind label %"unwind"
            """)
    def test_landingpad(self):
        """landingpad supports catch and filter clauses; resume re-raises the
        in-flight exception value."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        lp = builder.landingpad(ir.LiteralStructType([int32,
                                                      int8.as_pointer()]), 'lp')
        # Mimics C++'s typeinfo object for 'int' (as emitted by clang)
        int_typeinfo = ir.GlobalVariable(builder.function.module,
                                         int8.as_pointer(), "_ZTIi")
        int_typeinfo.global_constant = True
        lp.add_clause(ir.CatchClause(int_typeinfo))
        lp.add_clause(ir.FilterClause(ir.Constant(ir.ArrayType(
            int_typeinfo.type, 1), [int_typeinfo])))
        builder.resume(lp)
        self.check_block(block, """\
            my_block:
                %"lp" = landingpad {i32, i8*}
                    catch i8** @"_ZTIi"
                    filter [1 x i8**] [i8** @"_ZTIi"]
                resume {i32, i8*} %"lp"
            """)
    def test_assume(self):
        """assume() lowers to a call to the llvm.assume intrinsic."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        c = builder.icmp_signed('>', a, b, name='c')
        builder.assume(c)
        self.check_block(block, """\
            my_block:
                %"c" = icmp sgt i32 %".1", %".2"
                call void @"llvm.assume"(i1 %"c")
            """)
    def test_vector_ops(self):
        """Vector instructions: insertelement, shufflevector, element-wise
        add, and extractelement, one block per operation."""
        block = self.block(name='insert_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        a.name = 'a'
        b.name = 'b'
        vecty = ir.VectorType(a.type, 2)
        vec = ir.Constant(vecty, ir.Undefined)
        idxty = ir.IntType(32)
        # Build <a, b> by inserting into an undef vector
        vec = builder.insert_element(vec, a, idxty(0), name='vec1')
        vec = builder.insert_element(vec, b, idxty(1), name='vec2')
        self.check_block(block, """\
            insert_block:
                %"vec1" = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %"a", i32 0
                %"vec2" = insertelement <2 x i32> %"vec1", i32 %"b", i32 1
            """)  # noqa E501
        block = builder.append_basic_block("shuffle_block")
        builder.branch(block)
        builder.position_at_end(block)
        # Swap the two lanes
        mask = ir.Constant(vecty, [1, 0])
        builder.shuffle_vector(vec, vec, mask, name='shuf')
        self.check_block(block, """\
            shuffle_block:
                %"shuf" = shufflevector <2 x i32> %"vec2", <2 x i32> %"vec2", <2 x i32> <i32 1, i32 0>
            """)  # noqa E501
        block = builder.append_basic_block("add_block")
        builder.branch(block)
        builder.position_at_end(block)
        builder.add(vec, vec, name='sum')
        self.check_block(block, """\
            add_block:
                %"sum" = add <2 x i32> %"vec2", %"vec2"
            """)
        block = builder.append_basic_block("extract_block")
        builder.branch(block)
        builder.position_at_end(block)
        c = builder.extract_element(vec, idxty(0), name='ex1')
        d = builder.extract_element(vec, idxty(1), name='ex2')
        self.check_block(block, """\
            extract_block:
                %"ex1" = extractelement <2 x i32> %"vec2", i32 0
                %"ex2" = extractelement <2 x i32> %"vec2", i32 1
            """)
        builder.ret(builder.add(c, d))
        # The whole function must also pass LLVM's verifier
        self.assert_valid_ir(builder.module)
    def test_bitreverse(self):
        """bitreverse() lowers to the width-specific llvm.bitreverse intrinsic."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int64, 5)
        c = builder.bitreverse(a, name='c')
        builder.ret(c)
        self.check_block(block, """\
            my_block:
                %"c" = call i64 @"llvm.bitreverse.i64"(i64 5)
                ret i64 %"c"
            """)
    def test_bitreverse_wrongtype(self):
        """bitreverse() rejects non-integer operands with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5)
        with self.assertRaises(TypeError) as raises:
            builder.bitreverse(a, name='c')
        self.assertIn(
            "expected an integer type, got float",
            str(raises.exception))
    def test_fence(self):
        """fence requires one of the strong orderings (acquire/release/
        acq_rel/seq_cst); monotonic and None are rejected.  The optional
        syncscope is printed before the ordering."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        with self.assertRaises(ValueError) as raises:
            builder.fence("monotonic", None)
        self.assertIn(
            "Invalid fence ordering \"monotonic\"!",
            str(raises.exception))
        with self.assertRaises(ValueError) as raises:
            builder.fence(None, "monotonic")
        self.assertIn(
            "Invalid fence ordering \"None\"!",
            str(raises.exception))
        builder.fence("acquire", None)
        builder.fence("release", "singlethread")
        builder.fence("acq_rel", "singlethread")
        builder.fence("seq_cst")
        builder.ret_void()
        self.check_block(block, """\
            my_block:
                fence acquire
                fence syncscope("singlethread") release
                fence syncscope("singlethread") acq_rel
                fence seq_cst
                ret void
            """)
def test_bswap(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(int32, 5)
c = builder.bswap(a, name='c')
builder.ret(c)
self.check_block(block, """\
my_block:
%"c" = call i32 @"llvm.bswap.i32"(i32 5)
ret i32 %"c"
""")
def test_ctpop(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(int16, 5)
c = builder.ctpop(a, name='c')
builder.ret(c)
self.check_block(block, """\
my_block:
%"c" = call i16 @"llvm.ctpop.i16"(i16 5)
ret i16 %"c"
""")
    def test_ctlz(self):
        """ctlz() takes the operand plus an i1 'is-zero-undef' flag."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int16, 5)
        b = ir.Constant(int1, 1)
        c = builder.ctlz(a, b, name='c')
        builder.ret(c)
        self.check_block(block, """\
            my_block:
                %"c" = call i16 @"llvm.ctlz.i16"(i16 5, i1 1)
                ret i16 %"c"
            """)
    def test_convert_to_fp16_f32(self):
        """convert_to_fp16() maps a float to an i16 half-precision bit pattern
        via the llvm.convert.to.fp16.f32 intrinsic."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5.0)
        b = builder.convert_to_fp16(a, name='b')
        builder.ret(b)
        self.check_block(block, """\
            my_block:
                %"b" = call i16 @"llvm.convert.to.fp16.f32"(float 0x4014000000000000)
                ret i16 %"b"
            """)  # noqa E501
    def test_convert_to_fp16_f32_wrongtype(self):
        """convert_to_fp16() rejects non-float operands with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int16, 5)
        with self.assertRaises(TypeError) as raises:
            builder.convert_to_fp16(a, name='b')
        self.assertIn(
            "expected a float type, got i16",
            str(raises.exception))
    def test_convert_from_fp16_f32(self):
        """convert_from_fp16() expands an i16 half bit pattern back to the
        requested float type via llvm.convert.from.fp16.f32."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int16, 5)
        b = builder.convert_from_fp16(a, name='b', to=flt)
        builder.ret(b)
        self.check_block(block, """\
            my_block:
                %"b" = call float @"llvm.convert.from.fp16.f32"(i16 5)
                ret float %"b"
            """)
    def test_convert_from_fp16_f32_notype(self):
        """convert_from_fp16() requires an explicit 'to' return type."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5.5)
        with self.assertRaises(TypeError) as raises:
            builder.convert_from_fp16(a, name='b')
        self.assertIn(
            "expected a float return type",
            str(raises.exception))
    def test_convert_from_fp16_f32_wrongtype(self):
        """convert_from_fp16() rejects a non-i16 operand with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5.5)
        with self.assertRaises(TypeError) as raises:
            builder.convert_from_fp16(a, name='b', to=flt)
        self.assertIn(
            "expected an i16 type, got float",
            str(raises.exception))
    def test_convert_from_fp16_f32_wrongtype2(self):
        """convert_from_fp16() rejects a non-float 'to' type with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5.5)
        with self.assertRaises(TypeError) as raises:
            builder.convert_from_fp16(a, name='b', to=int16)
        self.assertIn(
            "expected a float type, got i16",
            str(raises.exception))
    def test_cttz(self):
        """cttz() takes the operand plus an i1 'is-zero-undef' flag."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int64, 5)
        b = ir.Constant(int1, 1)
        c = builder.cttz(a, b, name='c')
        builder.ret(c)
        self.check_block(block, """\
            my_block:
                %"c" = call i64 @"llvm.cttz.i64"(i64 5, i1 1)
                ret i64 %"c"
            """)
    def test_cttz_wrongflag(self):
        """cttz() rejects a non-i1 flag argument with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int64, 5)
        b = ir.Constant(int32, 3)
        with self.assertRaises(TypeError) as raises:
            builder.cttz(a, b, name='c')
        self.assertIn(
            "expected an i1 type, got i32",
            str(raises.exception))
    def test_cttz_wrongtype(self):
        """cttz() rejects non-integer operands with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5)
        b = ir.Constant(int1, 1)
        with self.assertRaises(TypeError) as raises:
            builder.cttz(a, b, name='c')
        self.assertIn(
            "expected an integer type, got float",
            str(raises.exception))
    def test_fma(self):
        """fma() lowers to the type-suffixed llvm.fma intrinsic."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5)
        b = ir.Constant(flt, 1)
        c = ir.Constant(flt, 2)
        fma = builder.fma(a, b, c, name='fma')
        builder.ret(fma)
        self.check_block(block, """\
            my_block:
                %"fma" = call float @"llvm.fma.f32"(float 0x4014000000000000, float 0x3ff0000000000000, float 0x4000000000000000)
                ret float %"fma"
            """)  # noqa E501
    def test_fma_wrongtype(self):
        """fma() rejects integer operands with a TypeError."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(int32, 5)
        b = ir.Constant(int32, 1)
        c = ir.Constant(int32, 2)
        with self.assertRaises(TypeError) as raises:
            builder.fma(a, b, c, name='fma')
        # NB: the message text below matches llvmlite's actual wording
        self.assertIn(
            "expected an floating point type, got i32",
            str(raises.exception))
    def test_fma_mixedtypes(self):
        """fma() requires all three operands to share one float type."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a = ir.Constant(flt, 5)
        b = ir.Constant(dbl, 1)
        c = ir.Constant(flt, 2)
        with self.assertRaises(TypeError) as raises:
            builder.fma(a, b, c, name='fma')
        self.assertIn(
            "expected types to be the same, got float, double, float",
            str(raises.exception))
class TestBuilderMisc(TestBase):
    """Test non-instruction features of IRBuilder: attribute access,
    repositioning, the if_then/if_else context managers, instruction
    removal and debug metadata."""

    def test_attributes(self):
        """The builder exposes its enclosing function and module."""
        block = self.block(name='start')
        builder = ir.IRBuilder(block)
        self.assertIs(builder.function, block.parent)
        self.assertIsInstance(builder.function, ir.Function)
        self.assertIs(builder.module, block.parent.module)
        self.assertIsInstance(builder.module, ir.Module)

    def test_goto_block(self):
        """goto_block()/goto_entry_block() temporarily reposition the
        builder; instructions are emitted before any existing terminator
        (note 'h' lands before the br in block foo)."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        a, b = builder.function.args[:2]
        builder.add(a, b, 'c')
        bb_new = builder.append_basic_block(name='foo')
        with builder.goto_block(bb_new):
            builder.fadd(a, b, 'd')
            with builder.goto_entry_block():
                builder.sub(a, b, 'e')
            builder.fsub(a, b, 'f')
            builder.branch(bb_new)
        builder.mul(a, b, 'g')
        with builder.goto_block(bb_new):
            builder.fmul(a, b, 'h')
        self.check_block(block, """\
            my_block:
                %"c" = add i32 %".1", %".2"
                %"e" = sub i32 %".1", %".2"
                %"g" = mul i32 %".1", %".2"
            """)
        self.check_block(bb_new, """\
            foo:
                %"d" = fadd i32 %".1", %".2"
                %"f" = fsub i32 %".1", %".2"
                %"h" = fmul i32 %".1", %".2"
                br label %"foo"
            """)

    def test_if_then(self):
        """if_then() opens a .if block and positions the builder at .endif
        on exit; an explicit terminator suppresses the implicit branch."""
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a) as bbend:
            builder.add(z, z, 'b')
            # Block will be terminated implicitly
        self.assertIs(builder.block, bbend)
        c = builder.add(z, z, 'c')
        with builder.if_then(c):
            builder.add(z, z, 'd')
            builder.branch(block)
            # No implicit termination
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.endif"
            one.if:
                %"b" = add i1 0, 0
                br label %"one.endif"
            one.endif:
                %"c" = add i1 0, 0
                br i1 %"c", label %"one.endif.if", label %"one.endif.endif"
            one.endif.if:
                %"d" = add i1 0, 0
                br label %"one"
            one.endif.endif:
            """)

    def test_if_then_nested(self):
        # Implicit termination in a nested if/then
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a):
            b = builder.add(z, z, 'b')
            with builder.if_then(b):
                builder.add(z, z, 'c')
        builder.ret_void()
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.endif"
            one.if:
                %"b" = add i1 0, 0
                br i1 %"b", label %"one.if.if", label %"one.if.endif"
            one.endif:
                ret void
            one.if.if:
                %"c" = add i1 0, 0
                br label %"one.if.endif"
            one.if.endif:
                br label %"one.endif"
            """)

    def test_if_then_long_label(self):
        """Derived block labels are truncated (25 chars + '..') when the
        parent label is very long."""
        full_label = 'Long' * 20
        block = self.block(name=full_label)
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_then(a):
            b = builder.add(z, z, 'b')
            with builder.if_then(b):
                builder.add(z, z, 'c')
        builder.ret_void()
        self.check_func_body(builder.function, """\
            {full_label}:
                %"a" = add i1 0, 0
                br i1 %"a", label %"{label}.if", label %"{label}.endif"
            {label}.if:
                %"b" = add i1 0, 0
                br i1 %"b", label %"{label}.if.if", label %"{label}.if.endif"
            {label}.endif:
                ret void
            {label}.if.if:
                %"c" = add i1 0, 0
                br label %"{label}.if.endif"
            {label}.if.endif:
                br label %"{label}.endif"
            """.format(full_label=full_label, label=full_label[:25] + '..'))

    def test_if_then_likely(self):
        """likely=True/False attaches 99:1 / 1:99 branch_weights metadata."""
        def check(likely):
            block = self.block(name='one')
            builder = ir.IRBuilder(block)
            z = ir.Constant(int1, 0)
            with builder.if_then(z, likely=likely):
                pass
            self.check_block(block, """\
                one:
                    br i1 0, label %"one.if", label %"one.endif", !prof !0
                """)
            return builder

        builder = check(True)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 99, i32 1 }
            """)
        builder = check(False)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 1, i32 99 }
            """)

    def test_if_else(self):
        """if_else() yields (then, otherwise) sub-contexts; each block is
        terminated implicitly unless an explicit terminator is emitted."""
        block = self.block(name='one')
        builder = ir.IRBuilder(block)
        z = ir.Constant(int1, 0)
        a = builder.add(z, z, 'a')
        with builder.if_else(a) as (then, otherwise):
            with then:
                builder.add(z, z, 'b')
            with otherwise:
                builder.add(z, z, 'c')
        # Each block will be terminated implicitly
        with builder.if_else(a) as (then, otherwise):
            with then:
                builder.branch(block)
            with otherwise:
                builder.ret_void()
        # No implicit termination
        self.check_func_body(builder.function, """\
            one:
                %"a" = add i1 0, 0
                br i1 %"a", label %"one.if", label %"one.else"
            one.if:
                %"b" = add i1 0, 0
                br label %"one.endif"
            one.else:
                %"c" = add i1 0, 0
                br label %"one.endif"
            one.endif:
                br i1 %"a", label %"one.endif.if", label %"one.endif.else"
            one.endif.if:
                br label %"one"
            one.endif.else:
                ret void
            one.endif.endif:
            """)

    def test_if_else_likely(self):
        """likely= on if_else() attaches branch_weights, as for if_then()."""
        def check(likely):
            block = self.block(name='one')
            builder = ir.IRBuilder(block)
            z = ir.Constant(int1, 0)
            with builder.if_else(z, likely=likely) as (then, otherwise):
                with then:
                    builder.branch(block)
                with otherwise:
                    builder.ret_void()
            self.check_func_body(builder.function, """\
                one:
                    br i1 0, label %"one.if", label %"one.else", !prof !0
                one.if:
                    br label %"one"
                one.else:
                    ret void
                one.endif:
                """)
            return builder

        builder = check(True)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 99, i32 1 }
            """)
        builder = check(False)
        self.check_metadata(builder.module, """\
            !0 = !{ !"branch_weights", i32 1, i32 99 }
            """)

    def test_positioning(self):
        """position_at_start/at_end/before/after control where the next
        instruction is inserted."""
        func = self.function()
        builder = ir.IRBuilder()
        z = ir.Constant(int32, 0)
        bb_one = func.append_basic_block(name='one')
        bb_two = func.append_basic_block(name='two')
        bb_three = func.append_basic_block(name='three')
        # .at_start(empty block)
        builder.position_at_start(bb_one)
        builder.add(z, z, 'a')
        # .at_end(empty block)
        builder.position_at_end(bb_two)
        builder.add(z, z, 'm')
        builder.add(z, z, 'n')
        # .at_start(block)
        builder.position_at_start(bb_two)
        o = builder.add(z, z, 'o')
        builder.add(z, z, 'p')
        # .at_end(block)
        builder.position_at_end(bb_one)
        b = builder.add(z, z, 'b')
        # .after(instr)
        builder.position_after(o)
        builder.add(z, z, 'q')
        # .before(instr)
        builder.position_before(b)
        builder.add(z, z, 'c')
        self.check_block(bb_one, """\
            one:
                %"a" = add i32 0, 0
                %"c" = add i32 0, 0
                %"b" = add i32 0, 0
            """)
        self.check_block(bb_two, """\
            two:
                %"o" = add i32 0, 0
                %"q" = add i32 0, 0
                %"p" = add i32 0, 0
                %"m" = add i32 0, 0
                %"n" = add i32 0, 0
            """)
        self.check_block(bb_three, """\
            three:
            """)

    def test_instruction_removal(self):
        """remove() deletes an instruction; removing the terminator makes
        the block unterminated again."""
        func = self.function()
        builder = ir.IRBuilder()
        blk = func.append_basic_block(name='entry')
        builder.position_at_end(blk)
        k = ir.Constant(int32, 1234)
        a = builder.add(k, k, 'a')
        retvoid = builder.ret_void()
        self.assertTrue(blk.is_terminated)
        builder.remove(retvoid)
        self.assertFalse(blk.is_terminated)
        b = builder.mul(a, a, 'b')
        c = builder.add(b, b, 'c')
        builder.remove(c)
        builder.ret_void()
        self.assertTrue(blk.is_terminated)
        self.check_block(blk, """\
            entry:
                %"a" = add i32 1234, 1234
                %"b" = mul i32 %"a", %"a"
                ret void
            """)

    def test_metadata(self):
        """builder.debug_metadata is attached as !dbg to every emitted
        instruction."""
        block = self.block(name='my_block')
        builder = ir.IRBuilder(block)
        builder.debug_metadata = builder.module.add_metadata([])
        builder.alloca(ir.PointerType(int32), name='c')
        self.check_block(block, """\
            my_block:
                %"c" = alloca i32*, !dbg !0
            """)
class TestTypes(TestBase):
    """Test the ir.Type hierarchy: equality, hashing, pickling, string
    representation, gep result types, and ABI size/alignment queries."""

    def has_logical_equality(self, ty):
        """Whether *ty* compares equal by value; labels (even behind
        pointers) only compare by identity."""
        while isinstance(ty, ir.PointerType):
            ty = ty.pointee
        return not isinstance(ty, ir.LabelType)

    def assorted_types(self):
        """Return a list of mutually-distinct types covering every kind."""
        # Avoid polluting the namespace
        context = ir.Context()
        types = [
            ir.LabelType(), ir.VoidType(),
            ir.FunctionType(int1, (int8, int8)), ir.FunctionType(int1, (int8,)),
            ir.FunctionType(int1, (int8,), var_arg=True),
            ir.FunctionType(int8, (int8,)),
            int1, int8, int32, flt, dbl,
            ir.ArrayType(flt, 5), ir.ArrayType(dbl, 5), ir.ArrayType(dbl, 4),
            ir.LiteralStructType((int1, int8)), ir.LiteralStructType((int8,
                                                                      int1)),
            context.get_identified_type("MyType1"),
            context.get_identified_type("MyType2"),
        ]
        types += [ir.PointerType(tp) for tp in types
                  if not isinstance(tp, (ir.VoidType, ir.LabelType))]
        return types

    def test_pickling(self):
        """Types round-trip through pickle; value-equal ones stay equal."""
        types = self.assorted_types()
        for ty in types:
            newty = self.assert_pickle_correctly(ty)
            if self.has_logical_equality(ty):
                self.assertEqual(newty, ty)

    def test_comparisons(self):
        """Distinct types are pairwise unequal; a copy compares equal only
        when the type has logical (by-value) equality."""
        types = self.assorted_types()
        for a, b in itertools.product(types, types):
            if a is not b:
                self.assertFalse(a == b, (a, b))
                self.assertTrue(a != b, (a, b))
        # We assume copy.copy() works fine here...
        for tp in types:
            other = copy.copy(tp)
            if self.has_logical_equality(tp):
                self.assertTrue(tp == other, (tp, other))
                self.assertFalse(tp != other, (tp, other))
            else:
                self.assertFalse(tp == other, (tp, other))
                self.assertTrue(tp != other, (tp, other))

    def test_str(self):
        """str() of each type kind yields LLVM assembly syntax; identified
        struct names are quoted and escaped."""
        self.assertEqual(str(int1), 'i1')
        self.assertEqual(str(ir.IntType(29)), 'i29')
        self.assertEqual(str(flt), 'float')
        self.assertEqual(str(dbl), 'double')
        self.assertEqual(str(ir.VoidType()), 'void')
        self.assertEqual(str(ir.FunctionType(int1, ())), 'i1 ()')
        self.assertEqual(str(ir.FunctionType(int1, (flt,))), 'i1 (float)')
        self.assertEqual(str(ir.FunctionType(int1, (flt, dbl))),
                         'i1 (float, double)')
        self.assertEqual(str(ir.FunctionType(int1, (), var_arg=True)),
                         'i1 (...)')
        self.assertEqual(str(ir.FunctionType(int1, (flt,), var_arg=True)),
                         'i1 (float, ...)')
        self.assertEqual(str(ir.FunctionType(int1, (flt, dbl), var_arg=True)),
                         'i1 (float, double, ...)')
        self.assertEqual(str(ir.PointerType(int32)), 'i32*')
        self.assertEqual(str(ir.PointerType(ir.PointerType(int32))), 'i32**')
        self.assertEqual(str(ir.ArrayType(int1, 5)), '[5 x i1]')
        self.assertEqual(str(ir.ArrayType(ir.PointerType(int1), 5)),
                         '[5 x i1*]')
        self.assertEqual(str(ir.PointerType(ir.ArrayType(int1, 5))),
                         '[5 x i1]*')
        self.assertEqual(str(ir.LiteralStructType((int1,))), '{i1}')
        self.assertEqual(str(ir.LiteralStructType((int1, flt))), '{i1, float}')
        self.assertEqual(str(ir.LiteralStructType((
            ir.PointerType(int1), ir.LiteralStructType((int32, int8))))),
            '{i1*, {i32, i8}}')
        self.assertEqual(str(ir.LiteralStructType((int1,), packed=True)),
                         '<{i1}>')
        self.assertEqual(str(ir.LiteralStructType((int1, flt), packed=True)),
                         '<{i1, float}>')
        # Avoid polluting the namespace
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        self.assertEqual(str(mytype), "%\"MyType\"")
        # Special characters in identified struct names are hex-escaped
        mytype1 = context.get_identified_type("MyType\\")
        self.assertEqual(str(mytype1), "%\"MyType\\5c\"")
        mytype2 = context.get_identified_type("MyType\"")
        self.assertEqual(str(mytype2), "%\"MyType\\22\"")

    def test_hash(self):
        """Value-equal types hash equal (required for dict/set use)."""
        for typ in filter(self.has_logical_equality, self.assorted_types()):
            self.assertEqual(hash(typ), hash(copy.copy(typ)))

    def test_gep(self):
        """Type.gep(index) returns the element type reached by that index,
        raises TypeError for invalid index types and IndexError for
        out-of-bounds struct members."""
        def check_constant(tp, i, expected):
            actual = tp.gep(ir.Constant(int32, i))
            self.assertEqual(actual, expected)

        def check_index_type(tp):
            # Floating-point indices are invalid for any aggregate
            index = ir.Constant(dbl, 1.0)
            with self.assertRaises(TypeError):
                tp.gep(index)

        tp = ir.PointerType(dbl)
        for i in range(5):
            check_constant(tp, i, dbl)
        check_index_type(tp)

        tp = ir.ArrayType(int1, 3)
        for i in range(3):
            check_constant(tp, i, int1)
        check_index_type(tp)

        tp = ir.LiteralStructType((dbl, ir.LiteralStructType((int1, int8))))
        check_constant(tp, 0, dbl)
        check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
        with self.assertRaises(IndexError):
            tp.gep(ir.Constant(int32, 2))
        check_index_type(tp)

        context = ir.Context()
        tp = ir.IdentifiedStructType(context, "MyType")
        tp.set_body(dbl, ir.LiteralStructType((int1, int8)))
        check_constant(tp, 0, dbl)
        check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
        with self.assertRaises(IndexError):
            tp.gep(ir.Constant(int32, 2))
        check_index_type(tp)

    def test_abi_size(self):
        """get_abi_size() honours the target data layout string."""
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

        def check(tp, expected):
            self.assertEqual(tp.get_abi_size(td), expected)
        check(int8, 1)
        check(int32, 4)
        check(int64, 8)
        check(ir.ArrayType(int8, 5), 5)
        check(ir.ArrayType(int32, 5), 20)
        check(ir.LiteralStructType((dbl, flt, flt)), 16)

    def test_abi_alignment(self):
        """get_abi_alignment() falls within the expected range for the
        given target data layout (exact value may vary by platform)."""
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

        def check(tp, expected):
            self.assertIn(tp.get_abi_alignment(td), expected)
        check(int8, (1, 2, 4))
        check(int32, (4,))
        check(int64, (8,))
        check(ir.ArrayType(int8, 5), (1, 2, 4))
        check(ir.ArrayType(int32, 5), (4,))
        check(ir.LiteralStructType((dbl, flt, flt)), (8,))

    def test_identified_struct(self):
        """An identified struct starts opaque; set_body() fills it in and
        changes the module's textual IR."""
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        module = ir.Module(context=context)
        self.assertTrue(mytype.is_opaque)
        self.assert_valid_ir(module)
        oldstr = str(module)
        mytype.set_body(ir.IntType(32), ir.IntType(64), ir.FloatType())
        self.assertFalse(mytype.is_opaque)
        self.assert_valid_ir(module)
        self.assertNotEqual(oldstr, str(module))

    def test_target_data_non_default_context(self):
        """get_abi_size() works for types from a non-default ir.Context."""
        context = ir.Context()
        mytype = context.get_identified_type("MyType")
        mytype.elements = [ir.IntType(32)]
        td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
        self.assertEqual(mytype.get_abi_size(td, context=context), 4)

    def test_vector(self):
        """str() of a vector type uses the <N x T> syntax."""
        vecty = ir.VectorType(ir.IntType(32), 8)
        self.assertEqual(str(vecty), "<8 x i32>")
def c32(i):
    """Shorthand for an i32 Constant with value *i*."""
    return ir.Constant(int32, i)
class TestConstant(TestBase):
    def test_integers(self):
        """str() of integer constants; i1 renders bools as true/false,
        undef as 'undef' and None as zero."""
        c = ir.Constant(int32, 42)
        self.assertEqual(str(c), 'i32 42')
        c = ir.Constant(int1, 1)
        self.assertEqual(str(c), 'i1 1')
        c = ir.Constant(int1, 0)
        self.assertEqual(str(c), 'i1 0')
        c = ir.Constant(int1, True)
        self.assertEqual(str(c), 'i1 true')
        c = ir.Constant(int1, False)
        self.assertEqual(str(c), 'i1 false')
        c = ir.Constant(int1, ir.Undefined)
        self.assertEqual(str(c), 'i1 undef')
        c = ir.Constant(int1, None)
        self.assertEqual(str(c), 'i1 0')
    def test_reals(self):
        """str() of float/double constants uses hex float notation;
        None renders as 0.0 and ir.Undefined as undef."""
        # XXX Test NaNs and infs
        c = ir.Constant(flt, 1.5)
        self.assertEqual(str(c), 'float 0x3ff8000000000000')
        c = ir.Constant(flt, -1.5)
        self.assertEqual(str(c), 'float 0xbff8000000000000')
        c = ir.Constant(dbl, 1.5)
        self.assertEqual(str(c), 'double 0x3ff8000000000000')
        c = ir.Constant(dbl, -1.5)
        self.assertEqual(str(c), 'double 0xbff8000000000000')
        c = ir.Constant(dbl, ir.Undefined)
        self.assertEqual(str(c), 'double undef')
        c = ir.Constant(dbl, None)
        self.assertEqual(str(c), 'double 0.0')
    def test_arrays(self):
        """Array constants: explicit elements, literal_array() inference,
        undef/zeroinitializer, the c"..." raw-bytes syntax, and recursive
        wrapping of plain Python values."""
        c = ir.Constant(ir.ArrayType(int32, 3), (c32(5), c32(6), c32(4)))
        self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 4]')
        c = ir.Constant(ir.ArrayType(int32, 2), (c32(5), c32(ir.Undefined)))
        self.assertEqual(str(c), '[2 x i32] [i32 5, i32 undef]')
        c = ir.Constant.literal_array((c32(5), c32(6), c32(ir.Undefined)))
        self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 undef]')
        with self.assertRaises(TypeError) as raises:
            ir.Constant.literal_array((c32(5), ir.Constant(flt, 1.5)))
        self.assertEqual(str(raises.exception),
                         "all elements must have the same type")
        c = ir.Constant(ir.ArrayType(int32, 2), ir.Undefined)
        self.assertEqual(str(c), '[2 x i32] undef')
        c = ir.Constant(ir.ArrayType(int32, 2), None)
        self.assertEqual(str(c), '[2 x i32] zeroinitializer')
        # Raw array syntax
        c = ir.Constant(ir.ArrayType(int8, 11), bytearray(b"foobar_123\x80"))
        self.assertEqual(str(c), r'[11 x i8] c"foobar_123\80"')
        c = ir.Constant(ir.ArrayType(int8, 4), bytearray(b"\x00\x01\x04\xff"))
        self.assertEqual(str(c), r'[4 x i8] c"\00\01\04\ff"')
        # Recursive instantiation of inner constants
        c = ir.Constant(ir.ArrayType(int32, 3), (5, ir.Undefined, 6))
        self.assertEqual(str(c), '[3 x i32] [i32 5, i32 undef, i32 6]')
        # Invalid number of args
        with self.assertRaises(ValueError):
            ir.Constant(ir.ArrayType(int32, 3), (5, 6))
    def test_vector(self):
        """str() of a vector constant lists each element with its type."""
        vecty = ir.VectorType(ir.IntType(32), 8)
        vals = [1, 2, 4, 3, 8, 6, 9, 7]
        vec = ir.Constant(vecty, vals)
        vec_repr = "<8 x i32> <{}>".format(
            ', '.join(map('i32 {}'.format, vals)))
        self.assertEqual(str(vec), vec_repr)
    def test_structs(self):
        """Struct constants: explicit values, literal_struct() type
        inference, undef/zeroinitializer, and recursive wrapping of plain
        Python values (including nested structs)."""
        st1 = ir.LiteralStructType((flt, int1))
        st2 = ir.LiteralStructType((int32, st1))
        c = ir.Constant(st1, (ir.Constant(ir.FloatType(), 1.5),
                              ir.Constant(int1, True)))
        self.assertEqual(str(c),
                         '{float, i1} {float 0x3ff8000000000000, i1 true}')
        c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
                                        ir.Constant(int1, True)))
        self.assertEqual(c.type, st1)
        self.assertEqual(str(c),
                         '{float, i1} {float 0x3ff8000000000000, i1 true}')
        c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
                                        ir.Constant(int1, ir.Undefined)))
        self.assertEqual(c.type, st1)
        self.assertEqual(str(c),
                         '{float, i1} {float 0x3ff8000000000000, i1 undef}')
        c = ir.Constant(st1, ir.Undefined)
        self.assertEqual(str(c), '{float, i1} undef')
        c = ir.Constant(st1, None)
        self.assertEqual(str(c), '{float, i1} zeroinitializer')
        # Recursive instantiation of inner constants
        c1 = ir.Constant(st1, (1.5, True))
        self.assertEqual(str(c1),
                         '{float, i1} {float 0x3ff8000000000000, i1 true}')
        c2 = ir.Constant(st2, (42, c1))
        self.assertEqual(str(c2), ('{i32, {float, i1}} {i32 42, {float, i1} '
                                   '{float 0x3ff8000000000000, i1 true}}'))
        c3 = ir.Constant(st2, (42, (1.5, True)))
        self.assertEqual(str(c3), str(c2))
        # Invalid number of args
        with self.assertRaises(ValueError):
            ir.Constant(st2, (4, 5, 6))
def test_undefined_literal_struct_pickling(self):
i8 = ir.IntType(8)
st = ir.Constant(ir.LiteralStructType([i8, i8]), ir.Undefined)
self.assert_pickle_correctly(st)
def test_type_instantiaton(self):
c = int8(42)
self.assertIsInstance(c, ir.Constant)
self.assertEqual(str(c), 'i8 42')
c = int1(True)
self.assertIsInstance(c, ir.Constant)
self.assertEqual(str(c), 'i1 true')
# Arrays
at = ir.ArrayType(int32, 3)
c = at([c32(4), c32(5), c32(6)])
self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
c = at([4, 5, 6])
self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
c = at(None)
self.assertEqual(str(c), '[3 x i32] zeroinitializer')
with self.assertRaises(ValueError):
at([4, 5, 6, 7])
# Structs
st1 = ir.LiteralStructType((flt, int1))
st2 = ir.LiteralStructType((int32, st1))
c = st1((1.5, True))
self.assertEqual(str(c), ('{float, i1} {float 0x3ff8000000000000, i1 '
'true}'))
c = st2((42, (1.5, True)))
self.assertEqual(str(c), ('{i32, {float, i1}} {i32 42, {float, i1} '
'{float 0x3ff8000000000000, i1 true}}'))
def test_repr(self):
c = int32(42)
self.assertEqual(repr(c), "<ir.Constant type='i32' value=42>")
def test_encoding_problem(self):
c = ir.Constant(ir.ArrayType(ir.IntType(8), 256),
bytearray(range(256)))
m = self.module()
gv = ir.GlobalVariable(m, c.type, "myconstant")
gv.global_constant = True
gv.initializer = c
# With utf-8, the following will cause:
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe0 in position
parsed = llvm.parse_assembly(str(m))
reparsed = llvm.parse_assembly(str(parsed))
self.assertEqual(str(parsed), str(reparsed))
def test_gep(self):
m = self.module()
tp = ir.LiteralStructType((flt, int1))
gv = ir.GlobalVariable(m, tp, "myconstant")
c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
self.assertEqual(str(c),
'getelementptr ({float, i1}, {float, i1}* @"myconstant", i32 0, i32 1)')
self.assertEqual(c.type, ir.PointerType(int1))
const = ir.Constant(tp, None)
with self.assertRaises(TypeError):
const.gep([ir.Constant(int32, 0)])
const_ptr = ir.Constant(tp.as_pointer(), None)
c2 = const_ptr.gep([ir.Constant(int32, 0)])
self.assertEqual(str(c2),
'getelementptr ({float, i1}, {float, i1}* null, i32 0)')
self.assertEqual(c.type, ir.PointerType(int1))
def test_gep_addrspace_globalvar(self):
m = self.module()
tp = ir.LiteralStructType((flt, int1))
addrspace = 4
gv = ir.GlobalVariable(m, tp, "myconstant", addrspace=addrspace)
self.assertEqual(gv.addrspace, addrspace)
c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
self.assertEqual(c.type.addrspace, addrspace)
self.assertEqual(str(c),
('getelementptr ({float, i1}, {float, i1} '
'addrspace(4)* @"myconstant", i32 0, i32 1)'))
self.assertEqual(c.type, ir.PointerType(int1, addrspace=addrspace))
def test_bitcast(self):
m = self.module()
gv = ir.GlobalVariable(m, int32, "myconstant")
c = gv.bitcast(int64.as_pointer())
self.assertEqual(str(c), 'bitcast (i32* @"myconstant" to i64*)')
def test_inttoptr(self):
c = ir.Constant(int32, 0).inttoptr(int64.as_pointer())
self.assertEqual(str(c), 'inttoptr (i32 0 to i64*)')
class TestTransforms(TestBase):
    """Tests for module-level IR rewriting helpers."""

    def test_call_transform(self):
        """replace_all_calls() must redirect an existing call instruction."""
        mod = ir.Module()
        void_fnty = ir.FunctionType(ir.VoidType(), ())
        foo = ir.Function(mod, void_fnty, "foo")
        bar = ir.Function(mod, void_fnty, "bar")
        builder = ir.IRBuilder()
        builder.position_at_end(foo.append_basic_block())
        call = builder.call(foo, ())
        self.assertEqual(call.callee, foo)
        modified = ir.replace_all_calls(mod, foo, bar)
        # The rewritten instruction is reported and now targets bar.
        self.assertIn(call, modified)
        self.assertNotEqual(call.callee, foo)
        self.assertEqual(call.callee, bar)
class TestSingleton(TestBase):
    """ir.Undefined must behave as a true singleton."""

    def test_undefined(self):
        undef = ir.Undefined
        self.assertIs(undef, ir.values._Undefined())
        # Copying, deep-copying and pickling must all preserve identity.
        self.assertIs(undef, copy.copy(undef))
        self.assertIs(undef, copy.deepcopy(undef))
        self.assert_pickle_correctly(undef)
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| true | true |
f7ffb9299d605f1fc9488c1425f809d4aab539f5 | 2,194 | py | Python | HumeanPoseEstimate/loss.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | HumeanPoseEstimate/loss.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | HumeanPoseEstimate/loss.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | from tensorflow import keras
import tensorflow as tf
def joint_mse_loss(y_pred, y_true, true_weight):
    """Mean per-joint weighted MSE over keypoint heatmaps.

    Each output channel is the predicted heatmap for one keypoint, i.e. the
    per-pixel confidence that the pixel belongs to that keypoint.

    Args:
        y_pred: predicted heatmaps, shape (batch, H, W, num_joints).
        y_true: ground-truth heatmaps, same shape as ``y_pred``.
        true_weight: per-sample, per-joint visibility weights, shape
            (batch, num_joints).  NOTE(review): assumes ``true_weight[:, i]``
            broadcasts against the flattened (batch, H*W) heatmaps — confirm
            with callers.

    Returns:
        Scalar tensor: the mean of the per-joint weighted MSE losses.
    """
    batch_size = y_pred.shape[0]
    num_of_joints = y_pred.shape[-1]
    # Flatten the spatial dimensions, then split off one (batch, H*W, 1)
    # heatmap per joint.
    y_pred = tf.reshape(y_pred, shape=(batch_size, -1, num_of_joints))
    heatmap_pred_list = tf.split(value=y_pred,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    y_true = tf.reshape(y_true, shape=(batch_size, -1, num_of_joints))
    heatmap_true_list = tf.split(value=y_true,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    losses = []
    for i in range(num_of_joints):
        heatmap_pred = tf.squeeze(heatmap_pred_list[i])
        heatmap_true = tf.squeeze(heatmap_true_list[i])
        loss = 0.5 * tf.losses.mean_squared_error(
            y_pred=heatmap_pred * true_weight[:, i],
            y_true=heatmap_true * true_weight[:, i])
        losses.append(loss)
    # BUGFIX: average over *all* joints.  The original returned
    # tf.reduce_mean(loss), i.e. only the last joint's loss; the
    # accumulated `losses` list was never used.
    return tf.reduce_mean(losses)
class JointsMSELoss(object):
    """Callable computing the joint-averaged, weighted heatmap MSE."""

    def __init__(self):
        self.mse = tf.losses.MeanSquaredError()

    def __call__(self, y_pred, target, target_weight):
        """Return the per-joint weighted MSE averaged over all joints.

        Note: the ``y_true``/``y_pred`` labels are swapped relative to their
        names when calling the underlying MSE; this is harmless because the
        mean-squared error is symmetric in its arguments.
        """
        batch_size = y_pred.shape[0]
        num_of_joints = y_pred.shape[-1]
        flat_shape = (batch_size, -1, num_of_joints)
        pred_joints = tf.split(value=tf.reshape(tensor=y_pred, shape=flat_shape),
                               num_or_size_splits=num_of_joints, axis=-1)
        gt_joints = tf.split(value=tf.reshape(tensor=target, shape=flat_shape),
                             num_or_size_splits=num_of_joints, axis=-1)
        total = 0.0
        for i, (pred, gt) in enumerate(zip(pred_joints, gt_joints)):
            weight = target_weight[:, i]
            total += 0.5 * self.mse(y_true=tf.squeeze(pred) * weight,
                                    y_pred=tf.squeeze(gt) * weight)
        return total / num_of_joints
| 46.680851 | 92 | 0.608478 | from tensorflow import keras
import tensorflow as tf
def joint_mse_loss(y_pred, y_true, true_weight):
    """Mean per-joint weighted MSE over keypoint heatmaps.

    Args:
        y_pred: predicted heatmaps, shape (batch, H, W, num_joints).
        y_true: ground-truth heatmaps, same shape as ``y_pred``.
        true_weight: per-sample, per-joint weights, shape (batch, num_joints).

    Returns:
        Scalar tensor: the mean of the per-joint weighted MSE losses.
    """
    batch_size = y_pred.shape[0]
    num_of_joints = y_pred.shape[-1]
    y_pred = tf.reshape(y_pred, shape=(batch_size, -1, num_of_joints))
    heatmap_pred_list = tf.split(value=y_pred,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    y_true = tf.reshape(y_true, shape=(batch_size, -1, num_of_joints))
    heatmap_true_list = tf.split(value=y_true,
                                 num_or_size_splits=num_of_joints,
                                 axis=-1)
    losses = []
    for i in range(num_of_joints):
        heatmap_pred = tf.squeeze(heatmap_pred_list[i])
        heatmap_true = tf.squeeze(heatmap_true_list[i])
        loss = 0.5 * tf.losses.mean_squared_error(
            y_pred=heatmap_pred * true_weight[:, i],
            y_true=heatmap_true * true_weight[:, i])
        losses.append(loss)
    # BUGFIX: average over all joints; the original applied reduce_mean to
    # the loop variable `loss` (last joint only) and never used `losses`.
    return tf.reduce_mean(losses)
class JointsMSELoss(object):
def __init__(self):
self.mse = tf.losses.MeanSquaredError()
def __call__(self, y_pred, target, target_weight):
batch_size = y_pred.shape[0]
num_of_joints = y_pred.shape[-1]
pred = tf.reshape(tensor=y_pred, shape=(batch_size, -1, num_of_joints))
heatmap_pred_list = tf.split(value=pred, num_or_size_splits=num_of_joints, axis=-1)
gt = tf.reshape(tensor=target, shape=(batch_size, -1, num_of_joints))
heatmap_gt_list = tf.split(value=gt, num_or_size_splits=num_of_joints, axis=-1)
loss = 0.0
for i in range(num_of_joints):
heatmap_pred = tf.squeeze(heatmap_pred_list[i])
heatmap_gt = tf.squeeze(heatmap_gt_list[i])
loss += 0.5 * self.mse(y_true=heatmap_pred * target_weight[:, i],
y_pred=heatmap_gt * target_weight[:, i])
return loss / num_of_joints
| true | true |
f7ffba000e0145dc2f44fbca9f95ac3150c5fd94 | 13,021 | py | Python | caffe2/python/optimizer.py | kyper999/caffe2-2 | 7c10b470de8d19261f10e958846d7efcfd751a09 | [
"MIT"
] | null | null | null | caffe2/python/optimizer.py | kyper999/caffe2-2 | 7c10b470de8d19261f10e958846d7efcfd751a09 | [
"MIT"
] | null | null | null | caffe2/python/optimizer.py | kyper999/caffe2-2 | 7c10b470de8d19261f10e958846d7efcfd751a09 | [
"MIT"
] | null | null | null | ## @package optimizer
# Module caffe2.python.optimizer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import core
from caffe2.python.modeling import parameter_info
from caffe2.proto import caffe2_pb2
# Name of the shared iteration-counter blob used by LearningRate schedules.
_OPTIMIZER_ITERATION_NAME = "optimizer_iteration"
# Auxiliary optimizer state: 'local' blobs parallel a single parameter
# (e.g. AdaGrad squared sums); 'shared' blobs are global (e.g. iteration).
AuxOptimizerParams = namedtuple("AuxOptimizerParams", ["local", "shared"])
class Optimizer(object):
    """Base class for Caffe2 optimizers.

    Subclasses implement ``_run()`` to append update operators for a single
    parameter to the given nets.
    """

    def __init__(self):
        self._aux_params = AuxOptimizerParams(local=[], shared=[])

    def __call__(self, net, param_init_net, param, grad=None):
        """Adds optimization operators to the net for the given parameter
        and its gradient.

        The parameter is specified either by 'param' being a ParameterInfo
        object (in which case param.grad must be set), or by 'param' being a
        BlobReference (or blob name) with 'grad' being the BlobReference of
        its gradient.
        """
        # NOTE: this docstring was previously a stray class-body string
        # (a dead statement between __init__ and __call__); it is now
        # attached to the method it describes.
        if grad is None:
            assert isinstance(param, parameter_info.ParameterInfo)
            assert param.grad is not None
        else:
            if isinstance(param, str):
                param = core.BlobReference(param)
            param = parameter_info.ParameterInfo(
                param_id=None, param=param, grad=grad)

        self._run(net, param_init_net, param)

    def _run(self, net, param_init_net, param_info):
        # BUGFIX: was `raise Exception("Not Impelemented")` (typo, and too
        # generic); NotImplementedError is a subclass of Exception, so any
        # caller catching the old type still works.
        raise NotImplementedError("Not Implemented")

    @staticmethod
    def build_lr(net, param_init_net, base_learning_rate,
                 learning_rate_blob="lr", policy="fixed",
                 iter_val=0, **kwargs):
        """Create (or reuse) the shared iteration counter and emit a
        LearningRate operator for it.

        Returns:
            (lr_blob, iteration_blob)
        """
        if not param_init_net.BlobIsDefined(_OPTIMIZER_ITERATION_NAME):
            # First optimizer to run: create the counter (on CPU) and the
            # mutex-protected AtomicIter that advances it each step.
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
                iteration = param_init_net.ConstantFill(
                    [], _OPTIMIZER_ITERATION_NAME, shape=[1],
                    value=iter_val,
                    dtype=core.DataType.INT64)
                iter_mutex = param_init_net.CreateMutex([], ["iteration_mutex"])
                net.AtomicIter([iter_mutex, iteration], [iteration])
        else:
            iteration = param_init_net.GetBlobRef(_OPTIMIZER_ITERATION_NAME)

        # Since we are minimizing, we are doing "descent", so the learning
        # rate is set to be negative.
        lr = net.LearningRate(
            [iteration],
            learning_rate_blob,
            base_lr=-base_learning_rate,
            policy=policy,
            **kwargs
        )
        return lr, iteration

    @staticmethod
    def dedup(net, sparse_dedup_aggregator, grad):
        """Optionally deduplicate a sparse gradient's index/value slices."""
        assert isinstance(grad, core.GradientSlice)
        if sparse_dedup_aggregator:
            return net.DeduplicateGradientSlices(
                grad, aggregator=sparse_dedup_aggregator)
        else:
            return grad

    def get_auxiliary_parameters(self):
        """Returns a list of auxiliary parameters.

        Returns:
            aux_params: A namedtuple, AuxParams.

            aux_params.local stores a list of blobs. Each blob is a local
            auxiliary parameter. A local auxiliary parameter is a parameter in
            parallel to a learning rate parameter. Take adagrad as an example,
            the local auxiliary parameter is the squared sum parameter, because
            every learning rate has a squared sum associated with it.

            aux_params.shared also stores a list of blobs. Each blob is a shared
            auxiliary parameter. A shared auxiliary parameter is a parameter
            that is shared across all the learning rate parameters. Take adam as
            an example, the iteration parameter is a shared parameter, because
            all the learning rates share the same iteration parameter.
        """
        return self._aux_params

    # In transfer learning, a parameter initialized from a pretrained model
    # may need a learning rate different from a freshly initialized one;
    # each subclass decides how `scale` applies to its base learning rate.
    def scale_learning_rate(self, *args, **kwargs):
        raise NotImplementedError(
            "Optimizer Need to Implement `scale_learning_rate` method.")
class SgdOptimizer(Optimizer):
    """Plain (optionally momentum) SGD.

    Sparse gradients (core.GradientSlice) are applied with
    ScatterWeightedSum and do not support momentum.
    """

    def __init__(self, base_learning_rate=0.01, policy='fixed',
                 momentum=0.0, **kwargs):
        super(SgdOptimizer, self).__init__()
        self.base_learning_rate = base_learning_rate  # <= 0 disables updates
        self.policy = policy          # learning-rate schedule policy
        self.momentum = momentum      # 0 means vanilla SGD
        self.init_kwargs = kwargs     # forwarded to build_lr / LearningRate

    def _run(self, net, param_init_net, param_info):
        # Appends the update operators for one parameter.  NOTE: operator
        # creation order below is significant for the resulting net.
        param = param_info.blob
        grad = param_info.grad
        if self.base_learning_rate <= 0:
            return
        # Per-parameter LearningRate blob (negative base_lr => descent).
        lr, _ = self.build_lr(
            net, param_init_net,
            base_learning_rate=self.base_learning_rate,
            learning_rate_blob=str(param) + "_lr",
            policy=self.policy,
            **(self.init_kwargs)
        )
        # Constant 1.0 used as the "keep existing value" weight; the blob
        # name is shared by every parameter handled by this optimizer.
        ONE = param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
        self._aux_params.shared.append(ONE)
        if self.momentum > 0:
            # Momentum buffer with the same shape as the parameter.
            momentum_data = param_init_net.ConstantFill(
                param, str(param) + "_momentum", value=0.)
            self._aux_params.local.append(momentum_data)
        if isinstance(grad, core.GradientSlice):
            assert self.momentum == 0., "Doesn't support momentum for sparse"
            # param[indices] += lr * values  (lr is negative).
            net.ScatterWeightedSum(
                [param, ONE, grad.indices, grad.values, lr],
                param
            )
        else:
            if self.momentum > 0.:
                # MomentumSGD rewrites `grad` in place to the full (Nesterov)
                # update step, so the WeightedSum below uses coeff=ONE.
                net.MomentumSGD(
                    [grad, momentum_data, lr], [grad, momentum_data],
                    momentum=self.momentum,
                    nesterov=1)
                coeff = ONE
            else:
                # Vanilla SGD: scale the raw gradient by the (negative) lr.
                coeff = lr
            net.WeightedSum(
                [param, ONE, grad, coeff],
                param
            )

    def scale_learning_rate(self, scale):
        # Multiply the base learning rate (e.g. for transfer learning).
        self.base_learning_rate *= scale
        return
class AdagradOptimizer(Optimizer):
    """AdaGrad: per-element learning rates scaled by the running sum of
    squared gradients.  Supports dense and sparse (GradientSlice) gradients.
    """

    def __init__(self, alpha=0.01, epsilon=1e-4, policy="fixed",
                 sparse_dedup_aggregator=None, engine='', **kwargs):
        super(AdagradOptimizer, self).__init__()
        self.alpha = alpha        # base learning rate; <= 0 disables updates
        self.epsilon = epsilon    # added to the accumulator for stability
        self.policy = policy      # learning-rate schedule policy
        self.sparse_dedup_aggregator = sparse_dedup_aggregator
        self.engine = engine      # operator engine (e.g. '' or 'SIMD')
        self.init_kwargs = kwargs  # forwarded to build_lr / LearningRate

    def _run(self, net, param_init_net, param_info):
        # Appends the AdaGrad update operators for one parameter.
        param = param_info.blob
        grad = param_info.grad
        if self.alpha <= 0:
            return
        lr, _ = self.build_lr(
            net, param_init_net,
            base_learning_rate=self.alpha,
            learning_rate_blob=str(param) + "_lr",
            policy=self.policy,
            **(self.init_kwargs)
        )
        # Running sum of squared gradients; one blob per parameter.
        param_squared_sum = param_init_net.ConstantFill(
            [param],
            str(param) + "_squared_sum",
            value=0.0
        )
        self._aux_params.local.append(param_squared_sum)
        if isinstance(grad, core.GradientSlice):
            # Optionally merge duplicate indices before the sparse update.
            grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
            net.SparseAdagrad(
                [param, param_squared_sum, grad.indices, grad.values, lr],
                [param, param_squared_sum],
                epsilon=self.epsilon,
                engine=self.engine
            )
        else:
            net.Adagrad(
                [param, param_squared_sum, grad, lr],
                [param, param_squared_sum],
                epsilon=self.epsilon,
                engine=self.engine
            )

    def scale_learning_rate(self, scale):
        # Multiply the base learning rate (e.g. for transfer learning).
        self.alpha *= scale
        return
class FtrlOptimizer(Optimizer):
    """FTRL-Proximal optimizer (McMahan et al.), with optional L1/L2
    regularization.  Supports dense and sparse (GradientSlice) gradients.

    Unlike the other optimizers, FTRL does not use a LearningRate blob;
    alpha/beta parameterize the per-coordinate step size directly.
    """

    def __init__(self, alpha=0.01, beta=1e-4, lambda1=0, lambda2=0,
                 sparse_dedup_aggregator=None, engine=''):
        super(FtrlOptimizer, self).__init__()
        self.alpha = alpha        # learning-rate scale; <= 0 disables updates
        self.beta = beta          # learning-rate smoothing term
        self.lambda1 = lambda1    # L1 regularization strength
        self.lambda2 = lambda2    # L2 regularization strength
        self.sparse_dedup_aggregator = sparse_dedup_aggregator
        self.engine = engine      # operator engine (e.g. '' or 'SIMD')

    def _run(self, net, param_init_net, param_info):
        # Appends the FTRL update operators for one parameter.
        param = param_info.blob
        grad = param_info.grad
        if self.alpha <= 0:
            return
        # FTRL state: two values (n and z) per parameter element, stored as
        # one blob with a trailing dimension of 2.
        nz = param_init_net.ConstantFill(
            [param],
            str(param) + "_ftrl_nz",
            extra_shape=[2],
            value=0.0
        )
        self._aux_params.local.append(nz)
        if isinstance(grad, core.GradientSlice):
            # Optionally merge duplicate indices before the sparse update.
            grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
            net.SparseFtrl(
                [param, nz, grad.indices, grad.values],
                [param, nz],
                engine=self.engine,
                alpha=self.alpha,
                beta=self.beta,
                lambda1=self.lambda1,
                lambda2=self.lambda2
            )
        else:
            net.Ftrl(
                [param, nz, grad],
                [param, nz],
                engine=self.engine,
                alpha=self.alpha,
                beta=self.beta,
                lambda1=self.lambda1,
                lambda2=self.lambda2
            )

    def scale_learning_rate(self, scale):
        # Multiply the learning-rate scale (e.g. for transfer learning).
        self.alpha *= scale
        return
class AdamOptimizer(Optimizer):
    """Adam optimizer (Kingma & Ba).  Maintains first/second moment blobs
    per parameter and shares the iteration counter for bias correction.
    Supports dense and sparse (GradientSlice) gradients.
    """

    def __init__(self, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 policy='fixed', sparse_dedup_aggregator=None,
                 engine='', **kwargs):
        super(AdamOptimizer, self).__init__()
        self.alpha = alpha        # base learning rate; <= 0 disables updates
        self.beta1 = beta1        # first-moment decay rate
        self.beta2 = beta2        # second-moment decay rate
        self.epsilon = epsilon    # denominator stabilizer
        self.policy = policy      # learning-rate schedule policy
        self.sparse_dedup_aggregator = sparse_dedup_aggregator
        self.engine = engine      # operator engine
        self.init_kwargs = kwargs  # forwarded to build_lr / LearningRate

    def _run(self, net, param_init_net, param_info):
        # Appends the Adam update operators for one parameter.
        param = param_info.blob
        grad = param_info.grad
        if self.alpha <= 0:
            return
        # The iteration blob is needed for Adam's bias correction.
        lr, iteration = self.build_lr(
            net, param_init_net,
            base_learning_rate=self.alpha,
            learning_rate_blob=str(param) + "_lr",
            policy=self.policy,
            **(self.init_kwargs)
        )
        # First/second moment estimates, same shape as the parameter.
        # (Blob names built via BlobReference concatenation, unlike the
        # str(param) + ... style used elsewhere in this file.)
        m1 = param_init_net.ConstantFill(
            [param],
            param + "_first_moment",
            value=0.0
        )
        m2 = param_init_net.ConstantFill(
            [param],
            param + "_second_moment",
            value=0.0
        )
        self._aux_params.shared.append(iteration)
        self._aux_params.local.append(m1)
        self._aux_params.local.append(m2)
        if isinstance(grad, core.GradientSlice):
            # Optionally merge duplicate indices before the sparse update.
            grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
            net.SparseAdam(
                [param, m1, m2, grad.indices, grad.values, lr, iteration],
                [param, m1, m2],
                beta1=self.beta1,
                beta2=self.beta2,
                epsilon=self.epsilon
            )
        else:
            net.Adam(
                [param, m1, m2, grad, lr, iteration],
                [param, m1, m2],
                beta1=self.beta1,
                beta2=self.beta2,
                epsilon=self.epsilon)

    def scale_learning_rate(self, scale):
        # Multiply the base learning rate (e.g. for transfer learning).
        self.alpha *= scale
        return
def build_sgd(model, base_learning_rate, **kwargs):
    """Attach an SgdOptimizer to every optimizable parameter of `model`."""
    optimizer = SgdOptimizer(base_learning_rate, **kwargs)
    for info in model.GetOptimizationParamInfo():
        optimizer(model.net, model.param_init_net, info)
    return optimizer
def build_ftrl(model, engine="SIMD", **kwargs):
    """Attach an FtrlOptimizer to every optimizable parameter of `model`.

    When the SIMD engine is requested, verify the SIMD operator variants
    are actually registered in this build.
    """
    if engine == "SIMD":
        assert core.IsOperator('Ftrl_ENGINE_SIMD')
        assert core.IsOperator('SparseFtrl_ENGINE_SIMD')
    optimizer = FtrlOptimizer(engine=engine, **kwargs)
    for info in model.GetOptimizationParamInfo():
        optimizer(model.net, model.param_init_net, info)
    return optimizer
def build_adagrad(model, base_learning_rate, parameters=None, **kwargs):
    """Attach an AdagradOptimizer to the given (or all) model parameters."""
    optimizer = AdagradOptimizer(alpha=base_learning_rate, **kwargs)
    for info in model.GetOptimizationParamInfo(parameters):
        optimizer(model.net, model.param_init_net, info)
    return optimizer
def build_adam(model, base_learning_rate, **kwargs):
    """Attach an AdamOptimizer to every optimizable parameter of `model`."""
    optimizer = AdamOptimizer(alpha=base_learning_rate, **kwargs)
    for info in model.GetOptimizationParamInfo():
        optimizer(model.net, model.param_init_net, info)
    return optimizer
| 35.097035 | 80 | 0.607173 | port absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import core
from caffe2.python.modeling import parameter_info
from caffe2.proto import caffe2_pb2
_OPTIMIZER_ITERATION_NAME = "optimizer_iteration"
AuxOptimizerParams = namedtuple("AuxOptimizerParams", ["local", "shared"])
class Optimizer(object):
def __init__(self):
self._aux_params = AuxOptimizerParams(local=[], shared=[])
def __call__(self, net, param_init_net, param, grad=None):
if grad is None:
assert isinstance(param, parameter_info.ParameterInfo)
assert param.grad is not None
else:
if isinstance(param, str):
param = core.BlobReference(param)
param = parameter_info.ParameterInfo(
param_id=None, param=param, grad=grad)
self._run(net, param_init_net, param)
def _run(self, net, param_init_net, param_info):
raise Exception("Not Impelemented")
@staticmethod
def build_lr(net, param_init_net, base_learning_rate,
learning_rate_blob="lr", policy="fixed",
iter_val=0, **kwargs):
if not param_init_net.BlobIsDefined(_OPTIMIZER_ITERATION_NAME):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
iteration = param_init_net.ConstantFill(
[], _OPTIMIZER_ITERATION_NAME, shape=[1],
value=iter_val,
dtype=core.DataType.INT64)
iter_mutex = param_init_net.CreateMutex([], ["iteration_mutex"])
net.AtomicIter([iter_mutex, iteration], [iteration])
else:
iteration = param_init_net.GetBlobRef(_OPTIMIZER_ITERATION_NAME)
lr = net.LearningRate(
[iteration],
learning_rate_blob,
base_lr=-base_learning_rate,
policy=policy,
**kwargs
)
return lr, iteration
@staticmethod
def dedup(net, sparse_dedup_aggregator, grad):
assert (isinstance(grad, core.GradientSlice))
if sparse_dedup_aggregator:
return net.DeduplicateGradientSlices(
grad, aggregator=sparse_dedup_aggregator)
else:
return grad
def get_auxiliary_parameters(self):
return self._aux_params
def scale_learning_rate(self, *args, **kwargs):
raise NotImplementedError(
"Optimizer Need to Implement `scale_learning_rate` method.")
class SgdOptimizer(Optimizer):
def __init__(self, base_learning_rate=0.01, policy='fixed',
momentum=0.0, **kwargs):
super(SgdOptimizer, self).__init__()
self.base_learning_rate = base_learning_rate
self.policy = policy
self.momentum = momentum
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.base_learning_rate <= 0:
return
lr, _ = self.build_lr(
net, param_init_net,
base_learning_rate=self.base_learning_rate,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
ONE = param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
self._aux_params.shared.append(ONE)
if self.momentum > 0:
momentum_data = param_init_net.ConstantFill(
param, str(param) + "_momentum", value=0.)
self._aux_params.local.append(momentum_data)
if isinstance(grad, core.GradientSlice):
assert self.momentum == 0., "Doesn't support momentum for sparse"
net.ScatterWeightedSum(
[param, ONE, grad.indices, grad.values, lr],
param
)
else:
if self.momentum > 0.:
net.MomentumSGD(
[grad, momentum_data, lr], [grad, momentum_data],
momentum=self.momentum,
nesterov=1)
coeff = ONE
else:
coeff = lr
net.WeightedSum(
[param, ONE, grad, coeff],
param
)
def scale_learning_rate(self, scale):
self.base_learning_rate *= scale
return
class AdagradOptimizer(Optimizer):
def __init__(self, alpha=0.01, epsilon=1e-4, policy="fixed",
sparse_dedup_aggregator=None, engine='', **kwargs):
super(AdagradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
lr, _ = self.build_lr(
net, param_init_net,
base_learning_rate=self.alpha,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
param_squared_sum = param_init_net.ConstantFill(
[param],
str(param) + "_squared_sum",
value=0.0
)
self._aux_params.local.append(param_squared_sum)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseAdagrad(
[param, param_squared_sum, grad.indices, grad.values, lr],
[param, param_squared_sum],
epsilon=self.epsilon,
engine=self.engine
)
else:
net.Adagrad(
[param, param_squared_sum, grad, lr],
[param, param_squared_sum],
epsilon=self.epsilon,
engine=self.engine
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class FtrlOptimizer(Optimizer):
def __init__(self, alpha=0.01, beta=1e-4, lambda1=0, lambda2=0,
sparse_dedup_aggregator=None, engine=''):
super(FtrlOptimizer, self).__init__()
self.alpha = alpha
self.beta = beta
self.lambda1 = lambda1
self.lambda2 = lambda2
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
nz = param_init_net.ConstantFill(
[param],
str(param) + "_ftrl_nz",
extra_shape=[2],
value=0.0
)
self._aux_params.local.append(nz)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseFtrl(
[param, nz, grad.indices, grad.values],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2
)
else:
net.Ftrl(
[param, nz, grad],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class AdamOptimizer(Optimizer):
def __init__(self, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
policy='fixed', sparse_dedup_aggregator=None,
engine='', **kwargs):
super(AdamOptimizer, self).__init__()
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.init_kwargs = kwargs
def _run(self, net, param_init_net, param_info):
param = param_info.blob
grad = param_info.grad
if self.alpha <= 0:
return
lr, iteration = self.build_lr(
net, param_init_net,
base_learning_rate=self.alpha,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
m1 = param_init_net.ConstantFill(
[param],
param + "_first_moment",
value=0.0
)
m2 = param_init_net.ConstantFill(
[param],
param + "_second_moment",
value=0.0
)
self._aux_params.shared.append(iteration)
self._aux_params.local.append(m1)
self._aux_params.local.append(m2)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseAdam(
[param, m1, m2, grad.indices, grad.values, lr, iteration],
[param, m1, m2],
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon
)
else:
net.Adam(
[param, m1, m2, grad, lr, iteration],
[param, m1, m2],
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
def build_sgd(model, base_learning_rate, **kwargs):
sgd_optimizer = SgdOptimizer(base_learning_rate, **kwargs)
for param_info in model.GetOptimizationParamInfo():
sgd_optimizer(model.net, model.param_init_net, param_info)
return sgd_optimizer
def build_ftrl(model, engine="SIMD", **kwargs):
if engine == "SIMD":
assert core.IsOperator('Ftrl_ENGINE_SIMD')
assert core.IsOperator('SparseFtrl_ENGINE_SIMD')
ftrl_optimizer = FtrlOptimizer(engine=engine, **kwargs)
for param_info in model.GetOptimizationParamInfo():
ftrl_optimizer(model.net, model.param_init_net, param_info)
return ftrl_optimizer
def build_adagrad(model, base_learning_rate, parameters=None, **kwargs):
adagrad_optimizer = AdagradOptimizer(alpha=base_learning_rate, **kwargs)
for param_info in model.GetOptimizationParamInfo(parameters):
adagrad_optimizer(model.net, model.param_init_net, param_info)
return adagrad_optimizer
def build_adam(model, base_learning_rate, **kwargs):
adam_optimizer = AdamOptimizer(alpha=base_learning_rate, **kwargs)
for param_info in model.GetOptimizationParamInfo():
adam_optimizer(model.net, model.param_init_net, param_info)
return adam_optimizer
| true | true |
f7ffba573bdea60f3487d8e32bf38901f2225870 | 31,706 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_service.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_service.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_service.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_service
short_description: Manage service configuration in Netscaler
description:
- Manage service configuration in Netscaler.
- This module allows the creation, deletion and modification of Netscaler services.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
- This module supports check mode.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the service has been created.
- "Minimum length = 1"
ip:
description:
- "IP to assign to the service."
- "Minimum length = 1"
servername:
description:
- "Name of the server that hosts the service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol in which data is exchanged with the service."
port:
description:
- "Port number of the service."
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
cleartextport:
description:
- >-
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic.
Applicable to transparent SSL services.
- "Minimum value = 1"
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections to the service."
- "Minimum value = 0"
- "Maximum value = 4294967294"
healthmonitor:
description:
- "Monitor the health of this service"
default: yes
type: bool
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = 0"
- "Maximum value = 65535"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward requests to the cache server."
- "Note: Do not specify this parameter if you set the Cache Type parameter."
default: no
type: bool
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6
address as its value. Used if the server needs the client's IP address for security, accounting, or
other purposes, and setting the Use Source IP parameter is not a viable option.
cipheader:
description:
- >-
Name for the HTTP header whose value must be set to the IP address of the client. Used with the
Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the
header, the appliance uses the header name specified for the global Client IP Header parameter (the
cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the
Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global
Client IP Header parameter is not specified, the appliance inserts a header with the name
"client-ip.".
- "Minimum length = 1"
usip:
description:
- >-
Use the client's IP address as the source IP address when initiating a connection to the server. When
creating a service, if you do not set this parameter, the service inherits the global Use Source IP
setting (available in the enable ns mode and disable ns mode CLI commands, or in the System >
Settings > Configure modes > Configure Modes dialog box). However, you can override this setting
after you create the service.
type: bool
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES."
type: bool
sp:
description:
- "Enable surge protection for the service."
type: bool
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service."
default: off
type: bool
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
customserverid:
description:
- >-
Unique identifier for the service. Used when the persistency type for the virtual server is set to
Custom Server ID.
default: 'None'
serverid:
description:
- "The identifier for the service. This is used when the persistency type is set to Custom Server ID."
cka:
description:
- "Enable client keep-alive for the service."
type: bool
tcpb:
description:
- "Enable TCP buffering for the service."
type: bool
cmp:
description:
- "Enable compression for the service."
type: bool
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated to the service."
- "Minimum value = 0"
- "Maximum value = 4294967287"
accessdown:
description:
- >-
Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service
is DOWN, and this parameter is disabled, the packets are dropped.
default: no
type: bool
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = 0"
- "Maximum value = 65535"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do
not enable this option for applications that must complete their transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
hashid:
description:
- >-
A numerical identifier that can be used by hash based load balancing methods. Must be unique for each
service.
- "Minimum value = 1"
comment:
description:
- "Any information about the service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information."
netprofile:
description:
- "Network profile to use for the service."
- "Minimum length = 1"
- "Maximum length = 127"
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = 0"
- "Maximum value = 4094"
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option packets destined to a service in a cluster will not under go any steering.
Turn this option for single packet request response mode or when the upstream device is performing a
proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the service. DNS profile properties will applied to the
transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.
- "Minimum length = 1"
- "Maximum length = 127"
ipaddress:
description:
- "The new IP address of the service."
graceful:
description:
- >-
Shut down gracefully, not accepting any new connections, and disabling the service when all of its
connections are closed.
default: no
type: bool
monitor_bindings:
description:
- A list of load balancing monitors to bind to this service.
- Each monitor entry is a dictionary which may contain the following options.
- Note that if not using the built in monitors they must first be setup.
suboptions:
monitorname:
description:
- Name of the monitor.
weight:
description:
- Weight to assign to the binding between the monitor and service.
dup_state:
choices:
- 'enabled'
- 'disabled'
description:
- State of the monitor.
- The state setting for a monitor of a given type affects all monitors of that type.
- For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled.
- If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
dup_weight:
description:
- Weight to assign to the binding between the monitor and service.
disabled:
description:
- When set to C(yes) the service state will be set to DISABLED.
- When set to C(no) the service state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Monitor monitor-1 must have been already setup
- name: Setup http service
gather_facts: False
delegate_to: localhost
netscaler_service:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: service-http-1
servicetype: HTTP
ipaddress: 10.78.0.1
port: 80
monitor_bindings:
- monitor-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
diff:
description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines,
get_immutables_intersection)
def service_exists(client, module):
    """Return True when a service with the configured name exists on the appliance."""
    matches = service.count_filtered(client, 'name:%s' % module.params['name'])
    return matches > 0
def service_identical(client, module, service_proxy):
    """Return True when the configured service matches the one on the appliance.

    Attributes that can never match are stripped from the diff before the
    comparison: the appliance reports the address as 'ipaddress' (so 'ip'
    always differs), and 'graceful' is a write-only flag rather than stored
    configuration.
    """
    retrieved = service.get_filtered(client, 'name:%s' % module.params['name'])
    differences = service_proxy.diff_object(retrieved[0])
    differences.pop('ip', None)
    differences.pop('graceful', None)
    return not differences
def diff(client, module, service_proxy):
    """Return the attribute differences between the configured and actual service.

    The 'ip' key is dropped because the retrieved object stores the address
    under 'ipaddress', so 'ip' would always show as a spurious difference.
    """
    retrieved = service.get_filtered(client, 'name:%s' % module.params['name'])
    delta = service_proxy.diff_object(retrieved[0])
    delta.pop('ip', None)
    return delta
def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Build ConfigProxy objects for the monitor bindings declared in the task.

    Returns a dict keyed by monitor name. An empty dict is returned when the
    task declares no monitor bindings.
    """
    proxies = {}
    declared = module.params['monitor_bindings']
    if declared is None:
        return proxies
    for entry in declared:
        values = copy.deepcopy(entry)
        # The binding object carries the service under 'servicegroupname';
        # monitor_bindings_identical() reconciles this with the
        # 'servicename' attribute of bindings retrieved from the appliance.
        values['servicegroupname'] = module.params['name']
        proxy = ConfigProxy(
            actual=lbmonitor_service_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=monitor_bindings_rw_attrs,
        )
        proxies[proxy.monitorname] = proxy
    return proxies
def get_actual_monitor_bindings(client, module):
    """Fetch the monitor bindings currently attached to the service.

    Returns a dict of lbmonitor_service_binding objects keyed by monitor
    name. The built-in default monitors are excluded because they cannot be
    operated on.
    """
    bindings = {}
    if service_lbmonitor_binding.count(client, module.params['name']) == 0:
        return bindings
    for retrieved in service_lbmonitor_binding.get(client, module.params['name']):
        # Default monitors are managed by the appliance itself; skip them.
        if retrieved.monitor_name in ('tcp-default', 'ping-default'):
            continue
        entry = lbmonitor_service_binding()
        entry.weight = retrieved.weight
        entry.monitorname = retrieved.monitor_name
        entry.dup_weight = retrieved.dup_weight
        entry.servicename = module.params['name']
        bindings[retrieved.monitor_name] = entry
    return bindings
def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
    """Return True when configured and actual monitor bindings fully agree."""
    configured = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual = get_actual_monitor_bindings(client, module)
    configured_names = set(configured.keys())
    actual_names = set(actual.keys())
    # A monitor present on only one side means the binding sets differ.
    if configured_names ^ actual_names:
        return False
    for name in configured_names:
        proxy = configured[name]
        existing = actual[name]
        delta = proxy.diff_object(existing)
        # Configured proxies store the service under 'servicegroupname'
        # while retrieved bindings use 'servicename'; ignore that mismatch
        # when the underlying values are equal.
        if 'servicegroupname' in delta:
            if proxy.servicegroupname == existing.servicename:
                del delta['servicegroupname']
        if delta:
            return False
    return True
def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Make the appliance's monitor bindings match the task's declaration.

    Extra bindings are deleted, changed bindings are deleted and re-added
    (NITRO offers no in-place update for bindings), and missing bindings
    are created.
    """
    configured = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual = get_actual_monitor_bindings(client, module)
    configured_names = set(configured.keys())
    actual_names = set(actual.keys())
    # Remove bindings present on the appliance but no longer requested.
    for name in actual_names - configured_names:
        log('Deleting binding for monitor %s' % name)
        lbmonitor_service_binding.delete(client, actual[name])
    # Replace bindings whose attributes drifted from the requested values.
    for name in configured_names & actual_names:
        proxy = configured[name]
        existing = actual[name]
        if not proxy.has_equal_attributes(existing):
            log('Deleting and re adding binding for monitor %s' % name)
            lbmonitor_service_binding.delete(client, existing)
            proxy.add()
    # Create bindings that are requested but missing.
    for name in configured_names - actual_names:
        log('Adding binding for monitor %s' % name)
        configured[name].add()
def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
    """Return True only when both the service attributes and its monitor bindings match."""
    if not service_identical(client, module, service_proxy):
        return False
    return monitor_bindings_identical(client, module, monitor_bindings_rw_attrs)
def do_state_change(client, module, service_proxy):
    """Enable or disable the service according to the 'disabled' module parameter.

    Returns the NITRO operation result, whose errorcode the caller inspects.
    """
    if module.params['disabled']:
        log('Disabling service')
        return service.disable(client, service_proxy.actual)
    log('Enabling service')
    return service.enable(client, service_proxy.actual)
def main():
    """Module entry point: build the argument spec, log in to the appliance,
    and converge the service (and its monitor bindings) to the desired state.

    Fails the module via fail_json on SDK import failure, login errors,
    attempts to change immutable attributes, NITRO exceptions, or when the
    post-change sanity checks find the appliance state still diverging.
    """
    # Arguments mapping 1:1 onto NITRO service attributes.
    module_specific_arguments = dict(
        name=dict(type='str'),
        ip=dict(type='str'),
        servername=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'DTLS',
                'NNTP',
                'RPCSVR',
                'DNS',
                'ADNS',
                'SNMP',
                'RTSP',
                'DHCPRA',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'DNS_TCP',
                'ADNS_TCP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
                'RADIUS',
                'RADIUSListener',
                'RDP',
                'DIAMETER',
                'SSL_DIAMETER',
                'TFTP',
                'SMPP',
                'PPTP',
                'GRE',
                'SYSLOGTCP',
                'SYSLOGUDP',
                'FIX',
                'SSL_FIX'
            ]
        ),
        port=dict(type='int'),
        cleartextport=dict(type='int'),
        cachetype=dict(
            type='str',
            choices=[
                'TRANSPARENT',
                'REVERSE',
                'FORWARD',
            ]
        ),
        maxclient=dict(type='float'),
        healthmonitor=dict(
            type='bool',
            default=True,
        ),
        maxreq=dict(type='float'),
        cacheable=dict(
            type='bool',
            default=False,
        ),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        usip=dict(type='bool'),
        useproxyport=dict(type='bool'),
        sp=dict(type='bool'),
        rtspsessionidremap=dict(
            type='bool',
            default=False,
        ),
        clttimeout=dict(type='float'),
        svrtimeout=dict(type='float'),
        # NOTE: the default is the literal string 'None', matching the
        # NITRO default for customserverid — not the Python None value.
        customserverid=dict(
            type='str',
            default='None',
        ),
        cka=dict(type='bool'),
        tcpb=dict(type='bool'),
        cmp=dict(type='bool'),
        maxbandwidth=dict(type='float'),
        accessdown=dict(
            type='bool',
            default=False
        ),
        monthreshold=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        tcpprofilename=dict(type='str'),
        httpprofilename=dict(type='str'),
        hashid=dict(type='float'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        netprofile=dict(type='str'),
        processlocal=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        dnsprofilename=dict(type='str'),
        ipaddress=dict(type='str'),
        graceful=dict(
            type='bool',
            default=False,
        ),
    )
    # Arguments handled by this module itself rather than mapped onto the
    # service object.
    hand_inserted_arguments = dict(
        monitor_bindings=dict(type='list'),
        disabled=dict(
            type='bool',
            default=False,
        ),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # NOTE(review): requests exception types are matched by their string
        # representation so this module need not import requests directly.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Fallthrough to rest of execution
    # Instantiate Service Config object
    readwrite_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'maxclient',
        'healthmonitor',
        'maxreq',
        'cacheable',
        'cip',
        'cipheader',
        'usip',
        'useproxyport',
        'sp',
        'rtspsessionidremap',
        'clttimeout',
        'svrtimeout',
        'customserverid',
        'cka',
        'tcpb',
        'cmp',
        'maxbandwidth',
        'accessdown',
        'monthreshold',
        'downstateflush',
        'tcpprofilename',
        'httpprofilename',
        'hashid',
        'comment',
        'appflowlog',
        'netprofile',
        'processlocal',
        'dnsprofilename',
        'ipaddress',
        'graceful',
    ]
    readonly_attrs = [
        'numofconnections',
        'policyname',
        'serviceconftype',
        'serviceconftype2',
        'value',
        'gslb',
        'dup_state',
        'publicip',
        'publicport',
        'svrstate',
        'monitor_state',
        'monstatcode',
        'lastresponse',
        'responsetime',
        'riseapbrstatsmsgcode2',
        'monstatparam1',
        'monstatparam2',
        'monstatparam3',
        'statechangetimesec',
        'statechangetimemsec',
        'tickssincelaststatechange',
        'stateupdatereason',
        'clmonowner',
        'clmonview',
        'serviceipstr',
        'oracleserverversion',
    ]
    # Attributes that NITRO rejects in update operations; changing one
    # requires deleting and recreating the service.
    immutable_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'cipheader',
        'serverid',
        'state',
        'td',
        'monitor_name_svc',
        'riseapbrstatsmsgcode',
        'all',
        'Internal',
        'newname',
    ]
    # Conversions from Ansible booleans/strings to the literal values the
    # NITRO API expects (YES/NO, ON/OFF, ENABLED/DISABLED).
    transforms = {
        'pathmonitorindv': ['bool_yes_no'],
        'cacheable': ['bool_yes_no'],
        'cka': ['bool_yes_no'],
        'pathmonitor': ['bool_yes_no'],
        'tcpb': ['bool_yes_no'],
        'sp': ['bool_on_off'],
        'graceful': ['bool_yes_no'],
        'usip': ['bool_yes_no'],
        'healthmonitor': ['bool_yes_no'],
        'useproxyport': ['bool_yes_no'],
        'rtspsessionidremap': ['bool_on_off'],
        'accessdown': ['bool_yes_no'],
        'cmp': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
        'processlocal': [lambda v: v.upper()],
    }
    monitor_bindings_rw_attrs = [
        'servicename',
        'servicegroupname',
        'dup_state',
        'dup_weight',
        'monitorname',
        'weight',
    ]
    # Translate module arguments to corresponding config object attributes
    if module.params['ip'] is None:
        module.params['ip'] = module.params['ipaddress']
    service_proxy = ConfigProxy(
        actual=service(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not service_exists(client, module):
                if not module.check_mode:
                    service_proxy.add()
                    sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
                # Check if we try to change value of immutable attributes
                diff_dict = diff(client, module, service_proxy)
                immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys())
                if immutables_changed != []:
                    msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
                    module.fail_json(msg=msg, diff=diff_dict, **module_result)
                # Service sync
                if not service_identical(client, module, service_proxy):
                    if not module.check_mode:
                        service_proxy.update()
                # Monitor bindings sync
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    if not module.check_mode:
                        sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                module_result['changed'] = True
                if not module.check_mode:
                    if module.params['save_config']:
                        client.save_config()
            else:
                module_result['changed'] = False
            # Enable/disable happens unconditionally; NITRO gives no cheap
            # way to read the current admin state, so this alone does not
            # mark the result as changed.
            if not module.check_mode:
                res = do_state_change(client, module, service_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not service_exists(client, module):
                    module.fail_json(msg='Service does not exist', **module_result)
                if not service_identical(client, module, service_proxy):
                    module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result)
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    module.fail_json(msg='Monitor bindings are not identical', **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if service_exists(client, module):
                if not module.check_mode:
                    service_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if service_exists(client, module):
                    module.fail_json(msg='Service still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
main()
| 32.890041 | 144 | 0.570933 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_service
short_description: Manage service configuration in Netscaler
description:
- Manage service configuration in Netscaler.
- This module allows the creation, deletion and modification of Netscaler services.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
- This module supports check mode.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the service has been created.
- "Minimum length = 1"
ip:
description:
- "IP to assign to the service."
- "Minimum length = 1"
servername:
description:
- "Name of the server that hosts the service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol in which data is exchanged with the service."
port:
description:
- "Port number of the service."
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
cleartextport:
description:
- >-
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic.
Applicable to transparent SSL services.
- "Minimum value = 1"
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections to the service."
- "Minimum value = 0"
- "Maximum value = 4294967294"
healthmonitor:
description:
- "Monitor the health of this service"
default: yes
type: bool
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = 0"
- "Maximum value = 65535"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward requests to the cache server."
- "Note: Do not specify this parameter if you set the Cache Type parameter."
default: no
type: bool
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6
address as its value. Used if the server needs the client's IP address for security, accounting, or
other purposes, and setting the Use Source IP parameter is not a viable option.
cipheader:
description:
- >-
Name for the HTTP header whose value must be set to the IP address of the client. Used with the
Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the
header, the appliance uses the header name specified for the global Client IP Header parameter (the
cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the
Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global
Client IP Header parameter is not specified, the appliance inserts a header with the name
"client-ip.".
- "Minimum length = 1"
usip:
description:
- >-
Use the client's IP address as the source IP address when initiating a connection to the server. When
creating a service, if you do not set this parameter, the service inherits the global Use Source IP
setting (available in the enable ns mode and disable ns mode CLI commands, or in the System >
Settings > Configure modes > Configure Modes dialog box). However, you can override this setting
after you create the service.
type: bool
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES."
type: bool
sp:
description:
- "Enable surge protection for the service."
type: bool
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service."
default: off
type: bool
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
customserverid:
description:
- >-
Unique identifier for the service. Used when the persistency type for the virtual server is set to
Custom Server ID.
default: 'None'
serverid:
description:
- "The identifier for the service. This is used when the persistency type is set to Custom Server ID."
cka:
description:
- "Enable client keep-alive for the service."
type: bool
tcpb:
description:
- "Enable TCP buffering for the service."
type: bool
cmp:
description:
- "Enable compression for the service."
type: bool
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated to the service."
- "Minimum value = 0"
- "Maximum value = 4294967287"
accessdown:
description:
- >-
Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service
is DOWN, and this parameter is disabled, the packets are dropped.
default: no
type: bool
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = 0"
- "Maximum value = 65535"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do
not enable this option for applications that must complete their transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
hashid:
description:
- >-
A numerical identifier that can be used by hash based load balancing methods. Must be unique for each
service.
- "Minimum value = 1"
comment:
description:
- "Any information about the service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information."
netprofile:
description:
- "Network profile to use for the service."
- "Minimum length = 1"
- "Maximum length = 127"
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = 0"
- "Maximum value = 4094"
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option packets destined to a service in a cluster will not under go any steering.
Turn this option for single packet request response mode or when the upstream device is performing a
proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the service. DNS profile properties will applied to the
transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.
- "Minimum length = 1"
- "Maximum length = 127"
ipaddress:
description:
- "The new IP address of the service."
graceful:
description:
- >-
Shut down gracefully, not accepting any new connections, and disabling the service when all of its
connections are closed.
default: no
type: bool
monitor_bindings:
description:
- A list of load balancing monitors to bind to this service.
- Each monitor entry is a dictionary which may contain the following options.
- Note that if not using the built in monitors they must first be setup.
suboptions:
monitorname:
description:
- Name of the monitor.
weight:
description:
- Weight to assign to the binding between the monitor and service.
dup_state:
choices:
- 'enabled'
- 'disabled'
description:
- State of the monitor.
- The state setting for a monitor of a given type affects all monitors of that type.
- For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled.
- If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
dup_weight:
description:
- Weight to assign to the binding between the monitor and service.
disabled:
description:
- When set to C(yes) the service state will be set to DISABLED.
- When set to C(no) the service state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Monitor monitor-1 must have been already setup
- name: Setup http service
gather_facts: False
delegate_to: localhost
netscaler_service:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: service-http-1
servicetype: HTTP
ipaddress: 10.78.0.1
port: 80
monitor_bindings:
- monitor-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
diff:
description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines,
get_immutables_intersection)
def service_exists(client, module):
    """Return True when a service with the configured name exists on the node."""
    matches = service.count_filtered(client, 'name:%s' % module.params['name'])
    return matches > 0
def service_identical(client, module, service_proxy):
    """Check whether the remote service matches the configured attribute values."""
    remote = service.get_filtered(client, 'name:%s' % module.params['name'])[0]
    differences = service_proxy.diff_object(remote)
    # The NITRO object reports the address under 'ipaddress', so the
    # module-side 'ip' alias always looks different; 'graceful' is an
    # operation flag rather than a persistent attribute. Ignore both.
    for ignored in ('ip', 'graceful'):
        differences.pop(ignored, None)
    return not differences
def diff(client, module, service_proxy):
    """Return the attribute differences between the configured and actual service."""
    remote = service.get_filtered(client, 'name:%s' % module.params['name'])[0]
    delta = service_proxy.diff_object(remote)
    # 'ip' is mirrored as 'ipaddress' on the NITRO side; not a real difference.
    delta.pop('ip', None)
    return delta
def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Build ConfigProxy objects for each monitor binding declared in the task.

    Returns a dict keyed by monitor name so the configured bindings can be
    compared against the bindings that actually exist on the appliance.
    """
    proxies = {}
    if module.params['monitor_bindings'] is None:
        return proxies
    for binding in module.params['monitor_bindings']:
        values = copy.deepcopy(binding)
        # The binding is attached to this service via 'servicegroupname'.
        values['servicegroupname'] = module.params['name']
        proxy = ConfigProxy(
            actual=lbmonitor_service_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=monitor_bindings_rw_attrs,
        )
        proxies[proxy.monitorname] = proxy
    return proxies
def get_actual_monitor_bindings(client, module):
    """Fetch the monitor bindings currently present for the service.

    Built-in default monitors are skipped because they cannot be managed.
    Returns a dict of lbmonitor_service_binding objects keyed by monitor name.
    """
    service_name = module.params['name']
    bindings = {}
    if service_lbmonitor_binding.count(client, service_name) == 0:
        return bindings
    for binding in service_lbmonitor_binding.get(client, service_name):
        # Default monitors cannot be operated on; leave them alone.
        if binding.monitor_name in ('tcp-default', 'ping-default'):
            continue
        actual = lbmonitor_service_binding()
        actual.weight = binding.weight
        actual.monitorname = binding.monitor_name
        actual.dup_weight = binding.dup_weight
        actual.servicename = service_name
        bindings[binding.monitor_name] = actual
    return bindings
def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
    """Return True when configured and actual monitor bindings fully agree."""
    configured = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual = get_actual_monitor_bindings(client, module)
    # Any monitor present on only one side means the sets differ.
    if set(configured) != set(actual):
        return False
    for monitor_name, proxy in configured.items():
        remote = actual[monitor_name]
        delta = proxy.diff_object(remote)
        # The configured side carries 'servicegroupname' while the appliance
        # reports 'servicename'; equal values are not a real difference.
        if 'servicegroupname' in delta and proxy.servicegroupname == remote.servicename:
            del delta['servicegroupname']
        if delta:
            return False
    return True
def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs):
    """Make the appliance's monitor bindings match the task configuration.

    Extra bindings are removed, drifted bindings are deleted and re-created,
    and missing bindings are added.
    """
    configured = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
    actual = get_actual_monitor_bindings(client, module)
    configured_names = set(configured)
    actual_names = set(actual)

    # Remove bindings that are no longer configured.
    for monitor_name in actual_names - configured_names:
        log('Deleting binding for monitor %s' % monitor_name)
        lbmonitor_service_binding.delete(client, actual[monitor_name])

    # Re-create bindings whose attributes drifted from the configuration.
    for monitor_name in configured_names & actual_names:
        proxy = configured[monitor_name]
        if not proxy.has_equal_attributes(actual[monitor_name]):
            log('Deleting and re adding binding for monitor %s' % monitor_name)
            lbmonitor_service_binding.delete(client, actual[monitor_name])
            proxy.add()

    # Create bindings that do not exist on the appliance yet.
    for monitor_name in configured_names - actual_names:
        log('Adding binding for monitor %s' % monitor_name)
        configured[monitor_name].add()
def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
    """True only when both the service attributes and its monitor bindings are in sync."""
    if not service_identical(client, module, service_proxy):
        return False
    return monitor_bindings_identical(client, module, monitor_bindings_rw_attrs)
def do_state_change(client, module, service_proxy):
    """Enable or disable the service according to the 'disabled' module parameter."""
    if module.params['disabled']:
        log('Disabling service')
        return service.disable(client, service_proxy.actual)
    log('Enabling service')
    return service.enable(client, service_proxy.actual)
def main():
    """Entry point of the Ansible module.

    Builds the argument spec, logs into the NITRO API, then converges the
    NetScaler service (and its monitor bindings) to the requested state
    ('present' or 'absent'), honoring check mode throughout.
    """
    # Module arguments mirroring the NITRO 'service' resource attributes.
    module_specific_arguments = dict(
        name=dict(type='str'),
        ip=dict(type='str'),
        servername=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'DTLS',
                'NNTP',
                'RPCSVR',
                'DNS',
                'ADNS',
                'SNMP',
                'RTSP',
                'DHCPRA',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'DNS_TCP',
                'ADNS_TCP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
                'RADIUS',
                'RADIUSListener',
                'RDP',
                'DIAMETER',
                'SSL_DIAMETER',
                'TFTP',
                'SMPP',
                'PPTP',
                'GRE',
                'SYSLOGTCP',
                'SYSLOGUDP',
                'FIX',
                'SSL_FIX'
            ]
        ),
        port=dict(type='int'),
        cleartextport=dict(type='int'),
        cachetype=dict(
            type='str',
            choices=[
                'TRANSPARENT',
                'REVERSE',
                'FORWARD',
            ]
        ),
        maxclient=dict(type='float'),
        healthmonitor=dict(
            type='bool',
            default=True,
        ),
        maxreq=dict(type='float'),
        cacheable=dict(
            type='bool',
            default=False,
        ),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        usip=dict(type='bool'),
        useproxyport=dict(type='bool'),
        sp=dict(type='bool'),
        rtspsessionidremap=dict(
            type='bool',
            default=False,
        ),
        clttimeout=dict(type='float'),
        svrtimeout=dict(type='float'),
        customserverid=dict(
            type='str',
            default='None',
        ),
        cka=dict(type='bool'),
        tcpb=dict(type='bool'),
        cmp=dict(type='bool'),
        maxbandwidth=dict(type='float'),
        accessdown=dict(
            type='bool',
            default=False
        ),
        monthreshold=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        tcpprofilename=dict(type='str'),
        httpprofilename=dict(type='str'),
        hashid=dict(type='float'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        netprofile=dict(type='str'),
        processlocal=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ],
        ),
        dnsprofilename=dict(type='str'),
        ipaddress=dict(type='str'),
        graceful=dict(
            type='bool',
            default=False,
        ),
    )
    # Arguments handled by the module itself rather than by NITRO.
    hand_inserted_arguments = dict(
        monitor_bindings=dict(type='list'),
        disabled=dict(
            type='bool',
            default=False,
        ),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # requests is matched by type name so the module does not hard-depend on it.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Fallthrough to rest of execution
    # Instantiate Service Config object
    # Attributes the module may read and write on the NITRO service object.
    readwrite_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'maxclient',
        'healthmonitor',
        'maxreq',
        'cacheable',
        'cip',
        'cipheader',
        'usip',
        'useproxyport',
        'sp',
        'rtspsessionidremap',
        'clttimeout',
        'svrtimeout',
        'customserverid',
        'cka',
        'tcpb',
        'cmp',
        'maxbandwidth',
        'accessdown',
        'monthreshold',
        'downstateflush',
        'tcpprofilename',
        'httpprofilename',
        'hashid',
        'comment',
        'appflowlog',
        'netprofile',
        'processlocal',
        'dnsprofilename',
        'ipaddress',
        'graceful',
    ]
    # Attributes reported by NITRO that must never be written.
    readonly_attrs = [
        'numofconnections',
        'policyname',
        'serviceconftype',
        'serviceconftype2',
        'value',
        'gslb',
        'dup_state',
        'publicip',
        'publicport',
        'svrstate',
        'monitor_state',
        'monstatcode',
        'lastresponse',
        'responsetime',
        'riseapbrstatsmsgcode2',
        'monstatparam1',
        'monstatparam2',
        'monstatparam3',
        'statechangetimesec',
        'statechangetimemsec',
        'tickssincelaststatechange',
        'stateupdatereason',
        'clmonowner',
        'clmonview',
        'serviceipstr',
        'oracleserverversion',
    ]
    # Attributes that can only be set at creation time; changing any of
    # these requires deleting and re-creating the service.
    immutable_attrs = [
        'name',
        'ip',
        'servername',
        'servicetype',
        'port',
        'cleartextport',
        'cachetype',
        'cipheader',
        'serverid',
        'state',
        'td',
        'monitor_name_svc',
        'riseapbrstatsmsgcode',
        'all',
        'Internal',
        'newname',
    ]
    # Conversions from the Ansible value space to the NITRO value space.
    transforms = {
        'pathmonitorindv': ['bool_yes_no'],
        'cacheable': ['bool_yes_no'],
        'cka': ['bool_yes_no'],
        'pathmonitor': ['bool_yes_no'],
        'tcpb': ['bool_yes_no'],
        'sp': ['bool_on_off'],
        'graceful': ['bool_yes_no'],
        'usip': ['bool_yes_no'],
        'healthmonitor': ['bool_yes_no'],
        'useproxyport': ['bool_yes_no'],
        'rtspsessionidremap': ['bool_on_off'],
        'accessdown': ['bool_yes_no'],
        'cmp': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
        'processlocal': [lambda v: v.upper()],
    }
    monitor_bindings_rw_attrs = [
        'servicename',
        'servicegroupname',
        'dup_state',
        'dup_weight',
        'monitorname',
        'weight',
    ]
    # Translate module arguments to corresponding config object attributes
    if module.params['ip'] is None:
        module.params['ip'] = module.params['ipaddress']
    service_proxy = ConfigProxy(
        actual=service(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        # Apply appropriate state
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not service_exists(client, module):
                # Service is missing: create it and bind its monitors.
                if not module.check_mode:
                    service_proxy.add()
                    sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
                # Check if we try to change value of immutable attributes
                diff_dict = diff(client, module, service_proxy)
                immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys())
                if immutables_changed != []:
                    msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
                    module.fail_json(msg=msg, diff=diff_dict, **module_result)
                # Service sync
                if not service_identical(client, module, service_proxy):
                    if not module.check_mode:
                        service_proxy.update()
                # Monitor bindings sync
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    if not module.check_mode:
                        sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
                module_result['changed'] = True
                if not module.check_mode:
                    if module.params['save_config']:
                        client.save_config()
            else:
                module_result['changed'] = False
            # Apply the enabled/disabled operational state on every run.
            if not module.check_mode:
                res = do_state_change(client, module, service_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not service_exists(client, module):
                    module.fail_json(msg='Service does not exist', **module_result)
                if not service_identical(client, module, service_proxy):
                    module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result)
                if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
                    module.fail_json(msg='Monitor bindings are not identical', **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if service_exists(client, module):
                if not module.check_mode:
                    service_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if service_exists(client, module):
                    module.fail_json(msg='Service still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
    main()
| true | true |
f7ffba5c041af60da82ca90652319fd5556eae04 | 1,038 | py | Python | plots/test 1 total count.py | IvanIFChen/Passiotic | f6a035fad5c6a6372721e2f74f9abdc98d0ffe67 | [
"MIT"
] | 1 | 2019-03-15T15:45:46.000Z | 2019-03-15T15:45:46.000Z | plots/test 1 total count.py | cjcodell1/Passiotic | f6a035fad5c6a6372721e2f74f9abdc98d0ffe67 | [
"MIT"
] | null | null | null | plots/test 1 total count.py | cjcodell1/Passiotic | f6a035fad5c6a6372721e2f74f9abdc98d0ffe67 | [
"MIT"
] | 1 | 2019-07-03T15:49:51.000Z | 2019-07-03T15:49:51.000Z | import matplotlib.pyplot as plt
import json
from dateutil import parser
from pprint import pprint
from collections import defaultdict
from datetime import datetime, timedelta
def load_from_file(filename):
    """Count device sightings per minute from a file of JSON lines.

    Each line holds a DynamoDB-export record with an 'end_time' timestamp
    and a 'pi_id'. Readings from pi_2 are shifted back by six minutes —
    presumably to align its clock with the other sensor (confirm with the
    capture setup). Returns a defaultdict mapping minute -> count.
    """
    counts = defaultdict(int)
    with open(filename, 'r') as f:
        for line in f:
            record = json.loads(line)
            timestamp = parser.parse(record['end_time']['s'])
            if record['pi_id']['s'] == 'pi_2':
                timestamp -= timedelta(minutes=6)
            counts[timestamp.minute] += 1
    return counts
if __name__ == '__main__':
    # Load per-minute device counts from the exported DynamoDB scan.
    data = load_from_file('dynamo_exports/First-Test-Snell-2nd-Floor')
    # Flatten to (minute, count) pairs ordered by minute.
    data = [(key, data[key]) for key in data.keys()]
    data.sort(key=lambda x: x[0])
    f, ax = plt.subplots(1)
    ydata = [x[1] for x in data]
    # Axis labels assume all samples fall within the 11 AM hour
    # (matches the '11:{} AM' format below) — confirm for other runs.
    xdata = ['11:{} AM'.format(x[0]) for x in data]
    ax.plot(xdata, ydata, label='total devices')
    ax.set_ylim(bottom=0, top=100)
    plt.legend()
    plt.show()
import json
from dateutil import parser
from pprint import pprint
from collections import defaultdict
from datetime import datetime, timedelta
def load_from_file(filename):
data = defaultdict(int)
raw_data = []
with open(filename, 'r') as f:
for line in f.readlines():
x = json.loads(line)
date = parser.parse(x['end_time']['s'])
pi = x['pi_id']['s']
if pi == 'pi_2':
date = date - timedelta(minutes=6)
raw_data.append(date.minute)
for d in raw_data:
data[d] += 1
return data
if __name__ == '__main__':
data = load_from_file('dynamo_exports/First-Test-Snell-2nd-Floor')
data = [(key, data[key]) for key in data.keys()]
data.sort(key=lambda x: x[0])
f, ax = plt.subplots(1)
ydata = [x[1] for x in data]
xdata = ['11:{} AM'.format(x[0]) for x in data]
ax.plot(xdata, ydata, label='total devices')
ax.set_ylim(bottom=0, top=100)
plt.legend()
plt.show()
| true | true |
f7ffbb10489dfcbfb2d4ef2719161bc10a92cca6 | 1,553 | py | Python | 3. Algorithms/04. String Process Algorithm/KMP Algorithm Design/kmp_algorithm.py | oneonlee/Computer-Science | 4a3e2bf92986b5db3967d788832bca353fe71e61 | [
"MIT"
] | 1 | 2021-10-19T20:06:55.000Z | 2021-10-19T20:06:55.000Z | 3. Algorithms/04. String Process Algorithm/KMP Algorithm Design/kmp_algorithm.py | oneonlee/Computer-Science | 4a3e2bf92986b5db3967d788832bca353fe71e61 | [
"MIT"
] | null | null | null | 3. Algorithms/04. String Process Algorithm/KMP Algorithm Design/kmp_algorithm.py | oneonlee/Computer-Science | 4a3e2bf92986b5db3967d788832bca353fe71e61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""kmp_algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1wsUiWXaWKuIxv-udYnxqV463PD4z2ps-
"""
def init_next(pattern):
    """Build the KMP failure table for *pattern*.

    The returned list ``fail`` satisfies: fail[i] is the length of the
    longest proper prefix of pattern[:i+1] that is also a suffix of it.
    """
    length = len(pattern)
    fail = [0] * length
    j = 0  # length of the currently matched prefix
    for i in range(1, length):
        # On mismatch, fall back through shorter matched prefixes.
        while j > 0 and pattern[i] != pattern[j]:
            j = fail[j - 1]
        if pattern[i] == pattern[j]:
            j += 1
            fail[i] = j
    return fail
def KMP(pattern, text):
    """Search *text* for *pattern* with the Knuth-Morris-Pratt algorithm.

    Prints each match position as it is found (unchanged behaviour) and,
    as a backward-compatible improvement, also returns the list of
    0-based start indices so callers can use the result programmatically.
    Overlapping occurrences are reported.
    """
    M = len(pattern)
    N = len(text)
    fail = init_next(pattern)
    positions = []
    j = 0  # number of pattern characters currently matched
    for i in range(N):
        # On mismatch, fall back to the longest border matched so far.
        while j > 0 and text[i] != pattern[j]:
            j = fail[j - 1]
        if text[i] == pattern[j]:
            if j == M - 1:
                # Full pattern matched, ending at text index i.
                print("패턴이 발생한 위치:", i - (M - 1))
                positions.append(i - (M - 1))
                j = fail[j]  # continue so overlapping matches are found
            else:
                j += 1
    print("탐색 종료")
    return positions
# Sample 1: synthetic string with repeated/overlapping pattern occurrences.
text1 = "ababababcababababcaabbabababcaab"
pattern1 = "abababca"
KMP(pattern1, text1)
# Sample 2: find every occurrence of the word "algorithm" in a sentence.
text2 = "This class is an algorithm design class. Therefore, students will have time to learn about algorithms and implement each algorithm themselves."
pattern2 = "algorithm"
KMP(pattern2, text2)
| 26.775862 | 152 | 0.553123 |
def init_next(pattern):
M = len(pattern)
next = [0 for _ in range(M)]
j = 0
for i in range(1, M):
while j > 0 and pattern[i] != pattern[j]:
j = next[j-1]
if pattern[i] == pattern[j] :
j += 1
next[i] = j
return next
def KMP(pattern, text):
M = len(pattern)
N = len(text)
next = init_next(pattern)
j = 0
for i in range(N):
while j > 0 and text[i] != pattern[j] :
j = next[j-1]
if text[i] == pattern[j]:
if j == M - 1 :
print("패턴이 발생한 위치:", i - (M - 1))
j = next[j]
else:
j += 1
print("탐색 종료")
text1 = "ababababcababababcaabbabababcaab"
pattern1 = "abababca"
KMP(pattern1, text1)
text2 = "This class is an algorithm design class. Therefore, students will have time to learn about algorithms and implement each algorithm themselves."
pattern2 = "algorithm"
KMP(pattern2, text2)
| true | true |
f7ffbd76818cc21e689c35f422047192d559c410 | 766 | py | Python | bin/test-server.py | phorward/flare | 89a20bd1fb5ef7d0deebbd1f76c58a063e86f41e | [
"MIT"
] | null | null | null | bin/test-server.py | phorward/flare | 89a20bd1fb5ef7d0deebbd1f76c58a063e86f41e | [
"MIT"
] | null | null | null | bin/test-server.py | phorward/flare | 89a20bd1fb5ef7d0deebbd1f76c58a063e86f41e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import socketserver
from http.server import SimpleHTTPRequestHandler
class pyodideHttpServer(SimpleHTTPRequestHandler):
    """Static-file handler that serves .wasm files with the proper MIME type."""

    def __init__(self, request, client_address, server):
        # Register the WebAssembly MIME type; browsers need
        # 'application/wasm' for streaming instantiation of .wasm files.
        self.extensions_map.update(
            {
                ".wasm": "application/wasm",
            }
        )
        super().__init__(request, client_address, server)

    def end_headers(self):
        # Uncomment to allow cross-origin requests during development.
        # self.send_header("Access-Control-Allow-Origin", "*")
        super().end_headers()
port = 8080
Handler = pyodideHttpServer
# Allow quick restarts without waiting for sockets in TIME_WAIT to expire.
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", port), Handler) as httpd:
    print("http://localhost:%d" % port)
    httpd.allow_reuse_address = True
    # Serve until interrupted.
    httpd.serve_forever()
import os
import socketserver
from http.server import SimpleHTTPRequestHandler
class pyodideHttpServer(SimpleHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.extensions_map.update(
{
".wasm": "application/wasm",
}
)
super().__init__(request, client_address, server)
def end_headers(self):
super().end_headers()
port = 8080
Handler = pyodideHttpServer
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer(("", port), Handler) as httpd:
print("http://localhost:%d" % port)
httpd.allow_reuse_address = True
httpd.serve_forever()
| true | true |
f7ffbe1d06bacea70998662dd496f56a97c01054 | 4,410 | py | Python | src/cosalib/cli.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | [
"Apache-2.0"
] | null | null | null | src/cosalib/cli.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | [
"Apache-2.0"
] | null | null | null | src/cosalib/cli.py | nikita-dubrovskii/coreos-assembler | 378fb9d7670a32b1c16835739e82c15bb0e8f6aa | [
"Apache-2.0"
] | null | null | null | # NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output
# pylint: disable=C0103
import argparse
import logging as log
import os
from cosalib import (
aliyun,
aws,
azure,
gcp,
vultr,
exoscale
)
# Maps each supported cloud target to a tuple of
# (cli argument builder, ore upload command, ore replicate command).
CLOUD_CLI_TARGET = {
    "aws": (aws.aws_cli,
            aws.aws_run_ore,
            aws.aws_run_ore_replicate),
    "aliyun": (aliyun.aliyun_cli,
               aliyun.aliyun_run_ore,
               aliyun.aliyun_run_ore_replicate),
    "azure": (azure.azure_cli,
              azure.azure_run_ore,
              azure.azure_run_ore_replicate),
    "gcp": (gcp.gcp_cli,
            gcp.gcp_run_ore,
            gcp.gcp_run_ore_replicate),
    "vultr": (vultr.vultr_cli,
              vultr.vultr_run_ore,
              vultr.vultr_run_ore_replicate),
    "exoscale": (exoscale.exoscale_cli,
                 exoscale.exoscale_run_ore,
                 exoscale.exoscale_run_ore_replicate),
}
def cloud_clis():
    """Return the names of the supported cloud targets."""
    return CLOUD_CLI_TARGET.keys()
def get_cloud_ore_cmds(target):
    """Return the (upload, replicate) ore commands for the given cloud target."""
    entry = CLOUD_CLI_TARGET[target]
    return entry[1], entry[2]
def get_cloud_cli(target, parser=None):
    """Extend *parser* (a new BuildCli by default) with *target*'s cloud arguments."""
    if parser is None:
        parser = BuildCli()
    cli_func, _, _ = CLOUD_CLI_TARGET[target]
    return cli_func(parser)
class Cli(argparse.ArgumentParser):
    """
    Abstraction for executing commands from the cli.

    Extends ArgumentParser so individual arguments can also be sourced
    from ``COSA_``-prefixed environment variables via ``env_var=``.
    """

    def __init__(self, *args, **kwargs):
        """
        Initializes the Cli instance.

        :param kwargs: All keyword arguments which will pass to ArgumentParser
        :type kwargs: dict
        """
        argparse.ArgumentParser.__init__(self, *args, **kwargs)
        self.add_argument(
            '--log-level', env_var='COSA_LOG_LEVEL', default='INFO',
            choices=log._nameToLevel.keys(), help='Set the log level')

    def add_argument(self, *args, **kwargs):
        """
        Overloads the add_argument to be able to also read from
        the environment. To read from the environment provide
        the keyword argument env_var.

        :param args: Non keyword arguments to pass to add_argument
        :type args: list
        :param kwargs: Keyword arguments to pass to add_argument
        :type kwargs: dict
        """
        env_var = kwargs.pop('env_var', None)
        if env_var is not None:
            # Namespace every environment variable under COSA_.
            if not env_var.startswith('COSA_'):
                env_var = f"COSA_{env_var}"
            ka = kwargs.get("help", '')
            kwargs['help'] = f"{ka} (Env: {env_var})"
            default = kwargs.pop('default', None)
            # A set environment variable takes precedence over the default.
            super().add_argument(
                *args, default=os.environ.get(env_var, default), **kwargs)
        else:
            super().add_argument(*args, **kwargs)

    def parse_args(self, **kwargs):
        """
        Parses the arguments passed in, verifies inputs, sets the logger,
        and returns the arguments.

        :param kwargs: Keyword arguments forwarded to ArgumentParser.parse_args
                       (e.g. ``args=[...]`` to parse an explicit list)
        :returns: The parsed arguments
        :rtype: argparse.Namespace
        """
        # Bug fix: kwargs used to be accepted but silently discarded, so
        # e.g. parse_args(args=[...]) still read sys.argv. Forward them.
        args = super().parse_args(**kwargs)
        self._set_logger(args.log_level)
        return args

    def _set_logger(self, level):
        """
        Set the log level

        :param level: set the log level
        :type level: str
        """
        log.basicConfig(
            format='[%(levelname)s]: %(message)s',
            level=log._nameToLevel.get(level.upper(), log.DEBUG))
class BuildCli(Cli):
    """
    Cli class that adds in reusable build specific arguments.
    """

    def __init__(self, *args, **kwargs):
        """
        Initializes the BuildCli instance.

        :param kwargs: All keyword arguments which will pass to ArgumentParser
        :type kwargs: dict
        """
        Cli.__init__(self, *args, **kwargs)
        # Set common arguments
        self.add_argument(
            '--build', env_var="BUILD", default='latest',
            help='Override build id, defaults to latest')
        self.add_argument(
            '--buildroot', env_var="BUILD_ROOT", default='builds',
            # typo fix: help text previously read 'Build diretory'
            help='Build directory')
        self.add_argument(
            '--dump', default=False, action='store_true',
            # typo fix: help text previously read 'manfiest'
            help='Dump the manifest and exit')
        self.add_argument(
            '--schema', env_var="META_SCHEMA",
            default='/usr/lib/coreos-assembler/schema/v1.json',
            help='Schema to use. Set to NONE to skip all validation')
| 30.205479 | 78 | 0.587755 |
import argparse
import logging as log
import os
from cosalib import (
aliyun,
aws,
azure,
gcp,
vultr,
exoscale
)
CLOUD_CLI_TARGET = {
"aws": (aws.aws_cli,
aws.aws_run_ore,
aws.aws_run_ore_replicate),
"aliyun": (aliyun.aliyun_cli,
aliyun.aliyun_run_ore,
aliyun.aliyun_run_ore_replicate),
"azure": (azure.azure_cli,
azure.azure_run_ore,
azure.azure_run_ore_replicate),
"gcp": (gcp.gcp_cli,
gcp.gcp_run_ore,
gcp.gcp_run_ore_replicate),
"vultr": (vultr.vultr_cli,
vultr.vultr_run_ore,
vultr.vultr_run_ore_replicate),
"exoscale": (exoscale.exoscale_cli,
exoscale.exoscale_run_ore,
exoscale.exoscale_run_ore_replicate),
}
def cloud_clis():
return CLOUD_CLI_TARGET.keys()
def get_cloud_ore_cmds(target):
_, orecmd, orerep = CLOUD_CLI_TARGET[target]
return orecmd, orerep
def get_cloud_cli(target, parser=None):
if parser is None:
parser = BuildCli()
cli_func, _, _ = CLOUD_CLI_TARGET[target]
return cli_func(parser)
class Cli(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
argparse.ArgumentParser.__init__(self, *args, **kwargs)
self.add_argument(
'--log-level', env_var='COSA_LOG_LEVEL', default='INFO',
choices=log._nameToLevel.keys(), help='Set the log level')
def add_argument(self, *args, **kwargs):
env_var = kwargs.pop('env_var', None)
if env_var is not None:
if not env_var.startswith('COSA_'):
env_var = f"COSA_{env_var}"
ka = kwargs.get("help", '')
kwargs['help'] = f"{ka} (Env: {env_var})"
default = kwargs.pop('default', None)
super().add_argument(
*args, default=os.environ.get(env_var, default), **kwargs)
else:
super().add_argument(*args, **kwargs)
def parse_args(self, **kwargs):
args = super().parse_args()
self._set_logger(args.log_level)
return args
def _set_logger(self, level):
log.basicConfig(
format='[%(levelname)s]: %(message)s',
level=log._nameToLevel.get(level.upper(), log.DEBUG))
class BuildCli(Cli):
def __init__(self, *args, **kwargs):
Cli.__init__(self, *args, **kwargs)
self.add_argument(
'--build', env_var="BUILD", default='latest',
help='Override build id, defaults to latest')
self.add_argument(
'--buildroot', env_var="BUILD_ROOT", default='builds',
help='Build diretory')
self.add_argument(
'--dump', default=False, action='store_true',
help='Dump the manfiest and exit')
self.add_argument(
'--schema', env_var="META_SCHEMA",
default='/usr/lib/coreos-assembler/schema/v1.json',
help='Schema to use. Set to NONE to skip all validation')
| true | true |
f7ffbf7ce1c62a70431c1d8d68dfaca9799270a1 | 4,218 | py | Python | tensorflow_datasets/text/eraser_multi_rc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-10-11T19:15:49.000Z | 2020-10-11T19:15:49.000Z | tensorflow_datasets/text/eraser_multi_rc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2021-02-23T20:16:05.000Z | 2021-02-23T20:16:05.000Z | tensorflow_datasets/text/eraser_multi_rc.py | ChAnYaNG97/datasets | 0a45e2ea98716d325fc1c5e5494f2575f3bdb908 | [
"Apache-2.0"
] | 1 | 2020-08-03T20:19:12.000Z | 2020-08-03T20:19:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Passage, query, answers and answer classification with explanations."""
import json
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@unpublished{eraser2019,
title = {ERASER: A Benchmark to Evaluate Rationalized NLP Models},
author = {Jay DeYoung and Sarthak Jain and Nazneen Fatema Rajani and Eric Lehman and Caiming Xiong and Richard Socher and Byron C. Wallace}
}
@inproceedings{MultiRC2018,
author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
title = {Looking Beyond the Surface:A Challenge Set for Reading Comprehension over Multiple Sentences},
booktitle = {NAACL},
year = {2018}
}
"""
_DESCRIPTION = """
Eraser Multi RC is a dataset for queries over multi-line passages, along with
answers and a rationalte. Each example in this dataset has the following 5 parts
1. A Mutli-line Passage
2. A Query about the passage
3. An Answer to the query
4. A Classification as to whether the answer is right or wrong
5. An Explanation justifying the classification
"""
_DOWNLOAD_URL = 'http://www.eraserbenchmark.com/zipped/multirc.tar.gz'
class EraserMultiRc(tfds.core.GeneratorBasedBuilder):
  """Multi Sentence Reasoning with Explanations (Eraser Benchmark)."""

  VERSION = tfds.core.Version('0.1.1')

  def _info(self):
    # Declares the dataset metadata: feature schema, homepage and citation.
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            'passage': tfds.features.Text(),
            'query_and_answer': tfds.features.Text(),
            'label': tfds.features.ClassLabel(names=['False', 'True']),
            'evidences': tfds.features.Sequence(tfds.features.Text())
        }),
        supervised_keys=None,
        homepage='https://cogcomp.seas.upenn.edu/multirc/',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns SplitGenerators for the train/validation/test jsonl files."""
    dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
    data_dir = os.path.join(dl_dir, 'multirc')
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={'data_dir': data_dir,
                        'filepath': os.path.join(data_dir, 'train.jsonl')},
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={'data_dir': data_dir,
                        'filepath': os.path.join(data_dir, 'val.jsonl')},
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            # These kwargs will be passed to _generate_examples
            gen_kwargs={'data_dir': data_dir,
                        'filepath': os.path.join(data_dir, 'test.jsonl')},
        ),
    ]

  def _generate_examples(self, data_dir, filepath):
    """Yields (annotation_id, example) pairs read from a jsonl split file."""
    multirc_dir = os.path.join(data_dir, 'docs')
    with tf.io.gfile.GFile(filepath) as f:
      for line in f:
        row = json.loads(line)
        evidences = []

        for evidence in row['evidences'][0]:
          docid = evidence['docid']
          evidences.append(evidence['text'])

        # NOTE(review): uses the docid left over from the last evidence;
        # assumes all evidences of an annotation share one doc — confirm.
        passage_file = os.path.join(multirc_dir, docid)
        with tf.io.gfile.GFile(passage_file) as f1:
          passage_text = f1.read()

        yield row['annotation_id'], {
            'passage': passage_text,
            'query_and_answer': row['query'],
            'label': row['classification'],
            'evidences': evidences
        }
| 35.745763 | 143 | 0.664533 |
import json
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@unpublished{eraser2019,
title = {ERASER: A Benchmark to Evaluate Rationalized NLP Models},
author = {Jay DeYoung and Sarthak Jain and Nazneen Fatema Rajani and Eric Lehman and Caiming Xiong and Richard Socher and Byron C. Wallace}
}
@inproceedings{MultiRC2018,
author = {Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
title = {Looking Beyond the Surface:A Challenge Set for Reading Comprehension over Multiple Sentences},
booktitle = {NAACL},
year = {2018}
}
"""
_DESCRIPTION = """
Eraser Multi RC is a dataset for queries over multi-line passages, along with
answers and a rationalte. Each example in this dataset has the following 5 parts
1. A Mutli-line Passage
2. A Query about the passage
3. An Answer to the query
4. A Classification as to whether the answer is right or wrong
5. An Explanation justifying the classification
"""
_DOWNLOAD_URL = 'http://www.eraserbenchmark.com/zipped/multirc.tar.gz'
class EraserMultiRc(tfds.core.GeneratorBasedBuilder):
    """ERASER Multi RC: multi-sentence reading comprehension with rationales.

    Each example pairs a multi-line passage with a query+answer string, a
    'True'/'False' correctness label, and the evidence sentences (the
    rationale) supporting that label.
    """
    VERSION = tfds.core.Version('0.1.1')
    def _info(self):
        # Feature schema, homepage and citation exposed to the TFDS catalog.
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict({
                'passage': tfds.features.Text(),
                'query_and_answer': tfds.features.Text(),
                'label': tfds.features.ClassLabel(names=['False', 'True']),
                'evidences': tfds.features.Sequence(tfds.features.Text())
            }),
            supervised_keys=None,
            homepage='https://cogcomp.seas.upenn.edu/multirc/',
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each split to its .jsonl file."""
        dl_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
        data_dir = os.path.join(dl_dir, 'multirc')
        return [
            tfds.core.SplitGenerator(
                name=tfds.Split.TRAIN,
                gen_kwargs={'data_dir': data_dir,
                            'filepath': os.path.join(data_dir, 'train.jsonl')},
            ),
            tfds.core.SplitGenerator(
                name=tfds.Split.VALIDATION,
                gen_kwargs={'data_dir': data_dir,
                            'filepath': os.path.join(data_dir, 'val.jsonl')},
            ),
            tfds.core.SplitGenerator(
                name=tfds.Split.TEST,
                gen_kwargs={'data_dir': data_dir,
                            'filepath': os.path.join(data_dir, 'test.jsonl')},
            ),
        ]
    def _generate_examples(self, data_dir, filepath):
        """Yield (annotation_id, example_dict) pairs from one split's .jsonl."""
        multirc_dir = os.path.join(data_dir, 'docs')
        with tf.io.gfile.GFile(filepath) as f:
            for line in f:
                row = json.loads(line)
                evidences = []
                # Only the first evidence group of each row is used.
                for evidence in row['evidences'][0]:
                    docid = evidence['docid']
                    evidences.append(evidence['text'])
                # NOTE(review): `docid` is whatever the *last* evidence in the
                # group referenced -- this assumes all evidences in a group
                # point at the same document; confirm against the data format.
                passage_file = os.path.join(multirc_dir, docid)
                with tf.io.gfile.GFile(passage_file) as f1:
                    passage_text = f1.read()
                yield row['annotation_id'], {
                    'passage': passage_text,
                    'query_and_answer': row['query'],
                    'label': row['classification'],
                    'evidences': evidences
                }
| true | true |
f7ffbfc2bc7529594e23f3889a5ead2b68cc1ee1 | 7,305 | py | Python | mnist_cnn.py | VishaalMK/VectorDefense | dc488fbf19bc9aefaf58bcc2b89dfe0e5adc3806 | [
"MIT"
] | 13 | 2018-04-28T21:41:23.000Z | 2021-05-29T05:05:14.000Z | mnist_cnn.py | VishaalMK/VectorDefense | dc488fbf19bc9aefaf58bcc2b89dfe0e5adc3806 | [
"MIT"
] | null | null | null | mnist_cnn.py | VishaalMK/VectorDefense | dc488fbf19bc9aefaf58bcc2b89dfe0e5adc3806 | [
"MIT"
] | 5 | 2018-04-26T13:11:02.000Z | 2019-06-14T16:12:40.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from keras import backend
import tensorflow as tf
from tensorflow.python.platform import flags
import scipy.misc
import matplotlib
matplotlib.use('Agg')
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.utils import AccuracyReport
from cleverhans.utils_keras import KerasModelWrapper, conv_2d
FLAGS = flags.FLAGS
model_path = "models/mnist"
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """Build the Keras CNN used for MNIST classification.

    :param logits: if True, also return the pre-softmax logits tensor
    :param input_ph: TensorFlow input tensor (required when logits=True;
        "ph" stands for placeholder but it need not actually be one)
    :param img_rows: number of rows in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per conv layer
    :param nb_classes: number of output classes
    :return: the Keras model, or (model, logits_tensor) when logits=True
    """
    # Channel ordering differs between the Theano and TensorFlow backends.
    if keras.backend.image_dim_ordering() == 'th':
        shape_in = (channels, img_rows, img_cols)
    else:
        shape_in = (img_rows, img_cols, channels)
    model = Sequential()
    model.add(conv_2d(nb_filters, (5, 5), (1, 1), "same",
                      input_shape=shape_in))
    model.add(Activation('relu'))
    model.add(conv_2d(nb_filters, (5, 5), (1, 1), "valid"))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dropout(0.25))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    # Capture the logits BEFORE the softmax layer is appended.
    logits_tensor = model(input_ph) if logits else None
    model.add(Activation('softmax'))
    if logits:
        return model, logits_tensor
    return model
def mnist_fgsm(train_start=0, train_end=60000, test_start=0,
               test_end=10000, nb_epochs=6, batch_size=128,
               learning_rate=0.001, train_dir=model_path,
               filename="mnist.ckpt", load_model=False,
               nb_classes=10, testing=False):
    """
    MNIST CleverHans tutorial: train (or load from checkpoint) the CNN and
    report clean test accuracy.
    NOTE(review): despite the name, no FGSM attack is performed in this
    function; it only trains/loads and evaluates the model.
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param train_dir: Directory storing the saved model
    :param filename: Filename to save model under
    :param load_model: True for load, False for not load
    :param nb_classes: number of output classes (accepted but unused in the
        body; the placeholders below hard-code 10)
    :param testing: if true, test error is calculated
    :return: an AccuracyReport object
    """
    # Put Keras layers (e.g. Dropout) into inference mode for graph building.
    keras.layers.core.K.set_learning_phase(0)
    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)
    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")
    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'th', temporarily setting to 'tf'")
    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)
    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)
    # Use label smoothing
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))
    # Define TF model graph
    model, logits = cnn_model(logits=True, input_ph=x)
    preds = model(x)
    print("Defined TensorFlow model graph.")
    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test examples
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
        report.clean_train_clean_eval = acc
        assert X_test.shape[0] == test_end - test_start, X_test.shape
        print('Test accuracy on legitimate examples: %0.4f' % acc)
    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate,
        'train_dir': train_dir,
        'filename': filename
    }
    ckpt = tf.train.get_checkpoint_state(train_dir)
    # ckpt_path stays False when no checkpoint exists yet.
    ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
    rng = np.random.RandomState([2017, 8, 30])
    if load_model and ckpt_path:
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_path)
        print("Model loaded from: {}".format(ckpt_path))
        evaluate()
    else:
        print("Model was not loaded, training from scratch.")
        model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,
                    args=train_params, save=True, rng=rng)
    # Calculate training error
    if testing:
        eval_params = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds, X_train, Y_train, args=eval_params)
        report.train_clean_train_clean_eval = acc
    return report
def main(argv=None):
    """Entry point for tf.app.run: forwards command-line FLAGS to mnist_fgsm."""
    run_kwargs = {
        'nb_epochs': FLAGS.nb_epochs,
        'batch_size': FLAGS.batch_size,
        'learning_rate': FLAGS.learning_rate,
        'train_dir': FLAGS.train_dir,
        'filename': FLAGS.filename,
        'nb_classes': FLAGS.nb_classes,
        'load_model': FLAGS.load_model,
    }
    mnist_fgsm(**run_kwargs)
if __name__ == '__main__':
    # Command-line flags; their values are read through FLAGS inside main().
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_string('train_dir', model_path, 'Directory where to save model.')
    flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')
    flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')
    # tf.app.run parses the flags and then invokes main().
    tf.app.run()
| 36.893939 | 82 | 0.656947 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from keras import backend
import tensorflow as tf
from tensorflow.python.platform import flags
import scipy.misc
import matplotlib
matplotlib.use('Agg')
from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.utils import AccuracyReport
from cleverhans.utils_keras import KerasModelWrapper, conv_2d
FLAGS = flags.FLAGS
model_path = "models/mnist"
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
model = Sequential()
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (5, 5), (1, 1), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d(nb_filters, (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dropout(0.25),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
if logits:
return model, logits_tensor
else:
return model
def mnist_fgsm(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=6, batch_size=128,
learning_rate=0.001, train_dir=model_path,
filename="mnist.ckpt", load_model=False,
nb_classes=10, testing=False):
keras.layers.core.K.set_learning_phase(0)
report = AccuracyReport()
tf.set_random_seed(1234)
if not hasattr(backend, "tf"):
raise RuntimeError("This tutorial requires keras to be configured"
" to use the TensorFlow backend.")
if keras.backend.image_dim_ordering() != 'tf':
keras.backend.set_image_dim_ordering('tf')
print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
"'th', temporarily setting to 'tf'")
sess = tf.Session()
keras.backend.set_session(sess)
X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end)
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
model, logits = cnn_model(logits=True, input_ph=x)
preds = model(x)
print("Defined TensorFlow model graph.")
def evaluate():
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
ckpt = tf.train.get_checkpoint_state(train_dir)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
rng = np.random.RandomState([2017, 8, 30])
if load_model and ckpt_path:
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
evaluate()
else:
print("Model was not loaded, training from scratch.")
model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,
args=train_params, save=True, rng=rng)
if testing:
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, X_train, Y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
return report
def main(argv=None):
mnist_fgsm(nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
train_dir=FLAGS.train_dir,
filename=FLAGS.filename,
nb_classes=FLAGS.nb_classes,
load_model=FLAGS.load_model)
if __name__ == '__main__':
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_string('train_dir', model_path, 'Directory where to save model.')
flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')
flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')
tf.app.run()
| true | true |
f7ffc0426e17392c755081a2f23c62440845c239 | 1,564 | py | Python | src/score/score.py | hangzh-msft/MLOpsDatabricks | fc50127b236142a15c102a8c8e44ef1cbb584738 | [
"MIT"
] | null | null | null | src/score/score.py | hangzh-msft/MLOpsDatabricks | fc50127b236142a15c102a8c8e44ef1cbb584738 | [
"MIT"
] | null | null | null | src/score/score.py | hangzh-msft/MLOpsDatabricks | fc50127b236142a15c102a8c8e44ef1cbb584738 | [
"MIT"
] | 1 | 2019-07-17T21:19:18.000Z | 2019-07-17T21:19:18.000Z | import json
import torch
import torch.nn as nn
import torchvision.transforms as transforms
# Bug fix: the Azure ML SDK package is "azureml", not "azorean";
# Model.get_model_path (used in init) lives in azureml.core.model.
from azureml.core.model import Model
from PIL import Image
class CNN(nn.Module):
    """Two-block convolutional classifier for 28x28 single-channel images.

    Each block is Conv(5x5, pad 2) -> BatchNorm -> ReLU -> 2x2 max-pool, so
    the spatial size goes 28 -> 14 -> 7 before the final linear layer maps
    the flattened 7*7*32 features to 10 class scores.
    """

    @staticmethod
    def _conv_block(in_channels, out_channels):
        # Conv -> BN -> ReLU -> pool; halves the spatial resolution.
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=5, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(2))

    def __init__(self):
        super(CNN, self).__init__()
        # Attribute names (layer1/layer2/fc) are part of the checkpoint
        # state_dict contract used by init() -- do not rename.
        self.layer1 = self._conv_block(1, 16)
        self.layer2 = self._conv_block(16, 32)
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        features = self.layer2(self.layer1(x))
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
def init():
    """Load the registered PyTorch model once at service start-up.

    Called by the Azure ML scoring runtime before any request is served.
    Stores the network in the module-level ``model`` consumed by run().
    """
    global model
    model = CNN()
    # Resolve the on-disk path of the registered model inside the container.
    model_path = Model.get_model_path(model_name="torchcnn")
    model.load_state_dict(torch.load(model_path))
    # Inference mode: freezes batch-norm statistics and disables dropout.
    model.eval()
def run(raw_data):
    """Score one request: decode a 28x28 1-bit image from the JSON payload
    and return the predicted class name as a string.
    """
    # NOTE(review): these are the classic MNIST mean/std constants even
    # though the labels below are Fashion-MNIST classes -- confirm they
    # match what the model was trained with.
    preprocess = transforms.transforms.Compose([
        transforms.transforms.ToTensor(),
        transforms.transforms.Normalize(
            (0.1307,), (0.3081,))
    ])
    payload = json.loads(raw_data)['data']
    img = Image.frombytes('1', (28, 28), payload.encode())
    batch = preprocess(img).unsqueeze(0)
    classes = ['tshirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    index = torch.argmax(model(batch), 1)
    return classes[index]
| 28.436364 | 65 | 0.590153 | import json
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from azorean.core.model import Model
from PIL import Image
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Linear(7 * 7 * 32, 10)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def init():
global model
model = CNN()
model_path = Model.get_model_path(model_name="torchcnn")
model.load_state_dict(torch.load(model_path))
model.eval()
def run(raw_data):
transform = transforms.transforms.Compose([
transforms.transforms.ToTensor(),
transforms.transforms.Normalize(
(0.1307,), (0.3081,))
])
img = Image.frombytes(
'1', (28, 28), (json.loads(raw_data)['data']).encode())
input_data = transform(img)
input_data = input_data.unsqueeze(0)
classes = ['tshirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
output = model(input_data)
index = torch.argmax(output, 1)
return classes[index]
| true | true |
f7ffc23e048810283c9cce996af4b4f5676413f0 | 1,395 | py | Python | setup.py | hachi-88/online-judge-tools | f008cecf010468d8fbde6f7c21bf15f8d9e2b506 | [
"MIT"
] | 1 | 2018-11-03T19:17:33.000Z | 2018-11-03T19:17:33.000Z | setup.py | hachi-88/online-judge-tools | f008cecf010468d8fbde6f7c21bf15f8d9e2b506 | [
"MIT"
] | null | null | null | setup.py | hachi-88/online-judge-tools | f008cecf010468d8fbde6f7c21bf15f8d9e2b506 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup, find_packages
import imp
def load_module(module_path):
path = None
for name in module_path.split('.'):
file, path, description = imp.find_module(name, path)
path = [ path ]
return imp.load_module(name, file, path[0], description)
# Version/author metadata is defined once inside the package and imported
# here so setup.py never drifts out of sync with the code.
version = load_module('onlinejudge.implementation.version')
# The PyPI long description comes straight from the project README.
with open('readme.md', encoding='utf-8') as fh:
    readme = fh.read()
setup(
    name=version.name,
    version=version.__version__,
    description='Tools for online-judge services',
    install_requires=[
        'requests',
        'lxml',
        'beautifulsoup4',
        'colorama',
        'sympy',
        'pipdate',
    ],
    long_description=readme,
    author=version.__author__,
    author_email=version.__email__,
    url=version.__url__,
    license=version.__license__,
    packages=find_packages(exclude=( 'tests', 'docs' )),
    # Installs the `oj` command-line entry point.
    scripts=[ 'oj' ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development',
        'Topic :: Text Processing :: Markup :: HTML',
        'Topic :: Utilities',
    ],
)
| 29.0625 | 61 | 0.616487 |
from setuptools import setup, find_packages
import imp
def load_module(module_path):
path = None
for name in module_path.split('.'):
file, path, description = imp.find_module(name, path)
path = [ path ]
return imp.load_module(name, file, path[0], description)
version = load_module('onlinejudge.implementation.version')
with open('readme.md', encoding='utf-8') as fh:
readme = fh.read()
setup(
name=version.name,
version=version.__version__,
description='Tools for online-judge services',
install_requires=[
'requests',
'lxml',
'beautifulsoup4',
'colorama',
'sympy',
'pipdate',
],
long_description=readme,
author=version.__author__,
author_email=version.__email__,
url=version.__url__,
license=version.__license__,
packages=find_packages(exclude=( 'tests', 'docs' )),
scripts=[ 'oj' ],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Utilities',
],
)
| true | true |
f7ffc2b0d189d0a4f9c7d238ca56f57dc48f5adc | 11,304 | py | Python | code/main.py | guangyizhangbci/EEG_Riemannian | 2d301bf3d06a192da2829c1c54b24d388ddea1dd | [
"MIT"
] | 12 | 2022-02-22T17:15:04.000Z | 2022-03-19T11:41:15.000Z | code/main.py | guangyizhangbci/RFNet | 2d301bf3d06a192da2829c1c54b24d388ddea1dd | [
"MIT"
] | null | null | null | code/main.py | guangyizhangbci/RFNet | 2d301bf3d06a192da2829c1c54b24d388ddea1dd | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import tensorflow as tf
import matplotlib.pyplot as plt
import sys, os
import numpy as np
from tqdm import tqdm
from rich.progress import track
from time import time
import pyriemann
import yaml
import argparse
from scipy.stats import pearsonr
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import cohen_kappa_score,accuracy_score
from sklearn.model_selection import KFold
from spatial_embedding import spatial_features
from model.spatial_temporal_information import spatial_temporal_info_stream
from utils import root_mean_squared_error_numpy, load_dataset_signal_addr, load_dataset_feature_addr, parse_valid_data_all, save_test_result
print('ready')
# Command-line interface for the EEG experiments.
parser = argparse.ArgumentParser(description='Spatial Temporal_Info')
# Bug fix: the --dataset help text was a copy-paste of "learning rate".
parser.add_argument('--dataset', default='BCI_IV_2b', type=str,
                    help='dataset name (e.g. BCI_IV_2a, BCI_IV_2b, SEED, SEED_VIG)')
parser.add_argument('--cpu-seed', default=0, type=int, metavar='N',
                    help='cpu seed')
parser.add_argument('--gpu-seed', default=12345, type=int, metavar='N',
                    help='gpu seed')
parser.add_argument('--lr', default=0.001, type=float, metavar='N',
                    help='learning rate')
parser.add_argument('--batch-size', default=32, type=int, metavar='N',
                    help='train batchsize')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='training epochs')
parser.add_argument('--early-stopping', default=200, type=int, metavar='N',
                    help='EarlyStopping')
# NOTE(review): with default=True and action='store_false', passing
# --riemannian_dist turns the option OFF; with default=False the
# --saved-ckpt flag can never change the value -- confirm intended
# semantics before relying on either flag.
parser.add_argument('--riemannian_dist', default=True, action='store_false')
parser.add_argument('--saved-ckpt', default=False, action='store_false')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
def load_config(name):
    """Parse and return the YAML config file located next to this script."""
    config_path = os.path.join(sys.path[0], name)
    with open(config_path) as stream:
        return yaml.safe_load(stream)
# Per-dataset hyper-parameters (subject/session counts, ranks, ...).
config = load_config('dataset_params.yaml')
# Training settings shared by every experiment run.
net_params = {'epochs': args.epochs, 'batch_size': args.batch_size, 'early_stopping': args.early_stopping, 'saved_ckpt_flag': args.saved_ckpt}
class experiments():
    """Runs the train/evaluate loop appropriate for the selected dataset.

    The flow differs per dataset: the BCI competition sets ship a fixed
    train/test split, SEED is evaluated per subject and session, and
    SEED-VIG uses per-subject k-fold cross-validation with regression
    metrics (RMSE / Pearson correlation).
    """
    def __init__(self, dataset_name):
        self.dataset_name = dataset_name
    def run_seed(self):
        """SEED: per-subject, per-session classification (accuracy/kappa).

        Loads the filtered EEG of each frequency band plus the extracted
        features (DE, PSD) from the address templates returned by the
        dataset helpers.
        """
        addr_dict = load_dataset_signal_addr(self.dataset_name)
        signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())
        addr_dict = load_dataset_feature_addr(self.dataset_name)
        features_train_addr, features_test_addr, _, _ = list(addr_dict.values())
        test_acc_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))
        test_kap_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))
        for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] + 1)):
            for session_num in range(1, config[self.dataset_name]['Session_No'] + 1):
                # ____________________LOAD DATA____________________
                X_train_signal = np.load(signal_train_addr.format(subject_num, session_num))
                X_test_signal = np.load(signal_test_addr.format(subject_num, session_num))
                X_train_features = np.load(features_train_addr.format(subject_num, session_num))
                X_test_features = np.load(features_test_addr.format(subject_num, session_num))
                Y_train = np.load(label_train_addr.format(subject_num, session_num))
                Y_test = np.load(label_test_addr.format(subject_num, session_num))
                train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)
                # Bug fix: was `seld.dataset_name`, which raised NameError.
                Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, self.dataset_name, net_params)
                test_acc_result[subject_num - 1, session_num - 1] = np.mean(accuracy_score(Y_test, np.argmax(Y_pred, axis=-1)))
                test_kap_result[subject_num - 1, session_num - 1] = np.mean(cohen_kappa_score(Y_test, np.argmax(Y_pred, axis=-1)))
        # Bug fix: the result arrays are 2-D (subject x session), so the
        # original axis=2 raised an AxisError.  Average over sessions to get
        # one score per subject, matching run_bci's per-subject output.
        test_acc_result = np.mean(test_acc_result, axis=1)
        test_kap_result = np.mean(test_kap_result, axis=1)
        save_test_result(self.dataset_name, test_acc_result, test_kap_result)
    def run_seed_vig(self):
        """SEED-VIG: per-subject k-fold regression (RMSE / Pearson r)."""
        addr_dict = load_dataset_signal_addr(self.dataset_name)
        signal_addr, label_addr = list(addr_dict.values())
        addr_dict = load_dataset_feature_addr(self.dataset_name)
        features_addr, _ = list(addr_dict.values())
        test_Fold_No = config[self.dataset_name]['Fold_No']
        # Bug fix: these accumulators were re-initialised inside the subject
        # loop, discarding the rows of every previous subject.
        rmse_array = np.zeros((config[self.dataset_name]['Subject_No'], test_Fold_No))
        corr_array = np.zeros((config[self.dataset_name]['Subject_No'], test_Fold_No))
        for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] + 1)):
            # ____________________LOAD DATA____________________
            X_signal = np.load(signal_addr.format(subject_num))
            X_features = np.load(features_addr.format(subject_num))
            Y = np.load(label_addr.format(subject_num))
            # sklearn >= 0.24 requires keyword arguments for KFold.
            kfold_test = KFold(n_splits=test_Fold_No, shuffle=True, random_state=1)
            # Collect per-fold predictions; concatenating once after the loop
            # avoids the shape-inconsistent vstack/ravel of the original.
            Y_test_parts = []
            Y_pred_parts = []
            for fold_num, (train_index, test_index) in enumerate(kfold_test.split(X_signal), start=1):
                print("KFold No.", fold_num)
                X_train_signal, X_test_signal = X_signal[train_index], X_signal[test_index]
                X_train_features, X_test_features = X_features[train_index], X_features[test_index]
                Y_train, Y_test = Y[train_index], Y[test_index]
                train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)
                Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, self.dataset_name, net_params)
                Y_test_parts.append(np.ravel(Y_test))
                Y_pred_parts.append(np.ravel(Y_pred))
            Y_test_total = np.concatenate(Y_test_parts)
            Y_pred_total = np.concatenate(Y_pred_parts)
            print(Y_test_total.shape, Y_pred_total.shape)
            # RMSE / correlation over all folds of this subject.
            rmse_value = root_mean_squared_error_numpy(Y_test_total, Y_pred_total)
            corcoeff_value, _ = pearsonr(Y_test_total, Y_pred_total)
            # NOTE(review): as in the original, the per-subject value is
            # stored only in the last fold column; kept for output-shape
            # compatibility -- confirm the intended layout.
            rmse_array[subject_num - 1, test_Fold_No - 1] = rmse_value
            corr_array[subject_num - 1, test_Fold_No - 1] = corcoeff_value
        # Bug fix: the original saved `test_acc_result`/`test_kap_result`,
        # which are undefined in this method (NameError); save the computed
        # RMSE and correlation arrays instead, once after all subjects.
        save_test_result(self.dataset_name, rmse_array, corr_array)
    def run_bci(self):
        """BCI IV 2a/2b: fixed train/test split per subject (accuracy/kappa)."""
        addr_dict = load_dataset_signal_addr(self.dataset_name)
        signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())
        addr_dict = load_dataset_feature_addr(self.dataset_name)
        features_train_addr, features_test_addr, _, _ = list(addr_dict.values())
        test_acc_result = np.zeros(config[self.dataset_name]['Subject_No'])
        test_kap_result = np.zeros(config[self.dataset_name]['Subject_No'])
        for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] + 1)):
            # ____________________LOAD DATA____________________
            X_train_signal = np.load(signal_train_addr.format(subject_num))
            X_test_signal = np.load(signal_test_addr.format(subject_num))
            X_train_features = np.load(features_train_addr.format(subject_num))
            X_test_features = np.load(features_test_addr.format(subject_num))
            # Labels arrive as 1..n_classes; shift to 0-based for the model.
            Y_train = np.expand_dims(np.load(label_train_addr.format(subject_num)), axis=1) - 1
            Y_test = np.expand_dims(np.load(label_test_addr.format(subject_num)), axis=1) - 1
            X_train_signal, X_train_features, Y_train = parse_valid_data_all(X_train_signal, X_train_features, Y_train)
            X_test_signal, X_test_features, Y_test = parse_valid_data_all(X_test_signal, X_test_features, Y_test)
            train_embed, test_embed = spatial_features(config, self.dataset_name, args.riemannian_dist, config[self.dataset_name]['params']['Rank_No']).embedding(X_train_signal, X_test_signal)
            Y_pred = spatial_temporal_info_stream(train_embed, test_embed, X_train_features, X_test_features, Y_train, Y_test, self.dataset_name, net_params)
            # 2a outputs one-hot class scores; 2b outputs a probability.
            if '2a' in self.dataset_name:
                Y_pred = np.argmax(Y_pred, axis=-1)
            else:
                Y_pred = np.round(Y_pred)
            Y_test = Y_test.squeeze(1)
            test_acc_result[subject_num - 1] = np.mean(accuracy_score(Y_test, Y_pred))
            test_kap_result[subject_num - 1] = np.mean(cohen_kappa_score(Y_test, Y_pred))
        save_test_result(self.dataset_name, test_acc_result, test_kap_result)
    def run(self):
        """Dispatch to the dataset-specific experiment loop."""
        if 'BCI' in self.dataset_name:
            self.run_bci()
        elif self.dataset_name == 'SEED':
            self.run_seed()
        elif self.dataset_name == 'SEED_VIG':
            self.run_seed_vig()
        else:
            # ValueError is more precise and still a subclass of Exception.
            raise ValueError('Datasets Name Error')
if __name__ == '__main__':
    config = load_config('dataset_params.yaml')
    # Pin both CPU and GPU RNG seeds for reproducibility, then run the
    # experiment loop for the dataset chosen on the command line.
    with tf.device("gpu:0"):
        np.random.seed(args.cpu_seed)
        tf.random.set_random_seed(args.gpu_seed)
        experiments(args.dataset).run()
    #
| 41.255474 | 223 | 0.662863 | from __future__ import print_function, division
import tensorflow as tf
import matplotlib.pyplot as plt
import sys, os
import numpy as np
from tqdm import tqdm
from rich.progress import track
from time import time
import pyriemann
import yaml
import argparse
from scipy.stats import pearsonr
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import cohen_kappa_score,accuracy_score
from sklearn.model_selection import KFold
from spatial_embedding import spatial_features
from model.spatial_temporal_information import spatial_temporal_info_stream
from utils import root_mean_squared_error_numpy, load_dataset_signal_addr, load_dataset_feature_addr, parse_valid_data_all, save_test_result
print('ready')
parser = argparse.ArgumentParser(description='Spatial Temporal_Info')
parser.add_argument('--dataset', default='BCI_IV_2b', type=str,
help='learning rate')
parser.add_argument('--cpu-seed', default=0, type=int, metavar='N',
help='cpu seed')
parser.add_argument('--gpu-seed', default=12345, type=int, metavar='N',
help='gpu seed')
parser.add_argument('--lr', default=0.001, type=float, metavar='N',
help='learning rate')
parser.add_argument('--batch-size', default=32, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='training epochs')
parser.add_argument('--early-stopping', default=200, type=int, metavar='N',
help='EarlyStopping')
parser.add_argument('--riemannian_dist', default=True, action='store_false')
parser.add_argument('--saved-ckpt', default=False, action='store_false')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
def load_config(name):
with open(os.path.join(sys.path[0], name)) as file:
config = yaml.safe_load(file)
return config
config = load_config('dataset_params.yaml')
net_params = {'epochs': args.epochs, 'batch_size': args.batch_size, 'early_stopping': args.early_stopping, 'saved_ckpt_flag': args.saved_ckpt}
class experiments():
def __init__(self, dataset_name):
self.dataset_name = dataset_name
def run_seed(self):
addr_dict = load_dataset_signal_addr(self.dataset_name)
signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())
addr_dict = load_dataset_feature_addr(self.dataset_name)
features_train_addr, features_test_addr, _, _ = list(addr_dict.values())
test_acc_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))
test_kap_result = np.zeros((config[self.dataset_name]['Subject_No'], config[self.dataset_name]['Session_No']))
for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] +1)):
for session_num in range(1,config[self.dataset_name]['Session_No']+1):
X_train_signal = np.load(signal_train_addr.format(subject_num, session_num))
X_test_signal = np.load(signal_test_addr.format(subject_num, session_num))
X_train_features = np.load(features_train_addr.format(subject_num, session_num))
X_test_features = np.load(features_test_addr.format(subject_num, session_num))
Y_train = np.load(label_train_addr.format(subject_num, session_num))
Y_test = np.load(label_test_addr.format(subject_num, session_num))
)
test_corr_result = np.zeros((config[self.dataset_name]['Subject_No'], test_Fold_No))
for subject_num in track(range(1, config[self.dataset_name]['Subject_No'] +1)):
X_signal = np.load(signal_addr.format(subject_num))
X_features = np.load(features_addr.format(subject_num))
Y = np.load(label_addr.format(subject_num))
_Y_test))
Y_pred_total = np.vstack((Y_pred_total, temp_Y_pred))
Y_test_total = np.ravel(Y_test_total)
Y_pred_total = np.ravel(Y_pred_total)
print(Y_test_total.shape, Y_pred_total.shape)
test_Fold_count += 1
rmse_value = root_mean_squared_error_numpy(Y_test_total, Y_pred_total)
corcoeff_value, _ = pearsonr(Y_test_total, Y_pred_total)
rmse_array[subject_num-1, test_Fold_No-1] = rmse_value
corr_array[subject_num-1, test_Fold_No-1] = corcoeff_value
save_test_result(self.dataset_name, test_acc_result, test_kap_result)
def run_bci(self):
addr_dict = load_dataset_signal_addr(self.dataset_name)
signal_train_addr, signal_test_addr, label_train_addr, label_test_addr = list(addr_dict.values())
addr_dict = load_dataset_feature_addr(self.dataset_name)
features_train_addr, features_test_addr, _, _ = list(addr_dict.values())
test_acc_result = np.zeros((config[self.dataset_name]['Subject_No']))
test_kap_result = np.zeros((config[self.dataset_name]['Subject_No']))
for subject_num in track(range(1, config[self.dataset_name]['Subject_No']+1)):
X_train_signal = np.load(signal_train_addr.format(subject_num))
X_test_signal = np.load(signal_test_addr.format(subject_num))
X_train_features = np.load(features_train_addr.format(subject_num))
X_test_features = np.load(features_test_addr.format(subject_num))
Y_train = np.load(label_train_addr.format(subject_num))
Y_test = np.load(label_test_addr.format(subject_num))
Y_train = np.expand_dims(Y_train, axis=1) -1
Y_test = np.expand_dims(Y_test, axis=1) -1
_VIG':
self.run_seed_vig()
else:
raise Exception('Datasets Name Error')
if __name__ == '__main__':
config = load_config('dataset_params.yaml')
with tf.device("gpu:0"):
np.random.seed(args.cpu_seed)
tf.random.set_random_seed(args.gpu_seed)
experiments(args.dataset).run()
| true | true |
f7ffc4fb4300a0490128225281deb11ff866f150 | 1,889 | py | Python | bin/test_file.py | rodrigomelo9/uvm-python | e3127eba2cc1519a61dc6f736d862a8dcd6fce20 | [
"Apache-2.0"
] | 140 | 2020-01-18T00:14:17.000Z | 2022-03-29T10:57:24.000Z | bin/test_file.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 24 | 2020-01-18T18:40:58.000Z | 2021-03-25T17:39:07.000Z | bin/test_file.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 34 | 2020-01-18T12:22:59.000Z | 2022-02-11T07:03:11.000Z | # File header
# Header row2
import_var = "ABC"
# Also move this as a comment
# for 'comment_this_func.
#
# With extra empty line
def comment_this_func(aaa):
return aaa * 2
## Move this as class comment docstring
## Add this to class
class ABC():
def do_not_comment_this(self):
pass
# Nothing to do here
test_var = ""
# Is this indent?
def __init__(self, name): # EOL ignored
self.name = name
# Floating comments, not included
# Returns something.
# Accepts nothing.
async def my_func(self):
return "Something"
# Block before var, must preserve
# var indent
my_arr = []
#def no_comments(self, ok):
# ok = 2 * ok
# return ok
# This function does X and Y.
# Plus Z also.
@classmethod
def static_func(cls, some_args):
# Preserve this as line comment, DONT TOUCH
return 2 * some_args
def has_existing_comment(self, a1, a2):
""" Already docstring, Should not do anything
"""
pass
# Should convert into docstring
def gets_docstring_too(self):
raise Exception("Not done")
# Do not move this to next function
# Nor this one
def no_comments_here(self):
pass
# Should convert to docstring
def receives_docstring(self, a, b, c):
# Started comment
if a > b:
# Need to do something
c = b * a
# Closing comments
return (c * c)
# Get comment also
def short_func(self, k):
pass
def triple_dedent_at_end(self):
if 1 == 0:
if 2 == 3:
pass
# Misc class comments
# Move this to docstring of 'must_comment' function
def must_comment(self):
pass
# Misc trailing class comments, keep this
# And also this
# KEEP1
# KEEP2
# KEEP3
# KEEP 4
# KEEP 5
| 19.474227 | 55 | 0.588142 |
import_var = "ABC"
#
# With extra empty line
def comment_this_func(aaa):
return aaa * 2
## Move this as class comment docstring
## Add this to class
class ABC():
def do_not_comment_this(self):
pass
# Nothing to do here
test_var = ""
# Is this indent?
def __init__(self, name): # EOL ignored
self.name = name
# Floating comments, not included
# Returns something.
# Accepts nothing.
async def my_func(self):
return "Something"
# Block before var, must preserve
# var indent
my_arr = []
#def no_comments(self, ok):
# ok = 2 * ok
# return ok
# This function does X and Y.
# Plus Z also.
@classmethod
def static_func(cls, some_args):
# Preserve this as line comment, DONT TOUCH
return 2 * some_args
def has_existing_comment(self, a1, a2):
pass
# Should convert into docstring
def gets_docstring_too(self):
raise Exception("Not done")
# Do not move this to next function
# Nor this one
def no_comments_here(self):
pass
# Should convert to docstring
def receives_docstring(self, a, b, c):
# Started comment
if a > b:
# Need to do something
c = b * a
# Closing comments
return (c * c)
# Get comment also
def short_func(self, k):
pass
def triple_dedent_at_end(self):
if 1 == 0:
if 2 == 3:
pass
# Misc class comments
# Move this to docstring of 'must_comment' function
def must_comment(self):
pass
# Misc trailing class comments, keep this
# And also this
# KEEP1
# KEEP2
# KEEP3
# KEEP 4
# KEEP 5
| true | true |
f7ffc5786cdb18ff1e24ceb8dd4e2d64c44ff421 | 1,879 | py | Python | replacy/test_helper.py | writerai/replaCy | 00a5ff923ebc876165805b267ae8a69d7fdc6405 | [
"MIT"
] | 18 | 2021-11-09T04:19:46.000Z | 2022-03-16T11:18:04.000Z | replacy/test_helper.py | weisisheng/replaCy | 00a5ff923ebc876165805b267ae8a69d7fdc6405 | [
"MIT"
] | 69 | 2020-02-27T19:37:07.000Z | 2021-07-02T17:19:50.000Z | replacy/test_helper.py | weisisheng/replaCy | 00a5ff923ebc876165805b267ae8a69d7fdc6405 | [
"MIT"
] | 4 | 2020-06-11T23:54:26.000Z | 2021-03-18T21:29:59.000Z | import unittest
from typing import Any, Dict, List, Tuple
import spacy
from replacy import ReplaceMatcher
from replacy.db import get_match_dict
class MatchDictTestHelper(unittest.TestCase):
    """Test-case base that validates every rule of a replaCy match dict.

    ``setUpClass`` loads the match dict and builds one ``ReplaceMatcher``;
    the two ``test_*`` methods then feed each rule's embedded positive and
    negative example sentences through that matcher.
    """

    @staticmethod
    def generate_cases(match_dict: Dict[str, Any]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
        """Flatten each rule's ``"test"`` section into (rule_name, sentence) pairs.

        Returns:
            ``(positives, negatives)`` — two lists of ``(rule_name, sentence)``
            tuples taken from the rule's ``test.positive`` and ``test.negative``
            lists respectively.
        """
        positives: List[Tuple[str, str]] = []
        negatives: List[Tuple[str, str]] = []
        for rule_name in match_dict:
            test_set = match_dict[rule_name]["test"]
            positive_cases = test_set["positive"]
            negative_cases = test_set["negative"]
            for positive_sent in positive_cases:
                positives.append((rule_name, positive_sent))
            for negative_sent in negative_cases:
                negatives.append((rule_name, negative_sent))
        return positives, negatives

    @classmethod
    def setUpClass(cls):
        """Build the matcher and test cases once for the whole test class."""
        # Loading the spaCy model is expensive, so it happens once per class.
        nlp = spacy.load("en_core_web_sm")
        match_dict = get_match_dict()
        cls.r_matcher = ReplaceMatcher(nlp, match_dict)
        cls.positive_cases, cls.negative_cases = MatchDictTestHelper.generate_cases(match_dict)

    def test_positive(self):
        """Every positive example sentence must trigger its own rule."""
        for (match_name, positive_sent) in self.positive_cases:
            spans = self.r_matcher(positive_sent)
            # Only spans produced by the rule under test count.
            spans_from_this_rule = list(filter(lambda s: s._.match_name == match_name, spans))
            print(match_name, positive_sent)
            assert len(spans_from_this_rule) > 0, "Positive case should trigger rule"

    def test_negative(self):
        """No negative example sentence may trigger its own rule."""
        for (match_name, negative_sent) in self.negative_cases:
            spans = self.r_matcher(negative_sent)
            # Spans from *other* rules are allowed; only this rule must stay silent.
            spans_from_this_rule = list(filter(lambda s: s._.match_name == match_name, spans))
            print(match_name, negative_sent)
            assert len(spans_from_this_rule) == 0, "Negative case should NOT trigger rule"
| 40.847826 | 106 | 0.671634 | import unittest
from typing import Any, Dict, List, Tuple
import spacy
from replacy import ReplaceMatcher
from replacy.db import get_match_dict
class MatchDictTestHelper(unittest.TestCase):
@staticmethod
def generate_cases(match_dict: Dict[str, Any]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]:
positives: List[Tuple[str, str]] = []
negatives: List[Tuple[str, str]] = []
for rule_name in match_dict:
test_set = match_dict[rule_name]["test"]
positive_cases = test_set["positive"]
negative_cases = test_set["negative"]
for positive_sent in positive_cases:
positives.append((rule_name, positive_sent))
for negative_sent in negative_cases:
negatives.append((rule_name, negative_sent))
return positives, negatives
@classmethod
def setUpClass(cls):
nlp = spacy.load("en_core_web_sm")
match_dict = get_match_dict()
cls.r_matcher = ReplaceMatcher(nlp, match_dict)
cls.positive_cases, cls.negative_cases = MatchDictTestHelper.generate_cases(match_dict)
def test_positive(self):
for (match_name, positive_sent) in self.positive_cases:
spans = self.r_matcher(positive_sent)
spans_from_this_rule = list(filter(lambda s: s._.match_name == match_name, spans))
print(match_name, positive_sent)
assert len(spans_from_this_rule) > 0, "Positive case should trigger rule"
def test_negative(self):
for (match_name, negative_sent) in self.negative_cases:
spans = self.r_matcher(negative_sent)
spans_from_this_rule = list(filter(lambda s: s._.match_name == match_name, spans))
print(match_name, negative_sent)
assert len(spans_from_this_rule) == 0, "Negative case should NOT trigger rule"
| true | true |
f7ffc65a73da4dfbcb0939bcbd5b54e6b9cf2382 | 4,722 | py | Python | src/c3nav/mapdata/render/geometry/altitudearea.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 132 | 2016-11-12T01:45:23.000Z | 2022-03-08T15:17:10.000Z | src/c3nav/mapdata/render/geometry/altitudearea.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 66 | 2016-09-29T09:46:19.000Z | 2022-03-11T23:26:18.000Z | src/c3nav/mapdata/render/geometry/altitudearea.py | johnjohndoe/c3nav | a17f863a3512e305595c16b0300796b6bae81241 | [
"Apache-2.0"
] | 42 | 2016-09-29T08:34:57.000Z | 2022-03-08T15:17:15.000Z | from collections import deque
from itertools import chain
import numpy as np
from c3nav.mapdata.models import AltitudeArea
from c3nav.mapdata.render.geometry.hybrid import HybridGeometry
class AltitudeAreaGeometries:
    """Render-side geometry bundle for one :class:`AltitudeArea`.

    Altitudes are stored fixed-point as ``int(value * 1000)`` (the source
    ``AltitudeArea`` values divided back by 1000 when queried), so all
    ``lower``/``upper`` arguments below are in the same * 1000 scale.
    """

    def __init__(self, altitudearea=None, colors=None, obstacles=None):
        # With no altitudearea, build an empty shell (attributes filled later,
        # e.g. when deserializing).
        if altitudearea is not None:
            self.geometry = altitudearea.geometry
            self.altitude = int(altitudearea.altitude * 1000)
            self.altitude2 = None if altitudearea.altitude2 is None else int(altitudearea.altitude2 * 1000)
            self.point1 = altitudearea.point1
            self.point2 = altitudearea.point2
        else:
            self.geometry = None
            self.altitude = None
            self.altitude2 = None
            self.point1 = None
            self.point2 = None
        # Filled in by create_polyhedrons().
        self.base = None
        self.bottom = None
        # colors: {color: {height: geometry}}; obstacles: {height: {color: [geometries]}}
        # (shapes as consumed by create_hybrid_geometries below).
        self.colors = colors
        self.obstacles = obstacles

    def get_altitudes(self, points):
        """Interpolate altitudes for ``points`` (in * 1000 units) as int32.

        Delegates to AltitudeArea.get_altitudes with coordinates scaled back
        to the model's units, then rescales the result by casting.
        """
        # noinspection PyCallByClass,PyTypeChecker
        return AltitudeArea.get_altitudes(self, points/1000).astype(np.int32)

    def create_hybrid_geometries(self, face_centers, vertices_offset, faces_offset):
        """Convert all geometries to hybrid form and collect their meshes.

        Replaces ``self.geometry`` and every color/obstacle geometry in place
        with HybridGeometry instances, threading the running vertex/face
        offsets through each conversion.

        Returns:
            ``(vertices, faces)`` stacked arrays for all color/obstacle
            geometries, or empty ``(0, 2)`` / ``(0, 3)`` arrays when there
            were none.
        """
        self.geometry = HybridGeometry.create(self.geometry, face_centers)
        vertices = deque()
        faces = deque()
        for color, areas in self.colors.items():
            # tuple() because _call_create_full mutates the mapping in place.
            for height in tuple(areas.keys()):
                faces_offset, vertices_offset = self._call_create_full(areas, height, faces, vertices,
                                                                       faces_offset, vertices_offset)
        for height_obstacles in self.obstacles.values():
            for color_obstacles in height_obstacles.values():
                for i in range(len(color_obstacles)):
                    faces_offset, vertices_offset = self._call_create_full(color_obstacles, i, faces, vertices,
                                                                           faces_offset, vertices_offset)
        if not vertices:
            return np.empty((0, 2), dtype=np.int32), np.empty((0, 3), dtype=np.uint32)
        return np.vstack(vertices), np.vstack(faces)

    def _call_create_full(self, mapping, key, faces, vertices, faces_offset, vertices_offset):
        """Convert ``mapping[key]`` via HybridGeometry.create_full.

        Stores the converted geometry back under ``key``, appends the new
        vertex/face arrays to the accumulators, and returns the advanced
        ``(faces_offset, vertices_offset)``.
        """
        geom = mapping[key]
        new_geom, new_vertices, new_faces = HybridGeometry.create_full(geom, vertices_offset, faces_offset)
        mapping[key] = new_geom
        vertices_offset += new_vertices.shape[0]
        faces_offset += new_faces.shape[0]
        vertices.append(new_vertices)
        faces.append(new_faces)
        return faces_offset, vertices_offset

    def remove_faces(self, faces):
        """Remove the given faces from the main geometry and all color areas."""
        self.geometry.remove_faces(faces)
        for areas in self.colors.values():
            for area in areas.values():
                area.remove_faces(faces)

    def create_polyhedrons(self, create_polyhedron, altitudes, min_altitude, crops):
        """Extrude all 2D geometries into 3D polyhedrons.

        ``altitudes`` may be per-point values for sloped areas; when there is
        no second altitude (flat area) the scalar ``self.altitude`` is used
        instead. Builds three layers for the area itself — the walkable top
        slab (0.7 thick in * 1000 units), the ``base`` shaft down to
        ``min_altitude`` (sides only), and a thin ``bottom`` cap — then thin
        overlays for colored areas and extrusions for obstacles.
        """
        if self.altitude2 is None:
            altitudes = self.altitude
        self.base = HybridGeometry(self.geometry.geom, self.geometry.faces)
        self.bottom = HybridGeometry(self.geometry.geom, self.geometry.faces)
        self.geometry.build_polyhedron(create_polyhedron,
                                       lower=altitudes - int(0.7 * 1000),
                                       upper=altitudes,
                                       crops=crops)
        self.base.build_polyhedron(create_polyhedron,
                                   lower=min_altitude - int(0.7 * 1000),
                                   upper=altitudes - int(0.7 * 1000),
                                   crops=crops,
                                   top=False, bottom=False)
        self.bottom.build_polyhedron(create_polyhedron,
                                     lower=0, upper=1,
                                     crops=crops,
                                     top=False)
        # Colored areas sit as a 1-unit-thin skin on top of the area.
        for geometry in chain(*(areas.values() for areas in self.colors.values())):
            geometry.build_polyhedron(create_polyhedron,
                                      lower=altitudes,
                                      upper=altitudes + int(0.001 * 1000),
                                      crops=crops)
        # todo: treat altitude properly
        for height, height_geometries in self.obstacles.items():
            for color, color_geometries in height_geometries.items():
                for geometry in color_geometries:
                    geometry.build_polyhedron(create_polyhedron,
                                              lower=altitudes,
                                              upper=altitudes + height,
                                              crops=crops)
| 45.84466 | 111 | 0.568827 | from collections import deque
from itertools import chain
import numpy as np
from c3nav.mapdata.models import AltitudeArea
from c3nav.mapdata.render.geometry.hybrid import HybridGeometry
class AltitudeAreaGeometries:
def __init__(self, altitudearea=None, colors=None, obstacles=None):
if altitudearea is not None:
self.geometry = altitudearea.geometry
self.altitude = int(altitudearea.altitude * 1000)
self.altitude2 = None if altitudearea.altitude2 is None else int(altitudearea.altitude2 * 1000)
self.point1 = altitudearea.point1
self.point2 = altitudearea.point2
else:
self.geometry = None
self.altitude = None
self.altitude2 = None
self.point1 = None
self.point2 = None
self.base = None
self.bottom = None
self.colors = colors
self.obstacles = obstacles
def get_altitudes(self, points):
return AltitudeArea.get_altitudes(self, points/1000).astype(np.int32)
def create_hybrid_geometries(self, face_centers, vertices_offset, faces_offset):
self.geometry = HybridGeometry.create(self.geometry, face_centers)
vertices = deque()
faces = deque()
for color, areas in self.colors.items():
for height in tuple(areas.keys()):
faces_offset, vertices_offset = self._call_create_full(areas, height, faces, vertices,
faces_offset, vertices_offset)
for height_obstacles in self.obstacles.values():
for color_obstacles in height_obstacles.values():
for i in range(len(color_obstacles)):
faces_offset, vertices_offset = self._call_create_full(color_obstacles, i, faces, vertices,
faces_offset, vertices_offset)
if not vertices:
return np.empty((0, 2), dtype=np.int32), np.empty((0, 3), dtype=np.uint32)
return np.vstack(vertices), np.vstack(faces)
def _call_create_full(self, mapping, key, faces, vertices, faces_offset, vertices_offset):
geom = mapping[key]
new_geom, new_vertices, new_faces = HybridGeometry.create_full(geom, vertices_offset, faces_offset)
mapping[key] = new_geom
vertices_offset += new_vertices.shape[0]
faces_offset += new_faces.shape[0]
vertices.append(new_vertices)
faces.append(new_faces)
return faces_offset, vertices_offset
def remove_faces(self, faces):
self.geometry.remove_faces(faces)
for areas in self.colors.values():
for area in areas.values():
area.remove_faces(faces)
def create_polyhedrons(self, create_polyhedron, altitudes, min_altitude, crops):
if self.altitude2 is None:
altitudes = self.altitude
self.base = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.bottom = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.geometry.build_polyhedron(create_polyhedron,
lower=altitudes - int(0.7 * 1000),
upper=altitudes,
crops=crops)
self.base.build_polyhedron(create_polyhedron,
lower=min_altitude - int(0.7 * 1000),
upper=altitudes - int(0.7 * 1000),
crops=crops,
top=False, bottom=False)
self.bottom.build_polyhedron(create_polyhedron,
lower=0, upper=1,
crops=crops,
top=False)
for geometry in chain(*(areas.values() for areas in self.colors.values())):
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + int(0.001 * 1000),
crops=crops)
for height, height_geometries in self.obstacles.items():
for color, color_geometries in height_geometries.items():
for geometry in color_geometries:
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + height,
crops=crops)
| true | true |
f7ffc6993a268caf48434eb6fc61b32051c9617a | 7,988 | py | Python | opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py | bgranetzke/opentelemetry-python | f81381cf8aca64a707d934f20c6c27d40b949dce | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py | bgranetzke/opentelemetry-python | f81381cf8aca64a707d934f20c6c27d40b949dce | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py | bgranetzke/opentelemetry-python | f81381cf8aca64a707d934f20c6c27d40b949dce | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
from opentelemetry.sdk._metrics.aggregation import DropAggregation
from opentelemetry.sdk._metrics.instrument import Counter
from opentelemetry.sdk._metrics.measurement import Measurement
from opentelemetry.sdk._metrics.metric_reader_storage import (
MetricReaderStorage,
)
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
from opentelemetry.sdk._metrics.view import View
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
def mock_view_matching(name, *instruments) -> Mock:
    """Build a Mock View whose ``_match(x)`` is true iff ``x`` is in *instruments*."""
    fake_view = Mock(name=name)

    def _match(instrument):
        return instrument in instruments

    fake_view._match.side_effect = _match
    return fake_view
def mock_instrument() -> Mock:
    """Return a Mock instrument whose ``attributes`` start as a fresh empty dict."""
    return Mock(attributes={})
class TestMetricReaderStorage(ConcurrencyTestBase):
    """Tests for MetricReaderStorage: view matching, forwarding, collection.

    ``_ViewInstrumentMatch`` is patched throughout so the tests observe how
    the storage creates and dispatches to per-(view, instrument) matches
    without exercising real aggregation.
    """

    @patch(
        "opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
    )
    def test_creates_view_instrument_matches(
        self, MockViewInstrumentMatch: Mock
    ):
        """It should create a MockViewInstrumentMatch when an instrument matches a view"""
        instrument1 = Mock(name="instrument1")
        instrument2 = Mock(name="instrument2")
        view1 = mock_view_matching("view_1", instrument1)
        view2 = mock_view_matching("view_2", instrument1, instrument2)
        storage = MetricReaderStorage(
            SdkConfiguration(
                resource=Mock(),
                metric_readers=(),
                views=(view1, view2),
            )
        )
        # instrument1 matches view1 and view2, so should create two ViewInstrumentMatch objects
        storage.consume_measurement(Measurement(1, instrument1))
        self.assertEqual(
            len(MockViewInstrumentMatch.call_args_list),
            2,
            MockViewInstrumentMatch.mock_calls,
        )
        # they should only be created the first time the instrument is seen
        storage.consume_measurement(Measurement(1, instrument1))
        self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2)
        # instrument2 matches view2, so should create a single ViewInstrumentMatch
        MockViewInstrumentMatch.call_args_list.clear()
        storage.consume_measurement(Measurement(1, instrument2))
        self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)

    @patch(
        "opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
    )
    def test_forwards_calls_to_view_instrument_match(
        self, MockViewInstrumentMatch: Mock
    ):
        """Measurements and collect() must be routed to the right matches."""
        view_instrument_match1 = Mock()
        view_instrument_match2 = Mock()
        view_instrument_match3 = Mock()
        MockViewInstrumentMatch.side_effect = [
            view_instrument_match1,
            view_instrument_match2,
            view_instrument_match3,
        ]

        instrument1 = Mock(name="instrument1")
        instrument2 = Mock(name="instrument2")
        view1 = mock_view_matching("view1", instrument1)
        view2 = mock_view_matching("view2", instrument1, instrument2)
        storage = MetricReaderStorage(
            SdkConfiguration(
                resource=Mock(),
                metric_readers=(),
                views=(view1, view2),
            )
        )

        # Measurements from an instrument should be passed on to each ViewInstrumentMatch objects
        # created for that instrument
        measurement = Measurement(1, instrument1)
        storage.consume_measurement(measurement)
        view_instrument_match1.consume_measurement.assert_called_once_with(
            measurement
        )
        view_instrument_match2.consume_measurement.assert_called_once_with(
            measurement
        )
        view_instrument_match3.consume_measurement.assert_not_called()

        measurement = Measurement(1, instrument2)
        storage.consume_measurement(measurement)
        view_instrument_match3.consume_measurement.assert_called_once_with(
            measurement
        )

        # collect() should call collect on all of its _ViewInstrumentMatch objects and combine them together
        all_metrics = [Mock() for _ in range(6)]
        view_instrument_match1.collect.return_value = all_metrics[:2]
        view_instrument_match2.collect.return_value = all_metrics[2:4]
        view_instrument_match3.collect.return_value = all_metrics[4:]

        result = storage.collect(AggregationTemporality.CUMULATIVE)
        view_instrument_match1.collect.assert_called_once()
        view_instrument_match2.collect.assert_called_once()
        view_instrument_match3.collect.assert_called_once()
        self.assertEqual(result, all_metrics)

    @patch(
        "opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
    )
    def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock):
        """Concurrent first measurements must create exactly one match object."""
        mock_view_instrument_match_ctor = MockFunc()
        MockViewInstrumentMatch.side_effect = mock_view_instrument_match_ctor

        instrument1 = Mock(name="instrument1")
        # BUG FIX: mock_view_matching's first positional argument is the view
        # *name*. Previously the instrument was passed there, leaving the
        # instrument list empty, so the view matched nothing and this test
        # exercised the default-view fallback instead of the configured view.
        view1 = mock_view_matching("view1", instrument1)
        storage = MetricReaderStorage(
            SdkConfiguration(
                resource=Mock(),
                metric_readers=(),
                views=(view1,),
            )
        )

        def send_measurement():
            storage.consume_measurement(Measurement(1, instrument1))

        # race sending many measurements concurrently
        self.run_with_many_threads(send_measurement)

        # _ViewInstrumentMatch constructor should have only been called once
        self.assertEqual(mock_view_instrument_match_ctor.call_count, 1)

    @patch(
        "opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
    )
    def test_default_view_enabled(self, MockViewInstrumentMatch: Mock):
        """Instruments should be matched with default views when enabled"""
        instrument1 = Mock(name="instrument1")
        instrument2 = Mock(name="instrument2")
        storage = MetricReaderStorage(
            SdkConfiguration(
                resource=Mock(),
                metric_readers=(),
                views=(),
            )
        )

        # First measurement from instrument1 creates exactly one default match.
        storage.consume_measurement(Measurement(1, instrument1))
        self.assertEqual(
            len(MockViewInstrumentMatch.call_args_list),
            1,
            MockViewInstrumentMatch.mock_calls,
        )
        # Subsequent measurements reuse the existing match.
        storage.consume_measurement(Measurement(1, instrument1))
        self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)

        # A different instrument gets its own default match.
        MockViewInstrumentMatch.call_args_list.clear()
        storage.consume_measurement(Measurement(1, instrument2))
        self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)

    def test_drop_aggregation(self):
        """A view configured with DropAggregation must collect no metrics."""
        counter = Counter("name", Mock(), Mock())
        metric_reader_storage = MetricReaderStorage(
            SdkConfiguration(
                resource=Mock(),
                metric_readers=(),
                views=(
                    View(
                        instrument_name="name", aggregation=DropAggregation()
                    ),
                ),
            )
        )
        metric_reader_storage.consume_measurement(Measurement(1, counter))

        self.assertEqual(
            [], metric_reader_storage.collect(AggregationTemporality.DELTA)
        )
| 38.038095 | 108 | 0.683525 |
from unittest.mock import Mock, patch
from opentelemetry.sdk._metrics.aggregation import DropAggregation
from opentelemetry.sdk._metrics.instrument import Counter
from opentelemetry.sdk._metrics.measurement import Measurement
from opentelemetry.sdk._metrics.metric_reader_storage import (
MetricReaderStorage,
)
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
from opentelemetry.sdk._metrics.view import View
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
def mock_view_matching(name, *instruments) -> Mock:
mock = Mock(name=name)
mock._match.side_effect = lambda instrument: instrument in instruments
return mock
def mock_instrument() -> Mock:
instr = Mock()
instr.attributes = {}
return instr
class TestMetricReaderStorage(ConcurrencyTestBase):
@patch(
"opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
)
def test_creates_view_instrument_matches(
self, MockViewInstrumentMatch: Mock
):
instrument1 = Mock(name="instrument1")
instrument2 = Mock(name="instrument2")
view1 = mock_view_matching("view_1", instrument1)
view2 = mock_view_matching("view_2", instrument1, instrument2)
storage = MetricReaderStorage(
SdkConfiguration(
resource=Mock(),
metric_readers=(),
views=(view1, view2),
)
)
storage.consume_measurement(Measurement(1, instrument1))
self.assertEqual(
len(MockViewInstrumentMatch.call_args_list),
2,
MockViewInstrumentMatch.mock_calls,
)
storage.consume_measurement(Measurement(1, instrument1))
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 2)
MockViewInstrumentMatch.call_args_list.clear()
storage.consume_measurement(Measurement(1, instrument2))
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
@patch(
"opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
)
def test_forwards_calls_to_view_instrument_match(
self, MockViewInstrumentMatch: Mock
):
view_instrument_match1 = Mock()
view_instrument_match2 = Mock()
view_instrument_match3 = Mock()
MockViewInstrumentMatch.side_effect = [
view_instrument_match1,
view_instrument_match2,
view_instrument_match3,
]
instrument1 = Mock(name="instrument1")
instrument2 = Mock(name="instrument2")
view1 = mock_view_matching("view1", instrument1)
view2 = mock_view_matching("view2", instrument1, instrument2)
storage = MetricReaderStorage(
SdkConfiguration(
resource=Mock(),
metric_readers=(),
views=(view1, view2),
)
)
measurement = Measurement(1, instrument1)
storage.consume_measurement(measurement)
view_instrument_match1.consume_measurement.assert_called_once_with(
measurement
)
view_instrument_match2.consume_measurement.assert_called_once_with(
measurement
)
view_instrument_match3.consume_measurement.assert_not_called()
measurement = Measurement(1, instrument2)
storage.consume_measurement(measurement)
view_instrument_match3.consume_measurement.assert_called_once_with(
measurement
)
all_metrics = [Mock() for _ in range(6)]
view_instrument_match1.collect.return_value = all_metrics[:2]
view_instrument_match2.collect.return_value = all_metrics[2:4]
view_instrument_match3.collect.return_value = all_metrics[4:]
result = storage.collect(AggregationTemporality.CUMULATIVE)
view_instrument_match1.collect.assert_called_once()
view_instrument_match2.collect.assert_called_once()
view_instrument_match3.collect.assert_called_once()
self.assertEqual(result, all_metrics)
@patch(
"opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
)
def test_race_concurrent_measurements(self, MockViewInstrumentMatch: Mock):
mock_view_instrument_match_ctor = MockFunc()
MockViewInstrumentMatch.side_effect = mock_view_instrument_match_ctor
instrument1 = Mock(name="instrument1")
view1 = mock_view_matching(instrument1)
storage = MetricReaderStorage(
SdkConfiguration(
resource=Mock(),
metric_readers=(),
views=(view1,),
)
)
def send_measurement():
storage.consume_measurement(Measurement(1, instrument1))
self.run_with_many_threads(send_measurement)
self.assertEqual(mock_view_instrument_match_ctor.call_count, 1)
@patch(
"opentelemetry.sdk._metrics.metric_reader_storage._ViewInstrumentMatch"
)
def test_default_view_enabled(self, MockViewInstrumentMatch: Mock):
instrument1 = Mock(name="instrument1")
instrument2 = Mock(name="instrument2")
storage = MetricReaderStorage(
SdkConfiguration(
resource=Mock(),
metric_readers=(),
views=(),
)
)
storage.consume_measurement(Measurement(1, instrument1))
self.assertEqual(
len(MockViewInstrumentMatch.call_args_list),
1,
MockViewInstrumentMatch.mock_calls,
)
storage.consume_measurement(Measurement(1, instrument1))
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
MockViewInstrumentMatch.call_args_list.clear()
storage.consume_measurement(Measurement(1, instrument2))
self.assertEqual(len(MockViewInstrumentMatch.call_args_list), 1)
def test_drop_aggregation(self):
counter = Counter("name", Mock(), Mock())
metric_reader_storage = MetricReaderStorage(
SdkConfiguration(
resource=Mock(),
metric_readers=(),
views=(
View(
instrument_name="name", aggregation=DropAggregation()
),
),
)
)
metric_reader_storage.consume_measurement(Measurement(1, counter))
self.assertEqual(
[], metric_reader_storage.collect(AggregationTemporality.DELTA)
)
| true | true |
f7ffc6e3e47bc3b5d96b26ae3ea298bc61889322 | 3,518 | py | Python | scripting/models.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | null | null | null | scripting/models.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | 9 | 2016-09-15T05:12:36.000Z | 2016-10-27T21:38:40.000Z | scripting/models.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
from common.core import view_link, edit_link
# Supported script languages as (stored value, display label) pairs;
# consumed as `choices` by Script.lang below.
langs = (
    ('py', 'Python'),
    ('js', 'Javascript')
)
class Script(models.Model):
    """Named, categorized handle for an executable script."""

    name = models.CharField('Script Name', max_length=255)
    category = models.CharField('Script Category', max_length=255)
    description = models.CharField('Script Description', max_length=1024)
    lang = models.CharField('Script language', max_length=16, choices=langs)

    def __str__(self):
        # Scripts are labelled "<category>:<name>" throughout the UI.
        return '{}:{}'.format(self.category, self.name)

    def edit_link(self):
        """HTML link to this script's editor page."""
        return edit_link('script:script_editor', (self.pk,), text='Edit Script')

    def view_link(self):
        """HTML link to this script's read-only page."""
        return view_link('script:script_view', (self.pk,), text='View Script')

    def dashboard_link(self):
        """HTML anchor pointing at the global script dashboard."""
        return '<a href="{}">Script Dashboard</a>'.format(
            reverse('script:script_dashboard')
        )

    def nav_link(self):
        """Navigation fragment: dashboard link plus this script's view link."""
        return ' | '.join((self.dashboard_link(), self.view_link()))

    def get_latest_source(self):
        """Return the highest-version Source of this script.

        Raises IndexError when the script has no Source rows.
        """
        return Source.objects.filter(script=self).order_by('-version')[0]

    def to_form_fields(self):
        """Describe the editable fields of this script as form-field dicts."""
        fields = [
            {'label': 'Name:', 'name': 'name', 'value': self.name},
            {'label': 'Category', 'name': 'category', 'value': self.category},
            {'label': 'Description', 'name': 'description', 'value': self.description},
        ]
        fields.append({
            'type': 'select', 'label': 'Language',
            'name': 'lang', 'value': self.lang,
            'options': langs,
        })
        fields.append({'type': 'hidden', 'name': 'pk', 'value': self.pk})
        return fields
class Source(models.Model):
    """One stored version of a script's source code."""

    version = models.IntegerField('Version number')
    source = models.TextField('Source code')
    message = models.TextField('Change message')
    script = models.ForeignKey(Script, on_delete=models.CASCADE, null=True)

    def __str__(self):
        # "<script>#<version>"
        return '{}#{}'.format(self.script, self.version)

    def short_message(self):
        """First 20 characters of the change message."""
        preview_length = 20
        return self.message[:preview_length]

    def edit_link(self):
        """HTML link to the editor page for this version."""
        return edit_link('script:source_editor', (self.pk,), text='Edit Version')

    def view_link(self):
        """HTML link to the read-only page for this version."""
        return view_link('script:source_view', (self.pk,), text='View Version')

    def nav_link(self):
        """Navigation fragment: parent script's edit link plus this version's view link."""
        return ' | '.join((self.script.edit_link(), self.view_link()))

    def to_form_fields(self):
        """Describe the editable fields of this version as form-field dicts."""
        fields = [
            {'label': 'Message: ', 'name': 'message', 'value': self.message},
            {'label': 'Version: ', 'name': 'version', 'value': self.version},
        ]
        fields.append({'type': 'hidden', 'name': 'pk', 'value': self.pk})
        return fields
class Log_Message(models.Model):
    """Minimal log record for apps built on the scripting system."""

    # NOTE(review): verbose_name typo "contigous" is kept byte-identical;
    # fixing it would generate a Django migration.
    stamp = models.DateTimeField('Start date of contigous data pulled')
    app_name = models.TextField('Name of app to register message')
    short_message = models.TextField('Short description of message')
    long_message = models.TextField('Long description of message')

    def __str__(self):
        # "[<timestamp>] <app>: <short message>"
        return '[{}] {}: {}'.format(self.stamp, self.app_name, self.short_message)

    def to_form_fields(self):
        """Describe this record as generic form-field dicts."""
        field_specs = (
            ('App Name', 'app', self.app_name),
            ('Short Message', 'short', self.short_message),
            ('Long Message', 'long', self.long_message),
        )
        return [
            {'label': label, 'name': name, 'value': value}
            for label, name, value in field_specs
        ]
| 35.18 | 87 | 0.614554 | from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
from common.core import view_link, edit_link
langs = (
('py', 'Python'),
('js', 'Javascript')
)
class Script(models.Model):
    """A named, categorized script with a language chosen from ``langs``."""
    name = models.CharField('Script Name', max_length=255)
    category = models.CharField('Script Category', max_length=255)
    description = models.CharField('Script Description', max_length=1024)
    lang = models.CharField('Script language', max_length=16, choices=langs)

    def __str__(self):
        # Rendered as "<category>:<name>".
        return '%s:%s' % (self.category, self.name)

    def edit_link(self):
        return edit_link('script:script_editor', (self.pk,), text='Edit Script')

    def view_link(self):
        return view_link('script:script_view', (self.pk,), text='View Script')

    def dashboard_link(self):
        # Anchor back to the script dashboard page.
        url = reverse('script:script_dashboard')
        return '<a href="%s">Script Dashboard</a>' % url

    def nav_link(self):
        return ' | '.join([self.dashboard_link(), self.view_link()])

    def get_latest_source(self):
        # Highest version number wins; assumes at least one Source exists.
        return Source.objects.order_by('-version').filter(script=self)[0]

    def to_form_fields(self):
        """Field descriptors consumed by the generic form renderer."""
        fields = [
            {'label': 'Name:', 'name': 'name', 'value': self.name},
            {'label': 'Category', 'name': 'category', 'value': self.category},
            {'label': 'Description', 'name': 'description', 'value': self.description},
        ]
        # Language is a select box driven by the module-level ``langs`` choices.
        fields.append({
            'type': 'select', 'label': 'Language',
            'name': 'lang', 'value': self.lang,
            'options': langs
        })
        fields.append({'type': 'hidden', 'name': 'pk', 'value': self.pk})
        return fields
class Source(models.Model):
    """Versioned source-code snapshot attached to a Script."""
    version = models.IntegerField('Version number')
    source = models.TextField('Source code')
    message = models.TextField('Change message')
    script = models.ForeignKey(Script, on_delete=models.CASCADE, null=True)

    def __str__(self):
        # "<script>#<version>"
        return '{0}#{1}'.format(str(self.script), str(self.version))

    def short_message(self):
        # First 20 characters of the change message, for list views.
        return self.message[:20]

    def edit_link(self):
        return edit_link('script:source_editor', (self.pk,), text='Edit Version')

    def view_link(self):
        return view_link('script:source_view', (self.pk,), text='View Version')

    def nav_link(self):
        # Script editor link followed by this version's view link.
        return ' | '.join((self.script.edit_link(), self.view_link()))

    def to_form_fields(self):
        """Field descriptors consumed by the generic form renderer."""
        return [
            {'label': 'Message: ', 'name': 'message', 'value': self.message},
            {'label': 'Version: ', 'name': 'version', 'value': self.version},
            {'type': 'hidden', 'name': 'pk', 'value': self.pk},
        ]
class Log_Message(models.Model):
    """Simple timestamped log entry recorded per application."""
    stamp = models.DateTimeField('Start date of contigous data pulled')
    app_name = models.TextField('Name of app to register message')
    short_message = models.TextField('Short description of message')
    long_message = models.TextField('Long description of message')

    def __str__(self):
        # "[<stamp>] <app>: <short message>"
        parts = (self.stamp, self.app_name, self.short_message)
        return '[%s] %s: %s' % parts

    def to_form_fields(self):
        """Field descriptors consumed by the generic form renderer."""
        fields = []
        fields.append({'label': 'App Name', 'name': 'app', 'value': self.app_name})
        fields.append({'label': 'Short Message', 'name': 'short', 'value': self.short_message})
        fields.append({'label': 'Long Message', 'name': 'long', 'value': self.long_message})
        return fields
| true | true |
f7ffc6e72ca5734cd6f20a437de6634e65ba36bd | 1,747 | py | Python | juriscraper/oral_args/united_states/state/md.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 228 | 2015-01-23T04:41:39.000Z | 2022-03-30T09:52:20.000Z | juriscraper/oral_args/united_states/state/md.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 331 | 2015-01-05T18:53:40.000Z | 2022-03-29T23:43:30.000Z | juriscraper/oral_args/united_states/state/md.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 84 | 2015-01-03T01:19:21.000Z | 2022-03-01T08:09:32.000Z | """Scraper for Maryland Supreme Court Oral Argument Audio
This scraper has an interesting history. It was briefly running on the live
site, but we realized shortly after starting it that the scraper was
downloading video, not audio!
Seeing that we weren't ready for video, we disabled this scraper and deleted
any traces of it on the server.
One interesting lesson though was that the OA system didn't crumble or budge
when this was running. The video converted to mp3 just fine (each item took a
few hours) and we began hosting it like nothing was different. Go figure.
Your humble editor,
Mike
CourtID: md
Court Short Name: Md.
"""
from juriscraper.OralArgumentSiteLinear import OralArgumentSiteLinear
class Site(OralArgumentSiteLinear):
    """Scraper for the Maryland Court of Appeals webcast archive table."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = "http://www.courts.state.md.us/coappeals/webcasts/webcastarchive.html"

    def _process_html(self):
        # Keep rows whose second cell carries a link, excluding the
        # "Bar Admissions" entries.
        row_xpath = "//tr[.//td[2]//a/@href][not(contains(.//@href, 'baradmission'))]"
        for row in self.html.xpath(row_xpath):
            docket_cell = row.xpath("./td[2]")[0]
            # Case names are bolded, but the markup alternates between
            # <b> and <strong> tags.
            name_node = row.xpath("./td[3]/*[self::b or self::strong]")[0]
            self.cases.append({
                "date": row.xpath("./td[1]")[0].text_content(),
                "name": name_node.text_content(),
                "docket": docket_cell.text_content(),
                "url": docket_cell.xpath(".//a/@href")[0],
            })
| 34.254902 | 93 | 0.616485 |
from juriscraper.OralArgumentSiteLinear import OralArgumentSiteLinear
class Site(OralArgumentSiteLinear):
    """Collects oral-argument webcast entries from the Maryland archive page."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.url = "http://www.courts.state.md.us/coappeals/webcasts/webcastarchive.html"

    def _process_html(self):
        # Select table rows with a link in the second cell; drop the
        # "bar admission" ceremony rows.
        selector = (
            "//tr[.//td[2]//a/@href][not(contains(.//@href, 'baradmission'))]"
        )
        matched_rows = self.html.xpath(selector)
        for tr in matched_rows:
            second_cell = tr.xpath("./td[2]")[0]
            # The bolded case name may be wrapped in either <b> or <strong>.
            case = {
                "date": tr.xpath("./td[1]")[0].text_content(),
                "name": tr.xpath("./td[3]/*[self::b or self::strong]")[0].text_content(),
                "docket": second_cell.text_content(),
                "url": second_cell.xpath(".//a/@href")[0],
            }
            self.cases.append(case)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.