hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e421253d5fba18942d17797bdb68d587f5cf8153 | 513 | py | Python | data/add_teacher.py | Parenty/pytest | e105c547623e028af2c8c38195540144a1f662d8 | [
"Apache-2.0"
] | 1 | 2019-04-28T09:52:58.000Z | 2019-04-28T09:52:58.000Z | data/add_teacher.py | Parenty/pytest | e105c547623e028af2c8c38195540144a1f662d8 | [
"Apache-2.0"
] | null | null | null | data/add_teacher.py | Parenty/pytest | e105c547623e028af2c8c38195540144a1f662d8 | [
"Apache-2.0"
] | null | null | null | from model.registration import Reg
# Group (class) fixtures used for the teacher-registration test.
testdata_reg = [
    Reg(number_class='3', name_class='проверка регистрации', subject_list=['math', 'rus', 'eng'], email='dmitriev+14@uchi.ru',
        password='1'),
    # Reg(number_class='3', name_class='метка б', subject_list=['math', 'rus', 'eng'], email='dmitriev+1@uchi.ru',
    #     password='123'),
    # Reg(number_class='3', name_class='метка с', subject_list=['math', 'rus', 'eng'], email='dmitriev+2@uchi.ru',
    #     password='123')
]
| 42.75 | 126 | 0.641326 |
a2bd0467d4047ec0af992b089e8f8feb9e229c5d | 345 | py | Python | sdk/keyvault/azure-security-keyvault/azure/security/keyvault/version.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/keyvault/azure-security-keyvault/azure/security/keyvault/version.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/keyvault/azure-security-keyvault/azure/security/keyvault/version.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "0.0.1"
| 38.333333 | 76 | 0.414493 |
bc28fccb95d2197e5c27785d99775f7ac5464e9e | 1,291 | py | Python | Python/file_replace_newline/src/file_operations.py | toddnguyen47/utility-files | de7d9839b5265c10d7ec017f8e08dfb8d6caac2a | [
"MIT"
] | null | null | null | Python/file_replace_newline/src/file_operations.py | toddnguyen47/utility-files | de7d9839b5265c10d7ec017f8e08dfb8d6caac2a | [
"MIT"
] | 1 | 2021-04-20T22:01:09.000Z | 2021-04-20T22:01:09.000Z | Python/file_replace_newline/src/file_operations.py | toddnguyen47/utility-files | de7d9839b5265c10d7ec017f8e08dfb8d6caac2a | [
"MIT"
] | 1 | 2021-03-18T03:42:03.000Z | 2021-03-18T03:42:03.000Z | from typing import List
import re
# Matches the first non-whitespace character of a line; no match means the
# line consists entirely of whitespace. Raw string fixes the invalid escape
# sequence "\S" that non-raw literals warn about on modern Python.
_non_whitespace = re.compile(r"\S")


class FileOperations:
    """Rewrite a text file in place with trailing whitespace removed.

    Lines that contain only whitespace are reduced to empty lines; all other
    lines keep their content but lose trailing whitespace. The file is always
    written back with "\\n" line endings, one per line.
    """

    def __init__(self, file_path: str):
        self._file_path: str = file_path  # path of the file to process
        self._lines: List[str] = []  # in-memory lines, newline-stripped

    def execute(self) -> None:
        """Run the full read -> trim -> write-back pipeline."""
        self._read_in_file()
        self._trim_all_lines()
        self._output_back_to_file()

    def _read_in_file(self) -> None:
        """Load the file into ``self._lines``, dropping trailing newlines only."""
        with open(self._file_path, "r") as file:
            self._lines = [line.rstrip("\n") for line in file]

    def _trim_all_lines(self) -> None:
        """Trim each buffered line according to whether it has any content."""
        for index, line in enumerate(self._lines):
            if _non_whitespace.search(line) is None:
                # No non-whitespace character at all: collapse to empty.
                self._lines[index] = self._handle_lines_with_only_whitespace(line)
            else:
                self._lines[index] = self._handle_lines_with_characters(line)

    def _output_back_to_file(self) -> None:
        """Write the trimmed lines back to the same file, "\\n"-terminated."""
        with open(self._file_path, "w") as file:
            for line in self._lines:
                file.write(line)
                file.write("\n")

    def _handle_lines_with_only_whitespace(self, line: str) -> str:
        """Return the line with all surrounding whitespace removed (empty result)."""
        return line.strip()

    def _handle_lines_with_characters(self, line: str) -> str:
        """Return the line with only trailing whitespace removed."""
        return line.rstrip()
| 31.487805 | 85 | 0.620449 |
7ab286219bf5abea85cc22b5bc9c6d08e938a11d | 18,034 | py | Python | intersight/models/iam_resource_limits.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | intersight/models/iam_resource_limits.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | intersight/models/iam_resource_limits.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamResourceLimits(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'create_time': 'datetime',
'domain_group_moid': 'str',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'shared_scope': 'str',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'ancestors': 'list[MoBaseMoRef]',
'parent': 'MoBaseMoRef',
'permission_resources': 'list[MoBaseMoRef]',
'per_account_user_limit': 'int',
'account': 'IamAccountRef'
}
attribute_map = {
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'per_account_user_limit': 'PerAccountUserLimit',
'account': 'Account'
}
def __init__(self, account_moid=None, create_time=None, domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, per_account_user_limit=None, account=None):
"""
IamResourceLimits - a model defined in Swagger
"""
self._account_moid = None
self._create_time = None
self._domain_group_moid = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._shared_scope = None
self._tags = None
self._version_context = None
self._ancestors = None
self._parent = None
self._permission_resources = None
self._per_account_user_limit = None
self._account = None
if account_moid is not None:
self.account_moid = account_moid
if create_time is not None:
self.create_time = create_time
if domain_group_moid is not None:
self.domain_group_moid = domain_group_moid
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if shared_scope is not None:
self.shared_scope = shared_scope
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if ancestors is not None:
self.ancestors = ancestors
if parent is not None:
self.parent = parent
if permission_resources is not None:
self.permission_resources = permission_resources
if per_account_user_limit is not None:
self.per_account_user_limit = per_account_user_limit
if account is not None:
self.account = account
@property
def account_moid(self):
"""
Gets the account_moid of this IamResourceLimits.
The Account ID for this managed object.
:return: The account_moid of this IamResourceLimits.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this IamResourceLimits.
The Account ID for this managed object.
:param account_moid: The account_moid of this IamResourceLimits.
:type: str
"""
self._account_moid = account_moid
@property
def create_time(self):
"""
Gets the create_time of this IamResourceLimits.
The time when this managed object was created.
:return: The create_time of this IamResourceLimits.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this IamResourceLimits.
The time when this managed object was created.
:param create_time: The create_time of this IamResourceLimits.
:type: datetime
"""
self._create_time = create_time
@property
def domain_group_moid(self):
"""
Gets the domain_group_moid of this IamResourceLimits.
The DomainGroup ID for this managed object.
:return: The domain_group_moid of this IamResourceLimits.
:rtype: str
"""
return self._domain_group_moid
@domain_group_moid.setter
def domain_group_moid(self, domain_group_moid):
"""
Sets the domain_group_moid of this IamResourceLimits.
The DomainGroup ID for this managed object.
:param domain_group_moid: The domain_group_moid of this IamResourceLimits.
:type: str
"""
self._domain_group_moid = domain_group_moid
@property
def mod_time(self):
"""
Gets the mod_time of this IamResourceLimits.
The time when this managed object was last modified.
:return: The mod_time of this IamResourceLimits.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this IamResourceLimits.
The time when this managed object was last modified.
:param mod_time: The mod_time of this IamResourceLimits.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this IamResourceLimits.
The unique identifier of this Managed Object instance.
:return: The moid of this IamResourceLimits.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamResourceLimits.
The unique identifier of this Managed Object instance.
:param moid: The moid of this IamResourceLimits.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamResourceLimits.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:return: The object_type of this IamResourceLimits.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamResourceLimits.
The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path.
:param object_type: The object_type of this IamResourceLimits.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this IamResourceLimits.
The array of owners which represent effective ownership of this object.
:return: The owners of this IamResourceLimits.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this IamResourceLimits.
The array of owners which represent effective ownership of this object.
:param owners: The owners of this IamResourceLimits.
:type: list[str]
"""
self._owners = owners
@property
def shared_scope(self):
"""
Gets the shared_scope of this IamResourceLimits.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:return: The shared_scope of this IamResourceLimits.
:rtype: str
"""
return self._shared_scope
@shared_scope.setter
def shared_scope(self, shared_scope):
"""
Sets the shared_scope of this IamResourceLimits.
Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.
:param shared_scope: The shared_scope of this IamResourceLimits.
:type: str
"""
self._shared_scope = shared_scope
@property
def tags(self):
"""
Gets the tags of this IamResourceLimits.
The array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this IamResourceLimits.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this IamResourceLimits.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IamResourceLimits.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this IamResourceLimits.
The versioning info for this managed object.
:return: The version_context of this IamResourceLimits.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this IamResourceLimits.
The versioning info for this managed object.
:param version_context: The version_context of this IamResourceLimits.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def ancestors(self):
"""
Gets the ancestors of this IamResourceLimits.
The array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this IamResourceLimits.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this IamResourceLimits.
The array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this IamResourceLimits.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def parent(self):
"""
Gets the parent of this IamResourceLimits.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this IamResourceLimits.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this IamResourceLimits.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this IamResourceLimits.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def permission_resources(self):
"""
Gets the permission_resources of this IamResourceLimits.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:return: The permission_resources of this IamResourceLimits.
:rtype: list[MoBaseMoRef]
"""
return self._permission_resources
@permission_resources.setter
def permission_resources(self, permission_resources):
"""
Sets the permission_resources of this IamResourceLimits.
A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources.
:param permission_resources: The permission_resources of this IamResourceLimits.
:type: list[MoBaseMoRef]
"""
self._permission_resources = permission_resources
@property
def per_account_user_limit(self):
"""
Gets the per_account_user_limit of this IamResourceLimits.
The maximum number of users allowed in an account. The default value is 200.
:return: The per_account_user_limit of this IamResourceLimits.
:rtype: int
"""
return self._per_account_user_limit
@per_account_user_limit.setter
def per_account_user_limit(self, per_account_user_limit):
"""
Sets the per_account_user_limit of this IamResourceLimits.
The maximum number of users allowed in an account. The default value is 200.
:param per_account_user_limit: The per_account_user_limit of this IamResourceLimits.
:type: int
"""
self._per_account_user_limit = per_account_user_limit
@property
def account(self):
"""
Gets the account of this IamResourceLimits.
A collection of references to the [iam.Account](mo://iam.Account) Managed Object. When this managed object is deleted, the referenced [iam.Account](mo://iam.Account) MO unsets its reference to this deleted MO.
:return: The account of this IamResourceLimits.
:rtype: IamAccountRef
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this IamResourceLimits.
A collection of references to the [iam.Account](mo://iam.Account) Managed Object. When this managed object is deleted, the referenced [iam.Account](mo://iam.Account) MO unsets its reference to this deleted MO.
:param account: The account of this IamResourceLimits.
:type: IamAccountRef
"""
self._account = account
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamResourceLimits):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 34.814672 | 738 | 0.64883 |
9c0a9c8d6b52a6004e16935f4494468359c20b29 | 8,045 | py | Python | rl-fmri/test_fMRI.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | rl-fmri/test_fMRI.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | rl-fmri/test_fMRI.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | import time
import pickle
import os
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.colors as mcolors
from tqdm import tqdm
import cv2
import pybullet
from stable_baselines3.common.policies import ActorCriticPolicy
import yaml
import torch as th
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv, VecNormalize, util
import common.utils as utils
import common.wrapper as wrapper
import common.plots as plots
g_step = 0
g_fMRI_data = None
def _predict_fMRI(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
    """Replacement for ActorCriticPolicy._predict that records latent activations.

    Monkey-patched onto ActorCriticPolicy (see assignment below) so that every
    prediction step stores the policy's latent_pi vector ("fMRI" recording).

    Side effects: writes latent_pi into row ``g_step`` of the global
    ``g_fMRI_data`` buffer and increments ``g_step``. The buffer must have been
    allocated with at least as many rows as prediction steps taken, otherwise
    an IndexError is raised here.
    """
    global g_step, g_fMRI_data
    latent_pi, _, latent_sde = self._get_latent(observation)
    # print(g_step, latent_pi)
    g_fMRI_data[g_step, :] = latent_pi
    g_step += 1
    distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
    return distribution.get_actions(deterministic=deterministic)
# Install the recording predict on every ActorCriticPolicy instance.
ActorCriticPolicy._predict = _predict_fMRI
def test(seed, model_filename, vec_filename, train, test, test_as_class=0, render=False, save_file="default.yml"):
global g_step, g_fMRI_data
print("Testing:")
total_rewards = []
distance_xs = []
if True:
g_step = 0
g_fMRI_data = np.zeros(shape=[args.test_steps,256], dtype=np.float32)
print(f" Seed {seed}, model {model_filename} vec {vec_filename}")
print(f" Train on {train}, test on {test}, w/ bodyinfo {test_as_class}")
if test_as_class>=0:
bodyinfo = test_as_class
else:
if args.with_bodyinfo:
bodyinfo = test//100
else:
bodyinfo = 0
eval_env = utils.make_env(render=render, robot_body=test, body_info=bodyinfo)
eval_env = DummyVecEnv([eval_env])
if args.vec_normalize:
eval_env = VecNormalize.load(vec_filename, eval_env)
eval_env.norm_reward = False
eval_env.seed(seed)
model = PPO.load(model_filename)
obs = eval_env.reset()
if render:
# eval_env.env_method("set_view")
print("\n\nWait for a while, so I have the time to press Ctrl+F11 to enter FullScreen Mode.\n\n")
time.sleep(3) # Wait for a while, so I have the time to press Ctrl+F11 to enter FullScreen Mode.
distance_x = 0
# print(obs)
total_reward = 0
for step in tqdm(range(args.test_steps)):
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = eval_env.step(action)
if render:
eval_env.envs[0].camera_adjust()
(width, height, rgbPixels, _, _) = eval_env.envs[0].env.env._p.getCameraImage(1920,1080, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
image = rgbPixels[:,:,:3]
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(f"{folder}/fMRI_videos/getCameraImage_b{test}_s{seed}_{step:05}.png", image)
if done:
# it should not matter if the env reset. I guess...
# break
pass
else: # the last observation will be after reset, so skip the last
distance_x = eval_env.envs[0].robot.body_xyz[0]
total_reward += reward[0]
# if render:
# time.sleep(0.01)
eval_env.close()
print(f"train {train}, test {test}, test_as_class {test_as_class}, step {step}, total_reward {total_reward}, distance_x {distance_x}")
if args.save_fmri:
base_fMRI_data = None
sorted_data = g_fMRI_data.copy()
if test!=0 or seed!=0:
# if sorted_arg exists, use the existing one
# because we want to compare the patterns of two experiments
sorted_arg = np.load(f"{folder}/sorted_arg.npy")
base_fMRI_data = np.load(f"{folder}/base_fMRI_data.npy")
else:
sorted_arg = np.argsort(np.mean(sorted_data,axis=0))
np.save(f"{folder}/sorted_arg.npy", sorted_arg)
base_fMRI_data = g_fMRI_data.copy()
np.save(f"{folder}/base_fMRI_data.npy", base_fMRI_data)
sorted_data = sorted_data[:,sorted_arg]
base_fMRI_data = base_fMRI_data[:, sorted_arg]
for step in tqdm(range(args.test_steps)):
plt.close()
plt.figure(figsize=[10,4])
if test!=0 or seed!=0:
x = sorted_data[step]
plt.bar(np.arange(len(x)), x, color=[0.4, 0.7, 0.9, 0.5])
x = base_fMRI_data[step]
plt.bar(np.arange(len(x)), x, color=[0.3, 0.3, 0.3, 0.5])
plt.savefig(f"{folder}/fMRI_videos/barchart_b{test}_s{seed}_{step:05}.png")
plt.close()
total_rewards.append(total_reward)
distance_xs.append(distance_x)
# avoid yaml turn float64 to numpy array
total_rewards = [float(x) for x in total_rewards]
distance_xs = [float(x) for x in distance_xs]
data = {
"title": "test",
"train": train,
"test": test,
"total_reward": total_rewards,
"distance_x": distance_xs,
}
with open(f"{save_file}", "w") as f:
yaml.dump(data, f)
if __name__ == "__main__": # noqa: C901
args = utils.args
folder = utils.folder
train_bodies = [int(x) for x in args.train_bodies.split(',')]
test_bodies = [int(x) for x in args.test_bodies.split(',')]
test_as_class = args.test_as_class
seed = args.seed
model_filename = f"model-ant-{'-'.join(str(x) for x in train_bodies)}.zip"
vec_filename = model_filename[:-4] + "-vecnormalize.pkl"
os.makedirs(f"{folder}/test-results/", exist_ok=True)
fig, axes = plt.subplots(nrows=len(test_bodies), figsize=(10,10))
base_fMRI_data = None
# Baseline
test_body = test_bodies[0]
test(seed=seed, model_filename=f"{folder}/{model_filename}", vec_filename=f"{folder}/{vec_filename}",
train=train_bodies, test=test_body, test_as_class=test_as_class, render=args.render,
save_file=f"{folder}/test-results/{model_filename[:-4]}-test-{test_body}-class-{test_as_class}.yaml")
base_fMRI_data = g_fMRI_data.copy()
np.save(f"{folder}/fMRI_data_{test_body}", base_fMRI_data)
# Plots
bar_colors = list(mcolors.TABLEAU_COLORS.values())
def one_subplot(relative_fMRI_data, ax, title, color_idx):
ax.bar(np.arange(relative_fMRI_data.shape[1]), np.mean(relative_fMRI_data[100:900,:], axis=0), color=bar_colors[color_idx])
ax.set_xlabel("step")
ax.set_ylabel("activation")
ax.set_ylim(-0.5, 0.5)
ax.set_title(title)
if args.compare_seed: # compare seed
test(seed=seed+1, model_filename=f"{folder}/{model_filename}", vec_filename=f"{folder}/{vec_filename}",
train=train_bodies, test=test_body, test_as_class=test_as_class, render=args.render,
save_file=f"{folder}/test-results/{model_filename[:-4]}-test-{test_body}-class-{test_as_class}.yaml")
relative_fMRI_data = g_fMRI_data - base_fMRI_data
one_subplot(relative_fMRI_data, axes[0], "Different Seed", 0)
for i, test_body in enumerate(test_bodies):
if i>0:
test(seed=seed, model_filename=f"{folder}/{model_filename}", vec_filename=f"{folder}/{vec_filename}",
train=train_bodies, test=test_body, test_as_class=test_as_class, render=args.render,
save_file=f"{folder}/test-results/{model_filename[:-4]}-test-{test_body}-class-{test_as_class}.yaml")
relative_fMRI_data = g_fMRI_data - base_fMRI_data
one_subplot(relative_fMRI_data, axes[i], f"Difference between {test_body} and baseline", i)
np.save(f"{folder}/fMRI_data_{test_body}", g_fMRI_data)
plt.tight_layout()
plt.savefig(f"{folder}/fMRI.png")
| 42.342105 | 149 | 0.63729 |
54ef3244dda5ea03bda4fe6d287d3b13094834fe | 38,549 | py | Python | dojo/filters.py | everable/django-DefectDojo | 1548982c1f197f3f702162846d314b16bb687c78 | [
"BSD-3-Clause"
] | 1 | 2020-03-27T06:58:40.000Z | 2020-03-27T06:58:40.000Z | dojo/filters.py | viral-sangani/django-DefectDojo | e0fa585ee770bd0b15ba4a73bd81e6cc043650fd | [
"BSD-3-Clause"
] | 30 | 2020-08-11T21:29:57.000Z | 2022-03-17T19:02:50.000Z | dojo/filters.py | viral-sangani/django-DefectDojo | e0fa585ee770bd0b15ba4a73bd81e6cc043650fd | [
"BSD-3-Clause"
] | 1 | 2020-09-30T13:01:50.000Z | 2020-09-30T13:01:50.000Z | __author__ = 'Jay Paz'
import collections
from datetime import timedelta, datetime
from auditlog.models import LogEntry
from django.contrib.auth.models import User
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django_filters import FilterSet, CharFilter, OrderingFilter, \
ModelMultipleChoiceFilter, ModelChoiceFilter, MultipleChoiceFilter, \
BooleanFilter
from django_filters.filters import ChoiceFilter, _truncate, DateTimeFilter
from pytz import timezone
from dojo.models import Dojo_User, Product_Type, Finding, Product, Test_Type, \
Endpoint, Development_Environment, Finding_Template, Report, Note_Type, \
Engagement_Survey, Question, TextQuestion, ChoiceQuestion
from dojo.utils import get_system_setting
from django.contrib.contenttypes.models import ContentType
local_tz = timezone(get_system_setting('time_zone'))
SEVERITY_CHOICES = (('Info', 'Info'), ('Low', 'Low'), ('Medium', 'Medium'),
('High', 'High'), ('Critical', 'Critical'))
BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'),)
EARLIEST_FINDING = None
def now():
    """Return the current moment as a timezone-aware datetime in the configured zone."""
    current = datetime.today()
    return local_tz.localize(current)
def get_earliest_finding():
    """Return the oldest Finding (by ``date``), or None when none exist.

    The result is cached in the module-level EARLIEST_FINDING global.
    NOTE(review): the cache is never invalidated, so the value is stale after
    the earliest finding is deleted — confirm whether that matters to callers.
    """
    global EARLIEST_FINDING
    if EARLIEST_FINDING is not None:
        return EARLIEST_FINDING
    try:
        # EAFP: let the ORM raise rather than pre-checking for emptiness.
        EARLIEST_FINDING = Finding.objects.earliest('date')
    except Finding.DoesNotExist:
        EARLIEST_FINDING = None
    return EARLIEST_FINDING
class DojoFilter(FilterSet):
    """Common base class for DefectDojo filter sets.

    Currently only forwards construction to django_filters.FilterSet; exists
    as a single extension point for behavior shared by all Dojo filters.
    """
    def __init__(self, *args, **kwargs):
        super(DojoFilter, self).__init__(*args, **kwargs)
class DateRangeFilter(ChoiceFilter):
    """ChoiceFilter offering preset date ranges (today, past 7/30/90 days, ...).

    ``options`` maps each submitted choice value to a ``(label, transform)``
    pair, where ``transform(qs, name)`` filters the queryset on the date
    field called ``name``. The empty-string key is the no-op "Any date".
    """
    options = {
        '': (_('Any date'), lambda qs, name: qs.all()),
        1: (_('Today'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month,
            '%s__day' % name: now().day
        })),
        2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=7)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
        3: (_('Past 30 days'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=30)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
        4: (_('Past 90 days'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=90)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
        5: (_('Current month'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month
        })),
        6: (_('Current year'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
        })),
        7: (_('Past year'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=365)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
    }

    def __init__(self, *args, **kwargs):
        # Expose the option labels as this filter's form choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(DateRangeFilter, self).__init__(*args, **kwargs)

    def filter(self, qs, value):
        # The widget submits strings; anything that is not a valid int key
        # falls back to '' ("Any date").
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        return self.options[value][1](qs, self.field_name)
class MitigatedDateRangeFilter(ChoiceFilter):
    """Yes/No/Either filter on whether a date field (mitigated date) is set.

    "Yes" keeps rows where the field is non-null, "No" where it is null, and
    the empty-string default applies no filtering.
    """
    options = {
        '': (_('Either'), lambda qs, name: qs.all()),
        1: (_('Yes'), lambda qs, name: qs.filter(**{
            '%s__isnull' % name: False
        })),
        2: (_('No'), lambda qs, name: qs.filter(**{
            '%s__isnull' % name: True
        })),
    }

    def __init__(self, *args, **kwargs):
        # Expose the option labels as this filter's form choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(MitigatedDateRangeFilter, self).__init__(*args, **kwargs)

    def filter(self, qs, value):
        # Non-integer submissions fall back to '' ("Either").
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        return self.options[value][1](qs, self.field_name)
class ReportBooleanFilter(ChoiceFilter):
    """Yes/No/Either filter on a boolean model field for report screens.

    "Yes" keeps rows where the field is True, "No" where it is False, and the
    empty-string default applies no filtering.
    """
    options = {
        '': (_('Either'), lambda qs, name: qs.all()),
        1: (_('Yes'), lambda qs, name: qs.filter(**{
            '%s' % name: True
        })),
        2: (_('No'), lambda qs, name: qs.filter(**{
            '%s' % name: False
        })),
    }

    def __init__(self, *args, **kwargs):
        # Expose the option labels as this filter's form choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(ReportBooleanFilter, self).__init__(*args, **kwargs)

    def filter(self, qs, value):
        # Non-integer submissions fall back to '' ("Either").
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        return self.options[value][1](qs, self.field_name)
class ReportRiskAcceptanceFilter(ChoiceFilter):
    """Filter findings by whether a risk acceptance record exists."""
    def any(self, qs, name):
        """No restriction; return the queryset unchanged."""
        return qs.all()
    def accepted(self, qs, name):
        """Only findings whose engagement has a risk acceptance."""
        return qs.filter(risk_acceptance__isnull=False)
    def not_accepted(self, qs, name):
        """Only findings whose engagement has no risk acceptance."""
        return qs.filter(risk_acceptance__isnull=True)
    options = {
        '': (_('Either'), any),
        1: (_('Yes'), accepted),
        2: (_('No'), not_accepted),
    }
    def __init__(self, *args, **kwargs):
        """Expose the ``options`` labels as the widget choices."""
        choices = [(key, option[0])
                   for key, option in six.iteritems(self.options)]
        kwargs['choices'] = choices
        super(ReportRiskAcceptanceFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        """Dispatch to the handler for the selected option."""
        try:
            key = int(value)
        except (ValueError, TypeError):
            key = ''
        # Handlers are stored as plain functions in ``options``, so ``self``
        # must be passed explicitly.
        return self.options[key][1](self, qs, self.field_name)
class MetricsDateRangeFilter(ChoiceFilter):
    """Date-range choice filter for the metrics views.

    Unlike DateRangeFilter, each option handler also records the selected
    window on the instance as ``self.start_date`` / ``self.end_date`` so
    callers can reuse the bounds after filtering.
    """
    def any(self, qs, name):
        # "Any date": leave the queryset unrestricted, but still publish a
        # window spanning from just before the earliest finding to tomorrow.
        # NOTE(review): if get_earliest_finding() is None, start/end_date are
        # not set here (filter() below sets them first, so this is benign).
        if get_earliest_finding() is not None:
            start_date = local_tz.localize(datetime.combine(
                get_earliest_finding().date, datetime.min.time())
            )
            self.start_date = _truncate(start_date - timedelta(days=1))
            self.end_date = _truncate(now() + timedelta(days=1))
        return qs.all()
    def current_month(self, qs, name):
        # Month-to-date: from the first of the current month to now.
        self.start_date = local_tz.localize(
            datetime(now().year, now().month, 1, 0, 0, 0))
        self.end_date = now()
        return qs.filter(**{
            '%s__year' % name: self.start_date.year,
            '%s__month' % name: self.start_date.month
        })
    def current_year(self, qs, name):
        # Year-to-date: from January 1st of the current year to now.
        self.start_date = local_tz.localize(
            datetime(now().year, 1, 1, 0, 0, 0))
        self.end_date = now()
        return qs.filter(**{
            '%s__year' % name: now().year,
        })
    def past_x_days(self, qs, name, days):
        # Shared helper: a [now - days, tomorrow) half-open window.
        self.start_date = _truncate(now() - timedelta(days=days))
        self.end_date = _truncate(now() + timedelta(days=1))
        return qs.filter(**{
            '%s__gte' % name: self.start_date,
            '%s__lt' % name: self.end_date,
        })
    def past_seven_days(self, qs, name):
        return self.past_x_days(qs, name, 7)
    def past_thirty_days(self, qs, name):
        return self.past_x_days(qs, name, 30)
    def past_ninety_days(self, qs, name):
        return self.past_x_days(qs, name, 90)
    def past_six_months(self, qs, name):
        # Approximates six months as 183 days.
        return self.past_x_days(qs, name, 183)
    def past_year(self, qs, name):
        return self.past_x_days(qs, name, 365)
    # '' (the default when input is unparsable) maps to "Past 30 days".
    options = {
        '': (_('Past 30 days'), past_thirty_days),
        1: (_('Past 7 days'), past_seven_days),
        2: (_('Past 90 days'), past_ninety_days),
        3: (_('Current month'), current_month),
        4: (_('Current year'), current_year),
        5: (_('Past 6 Months'), past_six_months),
        6: (_('Past year'), past_year),
        7: (_('Any date'), any),
    }
    def __init__(self, *args, **kwargs):
        # Expose the option labels as the widget choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(MetricsDateRangeFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        # Pre-seed start/end_date with the full data window; the selected
        # option handler then overwrites them with its own bounds.
        if get_earliest_finding() is not None:
            start_date = local_tz.localize(datetime.combine(
                get_earliest_finding().date, datetime.min.time())
            )
            self.start_date = _truncate(start_date - timedelta(days=1))
            self.end_date = _truncate(now() + timedelta(days=1))
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        # Handlers are plain functions stored in ``options``; pass self.
        return self.options[value][1](self, qs, self.field_name)
class EngagementFilter(DojoFilter):
    """Filter products by engagement-related attributes (lead, name, type).

    NOTE(review): despite the class name, ``Meta.model`` is Product — this
    filter appears to select products via their engagements; confirm against
    the views that use it.
    """
    # Only users who actually lead at least one engagement are offered.
    engagement__lead = ModelChoiceFilter(
        queryset=User.objects.filter(
            engagement__lead__isnull=False).distinct(),
        label="Lead")
    name = CharFilter(lookup_expr='icontains')
    prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all().order_by('name'),
        label="Product Type")
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
            ('prod_type__name', 'prod_type__name'),
        ),
        field_labels={
            'name': 'Product Name',
            'prod_type__name': 'Product Type',
        }
    )
    class Meta:
        model = Product
        fields = ['name', 'prod_type']
class ProductFilter(DojoFilter):
    """Filter the product list; restricts product types for non-staff users."""
    name = CharFilter(lookup_expr='icontains', label="Product Name")
    prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all().order_by('name'),
        label="Product Type")
    business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES)
    platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES)
    lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES)
    origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES)
    external_audience = BooleanFilter(field_name='external_audience')
    internet_accessible = BooleanFilter(field_name='internet_accessible')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
            ('prod_type__name', 'prod_type__name'),
            ('business_criticality', 'business_criticality'),
            ('platform', 'platform'),
            ('lifecycle', 'lifecycle'),
            ('origin', 'origin'),
            ('external_audience', 'external_audience'),
            ('internet_accessible', 'internet_accessible'),
        ),
        field_labels={
            'name': 'Product Name',
            'prod_type__name': 'Product Type',
            'business_criticality': 'Business Criticality',
            'platform': 'Platform ',
            'lifecycle': 'Lifecycle ',
            'origin': 'Origin ',
            'external_audience': 'External Audience ',
            'internet_accessible': 'Internet Accessible ',
        }
    )
    # tags = CharFilter(lookup_expr='icontains', label="Tags")
    def __init__(self, *args, **kwargs):
        """Pop the optional ``user`` kwarg and narrow choices for non-staff."""
        self.user = None
        if 'user' in kwargs:
            self.user = kwargs.pop('user')
        super(ProductFilter, self).__init__(*args, **kwargs)
        # Non-staff users may only pick product types they are authorized on.
        if self.user is not None and not self.user.is_staff:
            self.form.fields[
                'prod_type'].queryset = Product_Type.objects.filter(
                prod_type__authorized_users__in=[self.user])
    class Meta:
        model = Product
        fields = ['name', 'prod_type', 'business_criticality', 'platform', 'lifecycle', 'origin', 'external_audience',
                  'internet_accessible', ]
        exclude = ['tags']
class OpenFindingFilter(DojoFilter):
    """Filter for the open-findings list views.

    Accepts optional ``user`` and ``pid`` kwargs: for non-staff users the
    product/endpoint choices are restricted to authorized products, and on
    the per-product view (``pid`` set) the product filter is removed.
    """
    title = CharFilter(lookup_expr='icontains')
    duplicate = ReportBooleanFilter()
    # sourcefile = CharFilter(lookup_expr='icontains')
    sourcefilepath = CharFilter(lookup_expr='icontains')
    param = CharFilter(lookup_expr='icontains')
    payload = CharFilter(lookup_expr='icontains')
    date = DateRangeFilter()
    last_reviewed = DateRangeFilter()
    # Choices populated from the queryset in __init__.
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    test__test_type = ModelMultipleChoiceFilter(
        queryset=Test_Type.objects.all())
    test__engagement__product = ModelMultipleChoiceFilter(
        queryset=Product.objects.all(),
        label="Product")
    test__engagement__risk_acceptance = ReportRiskAcceptanceFilter(
        label="Risk Accepted")
    # Evaluated once when the class body runs (module import): the JIRA
    # filter only exists when the JIRA integration is enabled.
    if get_system_setting('enable_jira'):
        jira_issue = BooleanFilter(field_name='jira_issue',
                                   lookup_expr='isnull',
                                   exclude=True,
                                   label='JIRA issue')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('numerical_severity', 'numerical_severity'),
            ('date', 'date'),
            ('last_reviewed', 'last_reviewed'),
            ('title', 'title'),
            ('test__engagement__product__name',
             'test__engagement__product__name'),
        ),
    )
    class Meta:
        model = Finding
        exclude = ['url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'thread_id', 'notes', 'scanner_confidence', 'mitigated',
                   'numerical_severity', 'reporter', 'last_reviewed', 'line',
                   'duplicate_finding', 'hash_code', 'images',
                   'line_number', 'reviewers', 'mitigated_by', 'sourcefile', 'jira_creation', 'jira_change']
    def __init__(self, *args, **kwargs):
        """Pop ``user``/``pid`` kwargs, then build CWE choices and per-user
        queryset restrictions."""
        self.user = None
        self.pid = None
        if 'user' in kwargs:
            self.user = kwargs.pop('user')
        if 'pid' in kwargs:
            self.pid = kwargs.pop('pid')
        super(OpenFindingFilter, self).__init__(*args, **kwargs)
        # Build {cwe: cwe} for every positive integer CWE in the queryset.
        # NOTE(review): the ``cwe is not None`` test is redundant after
        # ``type(cwe) is int``.
        cwe = dict()
        cwe = dict([cwe, cwe]
                   for cwe in self.queryset.values_list('cwe', flat=True).distinct()
                   if type(cwe) is int and cwe is not None and cwe > 0)
        cwe = collections.OrderedDict(sorted(cwe.items()))
        self.form.fields['cwe'].choices = list(cwe.items())
        # Non-staff users only see products/endpoints they are authorized on.
        if self.user is not None and not self.user.is_staff:
            if self.form.fields.get('test__engagement__product'):
                qs = Product.objects.filter(authorized_users__in=[self.user])
                self.form.fields['test__engagement__product'].queryset = qs
            self.form.fields['endpoints'].queryset = Endpoint.objects.filter(
                product__authorized_users__in=[self.user]).distinct()
        # Don't show the product filter on the product finding view
        if self.pid:
            del self.form.fields['test__engagement__product']
class OpenFindingSuperFilter(OpenFindingFilter):
    """OpenFindingFilter with extra staff-only filters (reporter, type)."""
    reporter = ModelMultipleChoiceFilter(
        queryset=Dojo_User.objects.all())
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all().order_by('name'),
        label="Product Type")
class ClosedFindingFilter(DojoFilter):
    """Filter for the closed (mitigated) findings list."""
    title = CharFilter(lookup_expr='icontains')
    sourcefile = CharFilter(lookup_expr='icontains')
    sourcefilepath = CharFilter(lookup_expr='icontains')
    param = CharFilter(lookup_expr='icontains')
    payload = CharFilter(lookup_expr='icontains')
    mitigated = DateRangeFilter(label="Mitigated Date")
    # Choices populated from the queryset in __init__.
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    test__test_type = ModelMultipleChoiceFilter(
        queryset=Test_Type.objects.all())
    test__engagement__product = ModelMultipleChoiceFilter(
        queryset=Product.objects.all(),
        label="Product")
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all(),
        label="Product Type")
    test__engagement__risk_acceptance = ReportRiskAcceptanceFilter(
        label="Risk Accepted")
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('numerical_severity', 'numerical_severity'),
            ('date', 'date'),
            ('mitigated', 'mitigated'),
            ('title', 'title'),
            ('test__engagement__product__name',
             'test__engagement__product__name'),
        ),
        field_labels={
            'numerical_severity': 'Severity',
            'date': 'Date',
            'mitigated': 'Mitigated Date',
            'title': 'Finding Name',
            'test__engagement__product__name': 'Product Name',
        }
    )
    class Meta:
        model = Finding
        exclude = ['url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'active', 'verified', 'out_of_scope', 'false_p',
                   'duplicate', 'thread_id', 'date', 'notes',
                   'numerical_severity', 'reporter', 'endpoints',
                   'last_reviewed', 'review_requested_by', 'defect_review_requested_by',
                   'last_reviewed_by', 'created', 'jira_creation', 'jira_change']
    def __init__(self, *args, **kwargs):
        """Populate the CWE choices from the CWEs present in the queryset."""
        super(ClosedFindingFilter, self).__init__(*args, **kwargs)
        # NOTE(review): the ``cwe is not None`` test is redundant after
        # ``type(cwe) is int``.
        cwe = dict()
        cwe = dict([cwe, cwe]
                   for cwe in self.queryset.values_list('cwe', flat=True).distinct()
                   if type(cwe) is int and cwe is not None and cwe > 0)
        cwe = collections.OrderedDict(sorted(cwe.items()))
        self.form.fields['cwe'].choices = list(cwe.items())
class ClosedFindingSuperFilter(ClosedFindingFilter):
    """ClosedFindingFilter with an extra staff-only reporter filter."""
    reporter = ModelMultipleChoiceFilter(
        queryset=Dojo_User.objects.all())
class AcceptedFindingFilter(DojoFilter):
    """Filter for the risk-accepted findings list."""
    title = CharFilter(lookup_expr='icontains')
    sourcefile = CharFilter(lookup_expr='icontains')
    sourcefilepath = CharFilter(lookup_expr='icontains')
    param = CharFilter(lookup_expr='icontains')
    payload = CharFilter(lookup_expr='icontains')
    test__engagement__risk_acceptance__created = \
        DateRangeFilter(label="Acceptance Date")
    date = DateRangeFilter(label="Finding Date")
    # Choices populated from the queryset in __init__.
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    test__test_type = ModelMultipleChoiceFilter(
        queryset=Test_Type.objects.all())
    test__engagement__product = ModelMultipleChoiceFilter(
        queryset=Product.objects.all(),
        label="Product")
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all(),
        label="Product Type")
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('numerical_severity', 'numerical_severity'),
            ('date', 'date'),
            ('test__engagement__risk_acceptance__created',
             'test__engagement__risk_acceptance__created'),
            ('title', 'title'),
            ('test__engagement__product__name',
             'test__engagement__product__name'),
        ),
        field_labels={
            'numerical_severity': 'Severity',
            'date': 'Finding Date',
            'test__engagement__risk_acceptance__created': 'Acceptance Date',
            'title': 'Finding Name',
            'test__engagement__product__name': 'Product Name',
        }
    )
    class Meta:
        model = Finding
        fields = ['title', 'test__engagement__risk_acceptance__created']
        exclude = ['url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'active', 'verified', 'out_of_scope', 'false_p',
                   'duplicate', 'thread_id', 'mitigated', 'notes',
                   'numerical_severity', 'reporter', 'endpoints',
                   'last_reviewed', 'o', 'jira_creation', 'jira_change']
    def __init__(self, *args, **kwargs):
        """Populate the CWE choices from the CWEs present in the queryset.

        Fix: the previous implementation built the choices via
        ``dict([finding.cwe, finding.cwe] for ...)`` guarded by
        ``finding.cwe not in cwe`` — a dead test, since the generator closed
        over the still-empty outer ``cwe`` dict — plus a redundant
        ``is not None`` after ``type(...) is int``.  A sorted set of the
        valid CWE values produces the identical choice list directly.
        """
        super(AcceptedFindingFilter, self).__init__(*args, **kwargs)
        # ``type(...) is int`` (not isinstance) deliberately excludes bools,
        # matching the original behavior.
        cwe_values = sorted({
            finding.cwe for finding in self.queryset.distinct()
            if type(finding.cwe) is int and finding.cwe > 0})
        self.form.fields['cwe'].choices = [(c, c) for c in cwe_values]
class AcceptedFindingSuperFilter(AcceptedFindingFilter):
    """AcceptedFindingFilter with an extra acceptance-reporter filter."""
    test__engagement__risk_acceptance__reporter = \
        ModelMultipleChoiceFilter(
            queryset=Dojo_User.objects.all(),
            label="Risk Acceptance Reporter")
class ProductFindingFilter(DojoFilter):
    """Filter for the findings of a single product."""
    title = CharFilter(lookup_expr='icontains')
    sourcefile = CharFilter(lookup_expr='icontains')
    sourcefilepath = CharFilter(lookup_expr='icontains')
    param = CharFilter(lookup_expr='icontains')
    payload = CharFilter(lookup_expr='icontains')
    date = DateRangeFilter()
    # Choices populated from the queryset in __init__.
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    test__test_type = ModelMultipleChoiceFilter(
        queryset=Test_Type.objects.all())
    test__engagement__risk_acceptance = ReportRiskAcceptanceFilter(
        label="Risk Accepted")
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('numerical_severity', 'numerical_severity'),
            ('date', 'date'),
            ('test__engagement__risk_acceptance__created',
             'test__engagement__risk_acceptance__created'),
            ('title', 'title'),
            ('test__engagement__product__name',
             'test__engagement__product__name'),
        ),
        field_labels={
            'numerical_severity': 'Severity',
            'date': 'Finding Date',
            'test__engagement__risk_acceptance__created': 'Acceptance Date',
            'title': 'Finding Name',
            'test__engagement__product__name': 'Product Name',
        }
    )
    class Meta:
        model = Finding
        exclude = ['url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'active', 'verified', 'out_of_scope', 'false_p',
                   'duplicate_finding', 'thread_id', 'mitigated', 'notes',
                   'numerical_severity', 'reporter', 'endpoints',
                   'last_reviewed', 'jira_creation', 'jira_change']
    def __init__(self, *args, **kwargs):
        """Populate the CWE choices from the CWEs present in the queryset."""
        super(ProductFindingFilter, self).__init__(*args, **kwargs)
        # NOTE(review): ``finding.cwe not in cwe`` tests the *outer* ``cwe``
        # name, which is still the empty dict while the generator runs, so
        # the check is a no-op (dict construction de-duplicates anyway);
        # ``is not None`` is redundant after ``type(...) is int``.
        cwe = dict()
        cwe = dict([finding.cwe, finding.cwe]
                   for finding in self.queryset.distinct()
                   if type(finding.cwe) is int and finding.cwe is not None and finding.cwe > 0 and finding.cwe not in cwe)
        cwe = collections.OrderedDict(sorted(cwe.items()))
        self.form.fields['cwe'].choices = list(cwe.items())
class TemplateFindingFilter(DojoFilter):
    """Filter for finding templates; choice lists are built in __init__."""
    title = CharFilter(lookup_expr='icontains')
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=[])
    numerical_severity = MultipleChoiceFilter(choices=[])
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('cwe', 'cwe'),
            ('title', 'title'),
            ('numerical_severity', 'numerical_severity'),
        ),
        field_labels={
            'numerical_severity': 'Severity',
        }
    )
    class Meta:
        model = Finding_Template
        exclude = ['description', 'mitigation', 'impact',
                   'references', 'numerical_severity']
    def __init__(self, *args, **kwargs):
        """Build CWE choices from the queryset and hard-code severity lists."""
        super(TemplateFindingFilter, self).__init__(*args, **kwargs)
        # NOTE(review): ``finding.cwe not in cwe`` always tests the empty
        # outer dict while the generator runs, so it is a no-op.
        cwe = dict()
        cwe = dict([finding.cwe, finding.cwe]
                   for finding in self.queryset.distinct()
                   if type(finding.cwe) is int and finding.cwe is not None and finding.cwe > 0 and finding.cwe not in cwe)
        cwe = collections.OrderedDict(sorted(cwe.items()))
        self.form.fields['cwe'].choices = list(cwe.items())
        # Fixed severity scales (S0..S4 mirrors Critical..Info).
        self.form.fields['severity'].choices = (('Critical', 'Critical'),
                                                ('High', 'High'),
                                                ('Medium', 'Medium'),
                                                ('Low', 'Low'),
                                                ('Info', 'Info'))
        self.form.fields['numerical_severity'].choices = (('S0', 'S0'),
                                                          ('S1', 'S1'),
                                                          ('S2', 'S2'),
                                                          ('S3', 'S3'),
                                                          ('S4', 'S4'))
class FindingStatusFilter(ChoiceFilter):
    """Any/Open/Closed status choice over verified, non-duplicate findings.

    NOTE(review): the handler names ``any`` and ``open`` shadow builtins
    inside the class body; they are only referenced via ``options``.
    """
    def any(self, qs, name):
        # All verified, real (non-FP, non-duplicate, in-scope) findings.
        return qs.filter(verified=True,
                         false_p=False,
                         duplicate=False,
                         out_of_scope=False)
    def open(self, qs, name):
        # Same as ``any`` but not yet mitigated.
        return qs.filter(mitigated__isnull=True,
                         verified=True,
                         false_p=False,
                         duplicate=False,
                         out_of_scope=False, )
    def closed(self, qs, name):
        # Same as ``any`` but mitigated.
        return qs.filter(mitigated__isnull=False,
                         verified=True,
                         false_p=False,
                         duplicate=False,
                         out_of_scope=False, )
    options = {
        '': (_('Any'), any),
        0: (_('Open'), open),
        1: (_('Closed'), closed),
    }
    def __init__(self, *args, **kwargs):
        # Expose the option labels as the widget choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(FindingStatusFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        # Publish the full data window as start/end_date side effects,
        # mirroring MetricsDateRangeFilter.filter.
        if get_earliest_finding() is not None:
            start_date = local_tz.localize(datetime.combine(
                get_earliest_finding().date, datetime.min.time())
            )
            self.start_date = _truncate(start_date - timedelta(days=1))
            self.end_date = _truncate(now() + timedelta(days=1))
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        # Handlers are plain functions stored in ``options``; pass self.
        return self.options[value][1](self, qs, self.field_name)
class MetricsFindingFilter(FilterSet):
    """Filter backing the metrics pages (date window, type, severity, status)."""
    date = MetricsDateRangeFilter()
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all().order_by('name'),
        label="Product Type")
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    status = FindingStatusFilter(label='Status')
    def __init__(self, *args, **kwargs):
        """Limit severity choices to the severities present in the queryset."""
        super(MetricsFindingFilter, self).__init__(*args, **kwargs)
        # Ordered by numerical_severity so choices appear worst-first.
        self.form.fields['severity'].choices = self.queryset.order_by(
            'numerical_severity'
        ).values_list('severity', 'severity').distinct()
    class Meta:
        model = Finding
        exclude = ['url',
                   'description',
                   'mitigation',
                   'unsaved_endpoints',
                   'unsaved_request',
                   'unsaved_response',
                   'unsaved_tags',
                   'references',
                   'review_requested_by',
                   'reviewers',
                   'defect_review_requested_by',
                   'thread_id',
                   'notes',
                   'last_reviewed_by',
                   'images',
                   'endpoints',
                   'is_template',
                   'jira_creation',
                   'jira_change']
class EndpointFilter(DojoFilter):
    """Filter for the endpoint list; non-staff users only see authorized
    products."""
    product = ModelMultipleChoiceFilter(
        queryset=Product.objects.all().order_by('name'),
        label="Product")
    host = CharFilter(lookup_expr='icontains')
    path = CharFilter(lookup_expr='icontains')
    query = CharFilter(lookup_expr='icontains')
    fragment = CharFilter(lookup_expr='icontains')
    # NOTE: declared filters win over Meta.exclude (which only suppresses
    # auto-generated ones), so this 'mitigated' filter is still active.
    mitigated = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('product', 'product'),
            ('host', 'host'),
        ),
    )
    def __init__(self, *args, **kwargs):
        """Pop the optional ``user`` kwarg and narrow choices for non-staff."""
        self.user = None
        if 'user' in kwargs:
            self.user = kwargs.pop('user')
        super(EndpointFilter, self).__init__(*args, **kwargs)
        if self.user and not self.user.is_staff:
            self.form.fields['product'].queryset = Product.objects.filter(
                authorized_users__in=[self.user]).distinct().order_by('name')
    class Meta:
        model = Endpoint
        exclude = ['mitigated']
class EndpointReportFilter(DojoFilter):
    """Endpoint filter used when generating reports (filters on related
    findings' severity and mitigation date)."""
    host = CharFilter(lookup_expr='icontains')
    path = CharFilter(lookup_expr='icontains')
    query = CharFilter(lookup_expr='icontains')
    fragment = CharFilter(lookup_expr='icontains')
    finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    finding__mitigated = MitigatedDateRangeFilter()
    class Meta:
        model = Endpoint
        exclude = ['product']
class ReportFindingFilter(DojoFilter):
    """Finding filter used when generating reports (no user scoping)."""
    title = CharFilter(lookup_expr='icontains', label='Name')
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    active = ReportBooleanFilter()
    mitigated = MitigatedDateRangeFilter()
    verified = ReportBooleanFilter()
    false_p = ReportBooleanFilter(label="False Positive")
    test__engagement__risk_acceptance = ReportRiskAcceptanceFilter(
        label="Risk Accepted")
    duplicate = ReportBooleanFilter()
    out_of_scope = ReportBooleanFilter()
    class Meta:
        model = Finding
        exclude = ['date', 'cwe', 'url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'thread_id', 'notes', 'endpoints',
                   'numerical_severity', 'reporter', 'last_reviewed', 'images', 'jira_creation', 'jira_change']
class ReportAuthedFindingFilter(DojoFilter):
    """Report finding filter scoped to the requesting user's authorized
    products.

    NOTE(review): both __init__ and the ``qs`` property dereference
    ``self.user.is_staff`` without a None check, so this filter must always
    be constructed with a ``user`` kwarg — confirm all call sites do.
    """
    title = CharFilter(lookup_expr='icontains', label='Name')
    test__engagement__product = ModelMultipleChoiceFilter(
        queryset=Product.objects.all(), label="Product")
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.all(),
        label="Product Type")
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)
    active = ReportBooleanFilter()
    mitigated = MitigatedDateRangeFilter()
    verified = ReportBooleanFilter()
    false_p = ReportBooleanFilter(label="False Positive")
    test__engagement__risk_acceptance = ReportRiskAcceptanceFilter(
        label="Risk Accepted")
    duplicate = ReportBooleanFilter()
    out_of_scope = ReportBooleanFilter()
    def __init__(self, *args, **kwargs):
        """Pop the ``user`` kwarg and narrow product choices for non-staff."""
        self.user = None
        if 'user' in kwargs:
            self.user = kwargs.pop('user')
        super(ReportAuthedFindingFilter, self).__init__(*args, **kwargs)
        if not self.user.is_staff:
            self.form.fields[
                'test__engagement__product'].queryset = Product.objects.filter(
                authorized_users__in=[self.user])
    @property
    def qs(self):
        # Staff see everything; others are restricted to authorized products.
        parent = super(ReportAuthedFindingFilter, self).qs
        if self.user.is_staff:
            return parent
        else:
            return parent.filter(
                test__engagement__product__authorized_users__in=[self.user])
    class Meta:
        model = Finding
        exclude = ['date', 'cwe', 'url', 'description', 'mitigation', 'impact',
                   'endpoint', 'references', 'test', 'is_template',
                   'thread_id', 'notes', 'endpoints',
                   'numerical_severity', 'reporter', 'last_reviewed', 'jira_creation', 'jira_change']
class UserFilter(DojoFilter):
    """Filter for the user administration list."""
    first_name = CharFilter(lookup_expr='icontains')
    last_name = CharFilter(lookup_expr='icontains')
    username = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('username', 'username'),
            ('last_name', 'last_name'),
            ('first_name', 'first_name'),
            ('email', 'email'),
            ('is_active', 'is_active'),
            ('is_staff', 'is_staff'),
            ('is_superuser', 'is_superuser'),
        ),
        field_labels={
            'username': 'User Name',
            'is_active': 'Active',
            'is_staff': 'Staff',
            'is_superuser': 'Superuser',
        }
    )
    class Meta:
        model = Dojo_User
        fields = ['is_staff', 'is_superuser', 'is_active', 'first_name',
                  'last_name', 'username']
        # Keep credentials and permission internals out of the filter form.
        exclude = ['password', 'last_login', 'groups', 'user_permissions',
                   'date_joined']
class ReportFilter(DojoFilter):
    """Filter for generated reports; type/status choices are built from the
    queryset in __init__."""
    name = CharFilter(lookup_expr='icontains')
    type = MultipleChoiceFilter(choices=[])
    format = MultipleChoiceFilter(choices=[])
    requester = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.all())
    datetime = DateTimeFilter()
    status = MultipleChoiceFilter(choices=[])
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('datetime', 'datetime'),
            ('name', 'name'),
            ('type', 'type'),
            ('format', 'format'),
            ('requester', 'requester'),
        ),
        field_labels={
            'datetime': 'Date',
        }
    )
    class Meta:
        model = Report
        exclude = ['task_id', 'file']
    def __init__(self, *args, **kwargs):
        """Populate the type and status choices from distinct values in the
        queryset."""
        super(ReportFilter, self).__init__(*args, **kwargs)
        type = dict()
        type = dict(
            [report.type, report.type] for report in self.queryset.distinct()
            if report.type is not None)
        type = collections.OrderedDict(sorted(type.items()))
        self.form.fields['type'].choices = list(type.items())
        status = dict()
        status = dict(
            [report.status, report.status] for report in
            self.queryset.distinct() if report.status is not None)
        status = collections.OrderedDict(sorted(status.items()))
        self.form.fields['status'].choices = list(status.items())
class EngineerFilter(DojoFilter):
    """Ordering-only filter for the engineer (user) metrics list."""
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('username', 'username'),
            ('last_name', 'last_name'),
            ('first_name', 'first_name'),
            ('email', 'email'),
            ('is_active', 'is_active'),
            ('is_staff', 'is_staff'),
            ('is_superuser', 'is_superuser'),
        ),
        field_labels={
            'username': 'User Name',
            'is_active': 'Active',
            'is_staff': 'Staff',
            'is_superuser': 'Superuser',
        }
    )
    class Meta:
        model = Dojo_User
        fields = ['is_staff', 'is_superuser', 'is_active', 'username', 'email',
                  'last_name', 'first_name']
        # Keep credentials and permission internals out of the filter form.
        exclude = ['password', 'last_login', 'groups', 'user_permissions',
                   'date_joined']
class LogEntryFilter(DojoFilter):
    """Filter for the audit log (django-auditlog entries)."""
    # Imported in the class body rather than at module top — presumably to
    # keep the auditlog dependency local to this filter; confirm intent.
    from auditlog.models import LogEntry
    action = MultipleChoiceFilter(choices=LogEntry.Action.choices)
    actor = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.all())
    timestamp = DateRangeFilter()
    class Meta:
        model = LogEntry
        exclude = ['content_type', 'object_pk', 'object_id', 'object_repr',
                   'changes', 'additional_data']
class ProductTypeFilter(DojoFilter):
    """Name filter for the product-type admin list."""
    name = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
        ),
    )
    class Meta:
        model = Product_Type
        exclude = []
        # NOTE(review): ``include`` is not a documented FilterSet Meta
        # option and is likely ignored by django-filter; verify.
        include = ('name',)
class TestTypeFilter(DojoFilter):
    """Name filter for the test-type admin list."""
    name = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
        ),
    )
    class Meta:
        model = Test_Type
        exclude = []
        # NOTE(review): ``include`` is not a documented FilterSet Meta
        # option and is likely ignored by django-filter; verify.
        include = ('name',)
class DevelopmentEnvironmentFilter(DojoFilter):
    """Name filter for the development-environment admin list."""
    name = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
        ),
    )
    class Meta:
        model = Development_Environment
        exclude = []
        # NOTE(review): ``include`` is not a documented FilterSet Meta
        # option and is likely ignored by django-filter; verify.
        include = ('name',)
class NoteTypesFilter(DojoFilter):
    """Name filter for the note-type admin list."""
    name = CharFilter(lookup_expr='icontains')
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ('name', 'name'),
            ('description', 'description'),
            ('is_single', 'is_single'),
            ('is_mandatory', 'is_mandatory'),
        ),
    )
    class Meta:
        model = Note_Type
        exclude = []
        # NOTE(review): ``include`` is not a documented FilterSet Meta
        # option and is likely ignored by django-filter; verify.
        include = ('name', 'is_single', 'description')
# ==============================
# Defect Dojo Engagement Surveys
# ==============================
class SurveyFilter(FilterSet):
    """Filter for engagement surveys (name, description, active flag)."""
    name = CharFilter(lookup_expr='icontains')
    description = CharFilter(lookup_expr='icontains')
    active = BooleanFilter()
    class Meta:
        model = Engagement_Survey
        exclude = ['questions']
    # NOTE(review): this assigns the FilterSet *class* itself as a class
    # attribute — looks like a mistake or dead code; verify intent.
    survey_set = FilterSet
class QuestionTypeFilter(ChoiceFilter):
    """Filter survey questions by their concrete polymorphic type."""
    def any(self, qs, name):
        """No restriction; return the queryset unchanged."""
        return qs.all()
    def text_question(self, qs, name):
        """Only questions whose concrete type is TextQuestion."""
        return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(TextQuestion))
    def choice_question(self, qs, name):
        """Only questions whose concrete type is ChoiceQuestion."""
        return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion))
    options = {
        '': (_('Any'), any),
        1: (_('Text Question'), text_question),
        2: (_('Choice Question'), choice_question),
    }
    def __init__(self, *args, **kwargs):
        # Expose the option labels as the widget choices.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(QuestionTypeFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        """Dispatch to the handler for the selected option; unparsable input
        falls back to the catch-all '' option."""
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        # Fix: the option *label* (options[value][0]) was previously passed
        # as the ``name`` argument.  Pass the filter's field name instead,
        # consistent with every other ChoiceFilter in this module.  (The
        # handlers ignore ``name``, so observable behavior is unchanged.)
        return self.options[value][1](self, qs, self.field_name)
class QuestionFilter(FilterSet):
    """Filter for survey questions by text and concrete type."""
    text = CharFilter(lookup_expr='icontains')
    type = QuestionTypeFilter()
    class Meta:
        model = Question
        exclude = ['polymorphic_ctype', 'created', 'modified', 'order']
    # NOTE(review): assigns the FilterSet *class* itself as a class
    # attribute — looks like a mistake or dead code; verify intent.
    question_set = FilterSet
| 35.463661 | 122 | 0.588835 |
0a36325af6d380faf56c30bab1c768105316b4db | 1,329 | py | Python | seq2seq/combine.py | scarletcho/hypernym-path-generation | 9573aa94ba7c7fadd58879c15dfda35a56254198 | [
"MIT"
] | 2 | 2020-12-12T21:50:24.000Z | 2021-07-04T21:53:44.000Z | seq2seq/combine.py | scarletcho/hypernym-path-generation | 9573aa94ba7c7fadd58879c15dfda35a56254198 | [
"MIT"
] | null | null | null | seq2seq/combine.py | scarletcho/hypernym-path-generation | 9573aa94ba7c7fadd58879c15dfda35a56254198 | [
"MIT"
] | 1 | 2021-07-04T21:54:23.000Z | 2021-07-04T21:54:23.000Z | import sys
if __name__ == '__main__':
src_file = sys.argv[1]
tgt_file = sys.argv[2]
pred_file = sys.argv[3]
epochs = sys.argv[4]
category = sys.argv[5] # verbs, nouns or instnouns
split = sys.argv[6] # val, or test
#optional (if used 'reversed' version, hypernym is first, not last)
try: # use 1 here if want reversed.
reverse = sys.argv[7]
reverse = bool(reverse)
except:
reverse=False
if category in {'verbs', 'nouns'}:
hyp_name = '_hypernym'
elif category == 'instnouns':
hyp_name = '_instance_hypernym'
else:
raise ValueError("Must be 'nouns', 'verbs', or 'instnouns' ")
with open(pred_file, 'r') as fd:
pred = fd.readlines()
with open(src_file, 'r') as fd:
srcval = fd.readlines()
with open(tgt_file, 'r') as fd:
tgtval = fd.readlines()
pred = [i.strip() for i in pred]
if reverse:
pred = [i.split(' ')[0] for i in pred]
else:
pred = [i.split(' ')[-1] for i in pred]
srcval = [i.strip() for i in srcval]
tgtval = [i.strip() for i in tgtval]
with open('y_results_'+epochs+'e_'+category+'_'+ split +'.txt','w') as fd:
for ii,i in enumerate(pred):
fd.write(srcval[ii]+'\t'+hyp_name+'\t'+tgtval[ii]+'\t'+pred[ii]+'\n' )
| 26.58 | 82 | 0.567344 |
f1d00a59c4f8f0b5aa28d789f2b2db1aad53200f | 2,022 | py | Python | tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 5 | 2018-07-04T22:14:02.000Z | 2018-07-04T22:21:43.000Z | tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `ShuffleAndRepeatFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase):
def testShuffleAndRepeatFusion(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["ShuffleAndRepeat"])).shuffle(10).repeat(2)
options = dataset_ops.Options()
options.experimental_shuffle_and_repeat_fusion = True
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
for _ in range(2):
results = []
for _ in range(10):
results.append(self.evaluate(get_next()))
self.assertAllEqual([x for x in range(10)], sorted(results))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
| 38.884615 | 80 | 0.739367 |
f2b97694d0002349aaf516e73d271a6cd8cb2899 | 837 | py | Python | examples/ces/delete_alarm.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 10 | 2018-03-03T17:59:59.000Z | 2020-01-08T10:03:00.000Z | examples/ces/delete_alarm.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 208 | 2020-02-10T08:27:46.000Z | 2022-03-29T15:24:21.000Z | examples/ces/delete_alarm.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 15 | 2020-04-01T20:45:54.000Z | 2022-03-23T12:45:43.000Z | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Delete CloudEye alarm rule
"""
import openstack

# Emit the SDK's request/response logging for debugging this example run.
openstack.enable_logging(True)
# Connect using the named cloud configuration "otc" (from clouds.yaml).
conn = openstack.connect(cloud='otc')

# Alarm rule id to delete; resolve it to a full alarm resource first,
# then delete it.  The result is printed for manual verification.
alarm = 'al1596533022051EZVV2nlZ8'
alarm = conn.ces.find_alarm(alarm)
alarm = conn.ces.delete_alarm(alarm)
print(alarm)
| 32.192308 | 76 | 0.740741 |
7118ca50b773cab142c4cf0de43d7a5b595f7c72 | 2,071 | py | Python | adafruit-circuitpython-bundle-py-20201107/examples/monsterm4sk_rainbow_stars.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | 7 | 2021-03-15T10:06:20.000Z | 2022-03-23T02:53:15.000Z | adafruit-circuitpython-bundle-py-20201107/examples/monsterm4sk_rainbow_stars.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | 5 | 2021-04-27T18:21:11.000Z | 2021-05-02T14:17:14.000Z | adafruit-circuitpython-bundle-py-20201107/examples/monsterm4sk_rainbow_stars.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | # SPDX-FileCopyrightText: 2020 Foamyguy, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
CircuitPython example for Monster M4sk.
Draws star images on each screen. When buttons are pressed
set the stars to a different color. When the nose is booped
make the eyes change through the rainbow.
"""
import time
import board
import displayio
import adafruit_imageload
import adafruit_monsterm4sk

# Each eye display is 240x240 px; the 64-px star tile is shown through a
# Group with scale=3, so it occupies 64 * 3 = 192 px on screen.
SCREEN_SIZE = 240
IMAGE_SIZE = 64 * 3

# Mask peripherals (displays, buttons, boop sensor) hang off one I2C bus.
i2c_bus = board.I2C()
mask = adafruit_monsterm4sk.MonsterM4sk(i2c=i2c_bus)

# One 3x-scaled display group per eye screen.
left_group = displayio.Group(max_size=4, scale=3)
mask.left_display.show(left_group)
right_group = displayio.Group(max_size=4, scale=3)
mask.right_display.show(right_group)

# Center the 192-px sprite on the 240-px screen (24 px margin each side).
left_group.x = (SCREEN_SIZE - IMAGE_SIZE) // 2
left_group.y = (SCREEN_SIZE - IMAGE_SIZE) // 2
right_group.x = (SCREEN_SIZE - IMAGE_SIZE) // 2
right_group.y = (SCREEN_SIZE - IMAGE_SIZE) // 2

# Load the rainbow star sprite sheet; frames 0-5 are used as 64x64 tiles.
star_bitmap, star_palette = adafruit_imageload.load(
    "/rainbow_star.bmp", bitmap=displayio.Bitmap, palette=displayio.Palette
)

# A 1x1 TileGrid per eye; the displayed frame is changed by assigning a new
# sprite index to grid[0].
right_star_grid = displayio.TileGrid(
    star_bitmap,
    pixel_shader=star_palette,
    width=1,
    height=1,
    tile_height=64,
    tile_width=64,
    default_tile=0,
    x=0,
    y=0,
)

left_star_grid = displayio.TileGrid(
    star_bitmap,
    pixel_shader=star_palette,
    width=1,
    height=1,
    tile_height=64,
    tile_width=64,
    default_tile=0,
    x=0,
    y=0,
)

right_group.append(right_star_grid)
left_group.append(left_star_grid)

while True:
    if mask.boop:
        # Nose boop: cycle all 6 frames three times for a rainbow effect,
        # then pause briefly so one boop doesn't retrigger immediately.
        for i in range(6 * 3):
            right_star_grid[0] = i % 6
            left_star_grid[0] = i % 6
            time.sleep(0.02)
        time.sleep(0.5)

    # Side buttons select a fixed frame (color) on both eyes.
    if mask.buttons["S9"]:
        right_star_grid[0] = 2
        left_star_grid[0] = 2

    if mask.buttons["S10"]:
        right_star_grid[0] = 4
        left_star_grid[0] = 4

    if mask.buttons["S11"]:
        right_star_grid[0] = 3
        left_star_grid[0] = 3
| 23.269663 | 76 | 0.661516 |
71288cdb66a060553da9dc362a262fd4a0219e39 | 20,690 | py | Python | pywifi/_wifiutil_win.py | hktkqj/pywifi | addfd97fed6252fe7c7951a8e64579144b3637b2 | [
"MIT"
] | null | null | null | pywifi/_wifiutil_win.py | hktkqj/pywifi | addfd97fed6252fe7c7951a8e64579144b3637b2 | [
"MIT"
] | null | null | null | pywifi/_wifiutil_win.py | hktkqj/pywifi | addfd97fed6252fe7c7951a8e64579144b3637b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# vim: set fileencoding=utf-8
"""Implementations of wifi functions of Linux."""
import re
import platform
import time
import logging
from ctypes import *
from ctypes.wintypes import *
from comtypes import GUID
from .const import *
from .profile import Profile
if platform.release().lower() == 'xp':
if platform.win32_ver()[2].lower() in ['sp2', 'sp3']:
CLIENT_VERSION = 1
else:
CLIENT_VERSION = 2
"""
Some types does not exist in python2 ctypes.wintypes so we fake them
using how its defined in python3 ctypes.wintypes.
"""
if not "PDWORD" in dir():
PDWORD = POINTER(DWORD)
if not "PWCHAR" in dir():
PWCHAR= POINTER(WCHAR)
ERROR_SUCCESS = 0
WLAN_MAX_PHY_TYPE_NUMBER = 8
DOT11_MAC_ADDRESS = c_ubyte * 6
native_wifi = windll.wlanapi
status_dict = [
IFACE_INACTIVE,
IFACE_CONNECTED,
IFACE_CONNECTED,
IFACE_DISCONNECTED,
IFACE_DISCONNECTED,
IFACE_CONNECTING,
IFACE_CONNECTING,
IFACE_CONNECTING
]
auth_value_to_str_dict = {
AUTH_ALG_OPEN: 'open',
AUTH_ALG_SHARED: 'shared'
}
auth_str_to_value_dict = {
'open': AUTH_ALG_OPEN,
'shared': AUTH_ALG_SHARED
}
akm_str_to_value_dict = {
'NONE': AKM_TYPE_NONE,
'WPA': AKM_TYPE_WPA,
'WPAPSK': AKM_TYPE_WPAPSK,
'WPA2': AKM_TYPE_WPA2,
'WPA2PSK': AKM_TYPE_WPA2PSK,
'OTHER': AKM_TYPE_UNKNOWN
}
akm_value_to_str_dict = {
AKM_TYPE_NONE: 'NONE',
AKM_TYPE_WPA: 'WPA',
AKM_TYPE_WPAPSK: 'WPAPSK',
AKM_TYPE_WPA2: 'WPA2',
AKM_TYPE_WPA2PSK: 'WPA2PSK',
AKM_TYPE_UNKNOWN: 'OTHER'
}
cipher_str_to_value_dict = {
'NONE': CIPHER_TYPE_NONE,
'WEP': CIPHER_TYPE_WEP,
'TKIP': CIPHER_TYPE_TKIP,
'AES': CIPHER_TYPE_CCMP,
'OTHER': CIPHER_TYPE_UNKNOWN
}
cipher_value_to_str_dict = {
CIPHER_TYPE_NONE: 'NONE',
CIPHER_TYPE_WEP: 'WEP',
CIPHER_TYPE_TKIP: 'TKIP',
CIPHER_TYPE_CCMP: 'AES',
CIPHER_TYPE_UNKNOWN: 'UNKNOWN'
}
class WLAN_INTERFACE_INFO(Structure):
_fields_ = [
("InterfaceGuid", GUID),
("strInterfaceDescription", c_wchar * 256),
("isState", c_uint)
]
class WLAN_INTERFACE_INFO_LIST(Structure):
_fields_ = [
("dwNumberOfItems", DWORD),
("dwIndex", DWORD),
("InterfaceInfo", WLAN_INTERFACE_INFO * 1)
]
class DOT11_SSID(Structure):
_fields_ = [("uSSIDLength", c_ulong),
("ucSSID", c_char * 32)]
class WLAN_RATE_SET(Structure):
_fields_ = [
("uRateSetLength", c_ulong),
("usRateSet", c_ushort * 126)
]
class WLAN_RAW_DATA(Structure):
_fields_ = [
("dwDataSize", DWORD),
("DataBlob", c_byte * 1)
]
class WLAN_AVAILABLE_NETWORK(Structure):
_fields_ = [
("strProfileName", c_wchar * 256),
("dot11Ssid", DOT11_SSID),
("dot11BssType", c_uint),
("uNumberOfBssids", c_ulong),
("bNetworkConnectable", c_bool),
("wlanNotConnectableReason", c_uint),
("uNumberOfPhyTypes", c_ulong * WLAN_MAX_PHY_TYPE_NUMBER),
("dot11PhyTypes", c_uint),
("bMorePhyTypes", c_bool),
("wlanSignalQuality", c_ulong),
("bSecurityEnabled", c_bool),
("dot11DefaultAuthAlgorithm", c_uint),
("dot11DefaultCipherAlgorithm", c_uint),
("dwFlags", DWORD),
("dwReserved", DWORD)
]
class WLAN_AVAILABLE_NETWORK_LIST(Structure):
_fields_ = [
("dwNumberOfItems", DWORD),
("dwIndex", DWORD),
("Network", WLAN_AVAILABLE_NETWORK * 1)
]
class WLAN_BSS_ENTRY(Structure):
_fields_ = [
("dot11Ssid", DOT11_SSID),
("uPhyId", c_ulong),
("dot11Bssid", DOT11_MAC_ADDRESS),
("dot11BssType", c_uint),
("dot11BssPhyType", c_uint),
("lRssi", c_long),
("uLinkQuality", c_ulong),
("bInRegDomain", c_bool),
("usBeaconPeriod", c_ushort),
("ullTimestamp", c_ulonglong),
("ullHostTimestamp", c_ulonglong),
("usCapabilityInformation", c_ushort),
("ulChCenterFrequency", c_ulong),
("wlanRateSet", WLAN_RATE_SET),
("ulIeOffset", c_ulong),
("ulIeSize", c_ulong)
]
class WLAN_BSS_LIST(Structure):
_fields_ = [
("dwTotalSize", DWORD),
("dwNumberOfItems", DWORD),
("wlanBssEntries", WLAN_BSS_ENTRY * 1)
]
class NDIS_OBJECT_HEADER(Structure):
_fields_ = [
("Type", c_ubyte),
("Revision", c_ubyte),
("Size", c_ushort)
]
class DOT11_BSSID_LIST(Structure):
_fields_ = [
("Header", NDIS_OBJECT_HEADER),
("uNumOfEntries", c_ulong),
("uTotalNumOfEntries", c_ulong),
("BSSIDs", DOT11_MAC_ADDRESS * 1)
]
class WLAN_CONNECTION_PARAMETERS(Structure):
_fields_ = [
("wlanConnectionMode", c_uint),
("strProfile", c_wchar_p),
("pDot11Ssid", POINTER(DOT11_SSID)),
("pDesiredBssidList", POINTER(DOT11_BSSID_LIST)),
("dot11BssType", c_uint),
("dwFlags", DWORD)
]
class WLAN_PROFILE_INFO(Structure):
_fields_ = [
("strProfileName", c_wchar * 256),
("dwFlags", DWORD)
]
class WLAN_PROFILE_INFO_LIST(Structure):
_fields_ = [
("dwNumberOfItems", DWORD),
("dwIndex", DWORD),
("ProfileInfo", WLAN_PROFILE_INFO * 1)
]
class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
#This part is used for converting to UTF-8 encoded SSID
temp_cnt = 0
temp_hex_res = 0
bytes_list = []
converted_name = ""
for bin_encode_char in ssid:
if (33 <= ord(bin_encode_char) <= 126):
converted_name = converted_name + bin_encode_char
else:
temp_cnt = temp_cnt + 1
temp_now = int(str(bin(ord(bin_encode_char)))[2:6], 2)
temp_now1 = int(str(bin(ord(bin_encode_char)))[6:10], 2)
temp_hex_res = temp_hex_res + temp_now * 16 + temp_now1
bytes_list.append(temp_hex_res)
temp_hex_res = 0
if temp_cnt == 3:
converted_name = converted_name + bytes(bytes_list).decode('utf-8', 'ignore')
bytes_list = []
temp_hex_res = 0
temp_cnt = 0
ssid = converted_name
#print("Work + " + ssid + " strlen : " + str(len(ssid)))
#End of converting part
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
    def status(self, obj):
        """Get the wifi interface status.

        Queries opcode 6 on the interface handle (the interface-state opcode
        of WlanQueryInterface -- see WLAN_INTF_OPCODE in wlanapi.h) and maps
        the returned native state value onto the pywifi IFACE_* constants via
        the module-level ``status_dict`` table.
        """
        data_size = DWORD()
        data = PDWORD()
        opcode_value_type = DWORD()
        # The native call fills ``data`` with a pointer to the state DWORD.
        self._wlan_query_interface(self._handle, obj['guid'], 6,
                                   byref(data_size), byref(data),
                                   byref(opcode_value_type))
        return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
for interface in self._ifaces.contents.InterfaceInfo:
iface = {}
iface['guid'] = interface.InterfaceGuid
iface['name'] = interface.strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
| 31.929012 | 109 | 0.589125 |
de4152fd7f7ec52477164848911f8b1521fcf97c | 1,094 | py | Python | examples/widgets/span_selector.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-11-18T21:53:55.000Z | 2019-11-18T21:53:55.000Z | examples/widgets/span_selector.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-05-10T17:57:41.000Z | 2021-07-26T16:23:09.000Z | examples/widgets/span_selector.py | kdavies4/matplotlib | 330aefbd031ee227213afe655c5158320015d45b | [
"MIT",
"BSD-3-Clause"
] | 1 | 2015-12-21T07:24:54.000Z | 2015-12-21T07:24:54.000Z | #!/usr/bin/env python
"""
The SpanSelector is a mouse widget to select a xmin/xmax range and plot the
detail view of the selected region in the lower axes
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector

# Upper axes: a noisy sine wave over x in [0, 5); the user drags a span here.
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(211, axisbg='#FFFFCC')

x = np.arange(0.0, 5.0, 0.01)
y = np.sin(2*np.pi*x) + 0.5*np.random.randn(len(x))

ax.plot(x, y, '-')
ax.set_ylim(-2, 2)
ax.set_title('Press left mouse button and drag to test')

# Lower axes: shows a zoomed view of whatever span is selected above.
# ``line2`` is updated in place by the onselect callback below.
ax2 = fig.add_subplot(212, axisbg='#FFFFCC')
line2, = ax2.plot(x, y, '-')
def onselect(xmin, xmax):
    """SpanSelector callback: show the [xmin, xmax] slice in the lower axes.

    ``xmin``/``xmax`` are the data coordinates of the dragged span on the
    upper axes; the corresponding slice of (x, y) replaces the lower line's
    data and the lower axes limits are fitted to it.
    """
    indmin, indmax = np.searchsorted(x, (xmin, xmax))
    indmax = min(len(x) - 1, indmax)
    # Robustness fix: a zero-width drag yields an empty slice, and
    # thisy.min()/max() below would raise ValueError on an empty array.
    if indmax <= indmin:
        return
    thisx = x[indmin:indmax]
    thisy = y[indmin:indmax]
    line2.set_data(thisx, thisy)
    ax2.set_xlim(thisx[0], thisx[-1])
    ax2.set_ylim(thisy.min(), thisy.max())
    fig.canvas.draw()
# set useblit True on gtkagg for enhanced performance
span = SpanSelector(ax, onselect, 'horizontal', useblit=True,
                    rectprops=dict(alpha=0.5, facecolor='red'))
# Keeping ``span`` in a variable holds a reference to the selector; then
# hand control to the GUI event loop.
plt.show()
| 26.682927 | 75 | 0.671846 |
6fc1d58400b8e7856475b228f395f8f7a44ba6fa | 2,755 | py | Python | boxen/_depot.py | artPlusPlus/boxen | 93ce396ce1646fef1a0e3f6bc5aa44c1d6313e7f | [
"MIT"
] | null | null | null | boxen/_depot.py | artPlusPlus/boxen | 93ce396ce1646fef1a0e3f6bc5aa44c1d6313e7f | [
"MIT"
] | null | null | null | boxen/_depot.py | artPlusPlus/boxen | 93ce396ce1646fef1a0e3f6bc5aa44c1d6313e7f | [
"MIT"
] | null | null | null | import os
try:
import ujson as jlib
except ImportError:
try:
import simplejson as jlib
except ImportError:
import json as jlib
from ._object_cache import ObjectCache
from ._container import Container
class Depot(object):
    """
    A Depot manages Containers.

    Objects live inside Containers on disk; the Depot routes get/post/put/
    delete calls to the right Container and keeps a bounded in-memory object
    cache in front of them.
    """

    @property
    def _current_container(self):
        """Container that should receive newly stored objects.

        Lazily creates a new Container (named <prefix>_NNNNNN_<suffix>)
        when there is none yet or the current one has reached the
        per-container object limit.
        """
        if self._cur_container is None or len(self._cur_container) == self._container_object_limit:
            container = Container()
            container_name = '{0:06d}'.format(len(self._containers))
            container_name = '_'.join([self._container_prefix, container_name, self._container_suffix])
            container._path = os.path.join(self._location, container_name)
            # Bug fix: ``_containers`` is a dict (see __init__) but the
            # original called list.append() on it.  Key containers by name so
            # get()/put()/delete() can look them up through the
            # object-key -> container-key map.
            self._containers[container_name] = container
            self._cur_container = container
        return self._cur_container

    def __init__(self, cache_capacity=1000):
        self._name = None
        self._location = None
        # Bug fix: the original instantiated ``_ObjectCache``, a name that is
        # never defined; the class is imported at module level as
        # ``ObjectCache``.
        self._object_cache = ObjectCache(cache_capacity)
        self._container_object_limit = 1000
        # Maps object key -> key (name) of the Container holding the object.
        self._map__object_key__container_key = {}
        self._containers = {}
        # Bug fix: the property above reads ``_cur_container``, but __init__
        # only ever set a never-used ``_next_available_container``.
        self._cur_container = None
        self._object_cache_size = 1000
        self._container_prefix = ''
        self._container_suffix = ''

    def get(self, object_key):
        """Return the object stored under *object_key*, preferring the cache."""
        try:
            result = self._object_cache[object_key]
        except KeyError:
            container_key = self._map__object_key__container_key[object_key]
            container = self._containers[container_key]
            result = container.get(object_key)
            self._object_cache[object_key] = result
            # TODO: Implement cache resizing
        return result

    def post(self, object_data):
        """Store a new object in the currently filling Container."""
        # Bug fix: ``_next_available_container`` was None and never assigned;
        # the lazily-creating ``_current_container`` property is the real
        # source of the target container.
        self._current_container.post(object_data)

    def put(self, key, object_data):
        """Create or replace the object stored under *key*."""
        try:
            # Bug fix: the map attribute is ``_map__object_key__container_key``
            # (the original referenced a nonexistent ``_map__object_key__container``)
            # and it holds container keys, not Container objects.
            container_key = self._map__object_key__container_key[key]
            container = self._containers[container_key]
        except KeyError:
            container = self._current_container
        container.put(key, object_data)

    def delete(self, key):
        """Delete the object stored under *key*."""
        container_key = self._map__object_key__container_key[key]
        container = self._containers[container_key]
        container.delete(key)

    def _load(self, depot_path):
        """Initialize this Depot from the ``.boxen`` config inside *depot_path*.

        Raises IOError when the directory or its ``.boxen`` file is missing.
        """
        if not os.path.isdir(depot_path):
            msg = 'boxen depot directory not found: {0}'.format(depot_path)
            raise IOError(msg)
        depot_config = os.path.join(depot_path, '.boxen')
        if not os.path.isfile(depot_config):
            msg = 'No boxen depot configuration found in path: {0}'.format(depot_path)
            raise IOError(msg)
        # Bug fix: the original passed the config file *path* string to
        # jlib.loads(); the file's contents must be read and parsed instead.
        with open(depot_config) as config_file:
            depot_data = jlib.loads(config_file.read())
        self._container_object_limit = depot_data['container_object_limit']
        self._object_cache_size = depot_data['object_cache_size']
| 32.034884 | 103 | 0.657713 |
155cc19037af3f9c2f0840fe344daeb4496cea30 | 7,698 | py | Python | experiments.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | null | null | null | experiments.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | null | null | null | experiments.py | mattjj/pyhsmm-collapsedinfinite | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | [
"MIT"
] | 1 | 2021-10-06T15:12:44.000Z | 2021-10-06T15:12:44.000Z | from __future__ import division
import numpy as np
na = np.newaxis
import cPickle, os
from matplotlib import pyplot as plt
import pyhsmm, models, util, timing
import diskmemo
SAVING = False
obs_hypparams = dict(mu_0=0.,kappa_0=0.02,sigmasq_0=1,nu_0=10)
dur_psn_hypparams = dict(alpha_0=2*10,beta_0=2)
dur_geo_hypparams = dict(alpha_0=4,beta_0=20)
#####################
# Data generation #
#####################
def generate_hsmm_data():
return pyhsmm.models.HSMM(6,6,
[pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams) for s in range(10)],
[pyhsmm.basic.distributions.PoissonDuration(**dur_psn_hypparams) for s in range(10)]).generate(50)
def generate_hmm_data():
return pyhsmm.models.HSMM(6,6,
[pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams) for s in range(10)],
[pyhsmm.basic.distributions.GeometricDuration(**dur_geo_hypparams) for s in range(10)]).generate(50)
if os.path.isfile('data'):
with open('data','r') as infile:
(hmm_data, hmm_labels), (hsmm_data, hsmm_labels) = cPickle.load(infile)
else:
(hmm_data, hmm_labels), (hsmm_data, hsmm_labels) = thetuple = \
generate_hmm_data(), generate_hsmm_data()
with open('data','w') as outfile:
cPickle.dump(thetuple,outfile,protocol=2)
#################
# Experiments #
#################
allfigfuncs = []
def compare_timing():
wl_timing = timing.get_wl_timing(alpha=6,gamma=6,L=10,data=hsmm_data,obsdistnstring='pyhsmm.basic.distributions.ScalarGaussianNIX(mu_0=0.,kappa_0=0.02,sigmasq_0=1,nu_0=10)',durdistnstring='pyhsmm.basic.distributions.PoissonDuration(2*10,2)')
da_timing = timing.get_da_timing(alpha_0=6,gamma_0=6,data=hsmm_data,obsclassstring='pyhsmm.basic.distributions.ScalarGaussianNIX(mu_0=0.,kappa_0=0.02,sigmasq_0=1,nu_0=10)',durclassstring='pyhsmm.basic.distributions.PoissonDuration(2*10,2)')
print 'WL time per iteration: %0.4f' % wl_timing
print 'DA time per iteration: % 0.4f' % da_timing
return wl_timing, da_timing
def wl_is_faster_hamming():
# show hamming error to true state sequence decreases faster with wl
### get samples
wl_samples = get_hdphsmm_wl_poisson_samples(hsmm_data,nruns=100,niter=300,L=10)
da_samples = get_hdphsmm_da_poisson_samples(hsmm_data,nruns=24,niter=150)
### get hamming errors for samples
def f(tup):
return util.stateseq_hamming_error(tup[0],tup[1])
wl_errs = np.array(dv.map_sync(f,zip(wl_samples,[hsmm_labels]*len(wl_samples))))
da_errs = np.array(dv.map_sync(f,zip(da_samples,[hsmm_labels]*len(da_samples))))
### plot
plt.figure()
for errs, samplername, color in zip([wl_errs, da_errs],['Weak Limit','Direct Assignment'],['b','g']):
plt.plot(np.median(errs,axis=0),color+'-',label='%s Sampler' % samplername)
plt.plot(util.scoreatpercentile(errs.copy(),per=25,axis=0),color+'--')
plt.plot(util.scoreatpercentile(errs.copy(),per=75,axis=0),color+'--')
plt.legend()
plt.xlabel('iteration')
plt.ylabel('Hamming error')
save('figures/wl_is_faster_hamming.pdf')
return wl_errs, da_errs
allfigfuncs.append(wl_is_faster_hamming)
def hsmm_vs_stickyhmm():
# show convergence rates in #iter are same
### get samples
hsmm_samples = get_hdphsmm_da_geo_samples(hmm_data,nruns=50,niter=100)
shmm_samples = get_shdphmm_da_samples(hmm_data,nruns=50,niter=100)
### get hamming errors for samples
def f(tup):
return util.stateseq_hamming_error(tup[0],tup[1])
hsmm_errs = np.array(dv.map_sync(f,zip(hsmm_samples,[hmm_labels]*len(hsmm_samples))))
shmm_errs = np.array(dv.map_sync(f,zip(shmm_samples,[hmm_labels]*len(shmm_samples))))
### plot
plt.figure()
for errs, samplername, color in zip([hsmm_errs, shmm_errs],['Geo-HDP-HSMM DA','Sticky-HDP-HMM DA'],['b','g']):
plt.plot(np.median(errs,axis=0),color+'-',label='%s Sampler' % samplername)
plt.plot(util.scoreatpercentile(errs.copy(),per=25,axis=0),color+'--')
plt.plot(util.scoreatpercentile(errs.copy(),per=75,axis=0),color+'--')
plt.legend()
plt.xlabel('iteration')
plt.ylabel('Hamming error')
save('figures/hsmm_vs_stickyhmm.pdf')
return hsmm_errs, shmm_errs
allfigfuncs.append(hsmm_vs_stickyhmm)
####################
# Sample-getting #
####################
@diskmemo.memoize
def get_hdphsmm_wl_poisson_samples(data,nruns,niter,L):
return get_samples_parallel(hdphsmm_wl_poisson_sampler,nruns,niter=niter,data=hsmm_data,L=L,alpha_0=6,gamma_0=6)
@diskmemo.memoize
def get_hdphsmm_da_poisson_samples(data,nruns,niter):
return get_samples_parallel(hdphsmm_da_poisson_sampler,nruns,niter=niter,data=hsmm_data,alpha_0=6,gamma_0=6)
@diskmemo.memoize
def get_shdphmm_da_samples(data,nruns,niter):
return get_samples_parallel(shdphmm_da_sampler,nruns,niter=niter,data=data,alpha_0=6,gamma_0=6,kappa=30)
@diskmemo.memoize
def get_hdphsmm_da_geo_samples(data,nruns,niter):
return get_samples_parallel(hdphsmm_da_geo_sampler,nruns,niter=niter,data=data,alpha_0=6,gamma_0=6)
####################
# Sample-running #
####################
def run_model(model, data, niter):
    """Attach *data* to *model*, run Gibbs sampling, and record state sequences.

    Row 0 holds the initial state sequence; each following row holds the
    sequence after one more resample_model() sweep.  Returns an array of
    shape (niter, len(data)).
    """
    model.add_data(data)
    stateseqs = np.empty((niter, data.shape[0]))
    stateseqs[0] = model.states_list[0].stateseq
    for iteration in range(1, niter):
        model.resample_model()
        stateseqs[iteration] = model.states_list[0].stateseq
    return stateseqs
def hdphsmm_wl_poisson_sampler(niter,data,L,alpha_0,gamma_0):
model = pyhsmm.models.HSMM(alpha_0,gamma_0,
[pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams) for s in range(L)],
[pyhsmm.basic.distributions.PoissonDuration(**dur_psn_hypparams) for s in range(L)])
return run_model(model,data,niter)
def hdphsmm_da_poisson_sampler(niter,data,alpha_0,gamma_0):
model = models.collapsed_hdphsmm(alpha_0,gamma_0,
obs=pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams),
dur=pyhsmm.basic.distributions.PoissonDuration(**dur_psn_hypparams))
return run_model(model,data,niter)
def hdphsmm_da_geo_sampler(niter,data,alpha_0,gamma_0):
model = models.collapsed_hdphsmm(alpha_0,gamma_0,
obs=pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams),
dur=pyhsmm.basic.distributions.GeometricDuration(**dur_geo_hypparams))
return run_model(model,data,niter)
def shdphmm_da_sampler(niter,data,alpha_0,gamma_0,kappa):
model = models.collapsed_stickyhdphmm(alpha_0,gamma_0,kappa,
obs=pyhsmm.basic.distributions.ScalarGaussianNIX(**obs_hypparams))
return run_model(model,data,niter)
def get_samples_parallel(sampler,nruns,**kwargs):
def applier(tup):
return apply(tup[0],(),tup[1])
samples_list = dv.map_sync(applier, zip([sampler]*nruns,[kwargs]*nruns))
dv.purge_results('all')
return samples_list
###################
# Figure saving #
###################
import os
def save(pathstr):
    """Save the current matplotlib figure to *pathstr*, guarding overwrites.

    Does nothing unless the module-level SAVING flag is set.  If the target
    file already exists, asks for confirmation on stdin before overwriting.
    (Python 2 code: print statement and raw_input.)
    """
    filepath = os.path.abspath(pathstr)
    if SAVING:
        if (not os.path.isfile(pathstr)) or raw_input('save over %s? [y/N] ' % filepath).lower() == 'y':
            plt.savefig(filepath)
            print 'saved %s' % filepath
            return
    print 'not saved'
#######################
# Parallelism stuff #
#######################
class dummy_directview(object):
map_sync = map
__len__ = lambda self: 1
purge_results = lambda x,y: None
dv = dummy_directview()
def go_parallel():
global dv, c
from IPython.parallel import Client
c = Client()
dv = c[:]
##########
# Main #
##########
def main():
for f in allfigfuncs:
f()
if __name__ == '__main__':
main()
| 34.832579 | 245 | 0.68966 |
34e5959119328cadecf03fe2ee59e835598e4230 | 699 | py | Python | general-practice/Exercises solved/codingbat/Logic1/date_fashion.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | general-practice/Exercises solved/codingbat/Logic1/date_fashion.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | general-practice/Exercises solved/codingbat/Logic1/date_fashion.py | lugabrielbueno/Projeto | f012c5bb9ce6f6d7c9e8196cc7986127dba3eba0 | [
"MIT"
] | null | null | null | #You and your date are trying to get a table at a restaurant. The parameter "you" is the stylishness of your clothes, in the range 0..10, and "date" is the stylishness of your date's clothes. The result getting the table is encoded as an int value with 0=no, 1=maybe, 2=yes. If either of you is very stylish, 8 or more, then the result is 2 (yes). With the exception that if either of you has style of 2 or less, then the result is 0 (no). Otherwise the result is 1 (maybe).
#date_fashion(5, 10) → 2
#date_fashion(5, 2) → 0
#date_fashion(5, 5) → 1
def date_fashion(you, date):
    """Return 2 (yes), 1 (maybe) or 0 (no) for getting a table.

    Style 2 or below on either person always means no; otherwise style 8 or
    above on either person means yes; anything else is a maybe.
    """
    shabbiest = min(you, date)
    most_stylish = max(you, date)
    if shabbiest <= 2:
        return 0
    if most_stylish >= 8:
        return 2
    return 1
7940cf9fc374d7add1579145f2b30e5bfe418350 | 425 | py | Python | src/veem/models/address.py | veeminc/Veem-python-sdk | 2f7527af0139a3f12e544fe2b51b3021df404f3c | [
"MIT"
] | 1 | 2021-07-05T22:52:46.000Z | 2021-07-05T22:52:46.000Z | src/veem/models/address.py | veeminc/Veem-python-sdk | 2f7527af0139a3f12e544fe2b51b3021df404f3c | [
"MIT"
] | 1 | 2020-09-15T16:25:39.000Z | 2020-09-15T16:25:39.000Z | src/veem/models/address.py | veeminc/Veem-python-sdk | 2f7527af0139a3f12e544fe2b51b3021df404f3c | [
"MIT"
] | 2 | 2021-08-11T18:05:08.000Z | 2022-02-06T08:20:49.000Z |
from veem.models.base import Base
class Address(Base):
    """Postal-address payload used by the Veem API models.

    All fields are optional and default to None so partial API responses
    can be deserialized.
    """
    def __init__(self,
                 line1=None,
                 line2=None,
                 city=None,
                 stateProvince=None,
                 postalCode=None,
                 **kwargs):
        # NOTE(review): extra keyword arguments are accepted but silently
        # dropped (not forwarded to Base) -- presumably deliberate, to
        # tolerate unknown fields in API payloads; confirm against Base.
        self.line1 = line1
        self.line2 = line2
        self.city = city
        self.stateProvince = stateProvince
        self.postalCode = postalCode
70180e99fdb14765936bb6afd96d7736fa9a787b | 6,293 | py | Python | snappy_wrappers/wrappers/mutect_par/wrapper.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 5 | 2021-02-26T10:39:56.000Z | 2021-12-23T07:53:26.000Z | snappy_wrappers/wrappers/mutect_par/wrapper.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 93 | 2021-02-22T11:23:59.000Z | 2022-03-31T09:58:39.000Z | snappy_wrappers/wrappers/mutect_par/wrapper.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 3 | 2021-02-22T11:44:59.000Z | 2021-06-21T19:33:53.000Z | # -*- coding: utf-8 -*-
"""Wrapper for running MuTect variant caller in parallel, genome is split into windows
isort:skip_file
"""
import os
import sys
import textwrap
from snakemake import shell
# A hack is required for being able to import snappy_wrappers modules when in development mode.
# TODO: is there a more elegant way?
base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, base_dir)
from snappy_wrappers.wrapper_parallel import (
ParallelSomaticVariantCallingBaseWrapper,
ResourceUsage,
gib,
hours,
)
class ParallelMutectWrapper(ParallelSomaticVariantCallingBaseWrapper):
    """Parallel execution of MuTect.

    The genome is split into windows, the ``mutect`` inner wrapper is run on
    each window, and the per-window results (VCF, call-stats txt, coverage
    wig) are merged into the final output files by ``construct_merge_rule``.
    """

    # TODO: probably, nobody looked at anything but the vcf/tbi files... get rid of them?
    # Output keys whose paths are resolved to real paths when linking results.
    # Fix: a duplicated "full_tbi" entry has been removed (the companion
    # key_ext mapping below shows each key is intended to appear once).
    realpath_output_keys = (
        "vcf",
        "vcf_md5",
        "tbi",
        "tbi_md5",
        "full_vcf",
        "full_vcf_md5",
        "full_tbi",
        "full_tbi_md5",
        "txt",
        "txt_md5",
        "wig",
        "wig_md5",
    )
    # Mapping from output key to the file-name extension of the merged result.
    key_ext = {
        "txt": "full.out.txt.gz",
        "txt_md5": "full.out.txt.gz.md5",
        "vcf": "vcf.gz",
        "vcf_md5": "vcf.gz.md5",
        "tbi": "vcf.gz.tbi",
        "tbi_md5": "vcf.gz.tbi.md5",
        "full_vcf": "full.vcf.gz",
        "full_vcf_md5": "full.vcf.gz.md5",
        "full_tbi": "full.vcf.gz.tbi",
        "full_tbi_md5": "full.vcf.gz.tbi.md5",
        "wig": "full.wig.txt.gz",
        "wig_md5": "full.wig.txt.gz.md5",
    }
    inner_wrapper = "mutect"
    step_name = "somatic_variant_calling"
    tool_name = "mutect"

    def __init__(self, snakemake):
        """Configure per-window job and merge-step resource requirements."""
        super().__init__(snakemake)
        # Per-window MuTect job: memory/time scaled by the configured multipliers.
        self.job_resources = ResourceUsage(
            cores=1,
            memory=gib(7.5 * self.get_job_mult_memory()),
            duration=hours(4 * self.get_job_mult_time()),
        )
        # Final merge step: lighter on memory, same base duration.
        self.merge_resources = ResourceUsage(
            cores=1,
            memory=gib(2.0 * self.get_merge_mult_memory()),
            duration=hours(4 * self.get_merge_mult_time()),
        )

    def construct_merge_rule(self):
        """Join the overall result files.

        Returns the text of a Snakemake ``merge_all`` rule that concatenates
        the per-window txt/wig outputs (keeping only the first header),
        merges the VCFs with ``bcftools concat``, indexes/compresses them,
        computes MD5 sums, and moves everything to the final output paths.
        """
        return (
            textwrap.dedent(
                r"""
        rule merge_all:
            input: [{all_input}]
            output: **{all_output}
            log: **{all_log}
            shell:
                r'''
                # Initialize output directory -----------------------------------------
                outdir=$(basename {{output.vcf}})
                mkdir -p output
                # Take first header -------------------------------------------------------
                set +o pipefail
                zcat job_out.0.d/out/tmp_0.full.out.txt.gz | head -n 2 > output/result.full.out.txt
                zcat job_out.0.d/out/tmp_0.full.wig.txt.gz | head -n 2 > output/result.full.wig.txt
                set -o pipefail
                # Append body contents ----------------------------------------------------
                for jobno in {{{{0..{max_jobno}}}}}; do
                    set +o pipefail
                    zcat job_out.$jobno.d/out/tmp_$jobno.full.out.txt.gz | tail -n +3 >> output/result.full.out.txt
                    zcat job_out.$jobno.d/out/tmp_$jobno.full.wig.txt.gz | tail -n +3 >> output/result.full.wig.txt
                    set -o pipefail
                done
                # Use bcftools concat for VCF files ---------------------------------------
                bcftools concat -a -d none -O z -o output/result.full.vcf.gz job_out.*.d/out/tmp_*.full.vcf.gz
                bcftools concat -a -d none -O z -o output/result.vcf.gz job_out.*.d/out/tmp_*[0-9].vcf.gz
                # bgzip output and create tabix index -------------------------------------
                bgzip -f output/result.full.out.txt
                bgzip -f output/result.full.wig.txt
                tabix -f output/result.full.vcf.gz
                tabix -f output/result.vcf.gz
                pushd output
                for f in *; do md5sum $f >$f.md5; done
                popd
                # Move to output directory ------------------------------------------------
                mkdir -p $(dirname {{output.txt}})
                mv output/result.full.out.txt.gz {{output.txt}}
                mv output/result.full.out.txt.gz.md5 {{output.txt_md5}}
                mv output/result.full.vcf.gz {{output.full_vcf}}
                mv output/result.full.vcf.gz.md5 {{output.full_vcf_md5}}
                mv output/result.full.vcf.gz.tbi {{output.full_tbi}}
                mv output/result.full.vcf.gz.tbi.md5 {{output.full_tbi_md5}}
                mv output/result.vcf.gz {{output.vcf}}
                mv output/result.vcf.gz.md5 {{output.vcf_md5}}
                mv output/result.vcf.gz.tbi {{output.tbi}}
                mv output/result.vcf.gz.tbi.md5 {{output.tbi_md5}}
                mv output/result.full.wig.txt.gz {{output.wig}}
                mv output/result.full.wig.txt.gz.md5 {{output.wig_md5}}
                # Write out information about conda installation.
                conda list >{{log.conda_list}}
                conda info >{{log.conda_info}}
                md5sum {{log.conda_list}} >{{log.conda_list_md5}}
                md5sum {{log.conda_info}} >{{log.conda_info_md5}}
                '''
        cluster_config['merge_all'] = {resources}
            """
            )
            .lstrip()
            .format(
                all_input=", ".join(map(repr, self.construct_parallel_result_files())),
                all_output=repr(self.get_all_output()),
                all_log=repr(self.get_all_log_files()),
                max_jobno=len(self.get_regions()) - 1,
                resources=repr(self.res_converter(self.merge_resources).to_res_dict()),
            )
        )
# Kick off execution using the wrapper class defined above.
ParallelMutectWrapper(snakemake).run().shutdown_logging()
# Compute MD5 sums of logs.
shell(
r"""
md5sum {snakemake.log.log} >{snakemake.log.log_md5}
"""
)
| 36.166667 | 119 | 0.508502 |
9c4b76d8009f699790de39c8a2081c09cc5ea36b | 1,967 | py | Python | homeassistant/components/gios/__init__.py | thehaxxa/core | bc9f0caf4ac1fd7b67d694cedb02876fe221c8e6 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/gios/__init__.py | thehaxxa/core | bc9f0caf4ac1fd7b67d694cedb02876fe221c8e6 | [
"Apache-2.0"
] | 70 | 2020-08-05T07:20:00.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/gios/__init__.py | zacwest/hass-core | b04247110736a6d700b477de8fe023cc7e79a022 | [
"Apache-2.0"
] | null | null | null | """The GIOS component."""
import logging
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from gios import ApiError, Gios, InvalidSensorsData, NoStationError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import API_TIMEOUT, CONF_STATION_ID, DOMAIN, SCAN_INTERVAL
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["air_quality"]
async def async_setup_entry(hass, entry):
    """Set up GIOS as config entry."""
    station_id = entry.data[CONF_STATION_ID]
    _LOGGER.debug("Using station_id: %s", station_id)

    coordinator = GiosDataUpdateCoordinator(
        hass, async_get_clientsession(hass), station_id
    )
    # Perform the first refresh up front so setup fails fast if the API is down.
    await coordinator.async_config_entry_first_refresh()

    # Store the coordinator for the platforms and for unload.
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry, dropping its coordinator from hass.data."""
    hass.data[DOMAIN].pop(entry.entry_id)
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    return unload_ok
class GiosDataUpdateCoordinator(DataUpdateCoordinator):
    """Define an object to hold GIOS data."""

    def __init__(self, hass, session, station_id):
        """Class to manage fetching GIOS data API."""
        self.gios = Gios(station_id, session)
        super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)

    async def _async_update_data(self):
        """Update data via library.

        Raises UpdateFailed on any known API/connection/data error so the
        coordinator marks the update as failed.
        """
        try:
            # Fix: async_timeout's timeout() is an async context manager;
            # the plain `with` form is deprecated and removed in
            # async_timeout >= 4, while `async with` works in 3.x as well.
            async with timeout(API_TIMEOUT):
                return await self.gios.async_update()
        except (
            ApiError,
            NoStationError,
            ClientConnectorError,
            InvalidSensorsData,
        ) as error:
            raise UpdateFailed(error) from error
65245848991781faac81776c4297423730762000 | 979 | py | Python | User/migrations/0004_auto_20180613_2107.py | Arianxx/ShareForum | b3bf6a3b2ace869cb88a0b41bbbd4575b6cdfd28 | [
"MIT"
] | 39 | 2018-09-08T04:40:59.000Z | 2021-09-10T02:02:04.000Z | User/migrations/0004_auto_20180613_2107.py | STARCASTPOD/BookForum | b3bf6a3b2ace869cb88a0b41bbbd4575b6cdfd28 | [
"MIT"
] | null | null | null | User/migrations/0004_auto_20180613_2107.py | STARCASTPOD/BookForum | b3bf6a3b2ace869cb88a0b41bbbd4575b6cdfd28 | [
"MIT"
] | 8 | 2018-09-08T08:50:45.000Z | 2020-03-13T15:38:56.000Z | # Generated by Django 2.0.5 on 2018-06-13 13:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rebuild the M2M links on UserCollection.

    Django cannot alter a ManyToManyField in place, so each field is
    removed and re-added with its new definition (related_name/target).
    """

    dependencies = [
        ('Discussion', '0002_auto_20180613_1819'),
        ('Content', '0001_initial'),
        ('User', '0003_auto_20180613_1836'),
    ]

    operations = [
        # Drop and recreate `books` so the new definition takes effect.
        migrations.RemoveField(
            model_name='usercollection',
            name='books',
        ),
        migrations.AddField(
            model_name='usercollection',
            name='books',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warns fields.W340) -- harmless but superfluous.
            field=models.ManyToManyField(null=True, related_name='collection_users', to='Content.Book'),
        ),
        # Same remove/re-add dance for `discussions`.
        migrations.RemoveField(
            model_name='usercollection',
            name='discussions',
        ),
        migrations.AddField(
            model_name='usercollection',
            name='discussions',
            field=models.ManyToManyField(null=True, related_name='collection_users', to='Discussion.Discuss'),
        ),
    ]
44270788c1d01db501021fcdb572cfc34aae098e | 917 | py | Python | cartopy/run_test.py | stevemkim/conda-recipes | 4fa403587b187d87cd6f77abf0b24b8c3f351564 | [
"Apache-2.0"
] | 3 | 2016-09-02T15:49:19.000Z | 2019-02-06T22:54:11.000Z | cartopy/run_test.py | stevemkim/conda-recipes | 4fa403587b187d87cd6f77abf0b24b8c3f351564 | [
"Apache-2.0"
] | 43 | 2015-03-03T14:07:21.000Z | 2018-02-16T16:31:34.000Z | cartopy/run_test.py | stevemkim/conda-recipes | 4fa403587b187d87cd6f77abf0b24b8c3f351564 | [
"Apache-2.0"
] | 24 | 2015-03-04T02:03:48.000Z | 2018-02-16T16:20:07.000Z | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
def sample_data(shape=(73, 145)):
    """Returns ``lons``, ``lats`` and ``data`` of some fake data.

    All three arrays share the given ``shape`` (n_lats, n_lons); lons and
    lats are in degrees, data is a zonal wave added to a zonal mean field.
    """
    n_lats, n_lons = shape
    lat_1d = np.linspace(-np.pi / 2, np.pi / 2, n_lats)
    lon_1d = np.linspace(0, 2 * np.pi, n_lons)
    lon_grid, lat_grid = np.meshgrid(lon_1d, lat_1d)

    wave = 0.75 * np.sin(2 * lat_grid) ** 8 * np.cos(4 * lon_grid)
    mean = 0.5 * np.cos(2 * lat_grid) * (np.sin(2 * lat_grid) ** 2 + 2)

    return np.rad2deg(lon_grid), np.rad2deg(lat_grid), wave + mean
def main():
    """Plot the synthetic sample field on a Mollweide map with coastlines."""
    lons, lats, data = sample_data()
    ax = plt.axes(projection=ccrs.Mollweide())
    ax.contourf(lons, lats, data,
                transform=ccrs.PlateCarree(),
                cmap='spectral')
    ax.coastlines()
    ax.set_global()
if __name__ == '__main__':
main()
| 22.925 | 68 | 0.585605 |
54e02a16cf4cb2eec8a406a7bdcbed7566639d5a | 4,035 | py | Python | setup.py | silver-dragon/hy | c7b2f47681f54b365da22ec8d65c7dbc59ab7501 | [
"MIT"
] | null | null | null | setup.py | silver-dragon/hy | c7b2f47681f54b365da22ec8d65c7dbc59ab7501 | [
"MIT"
] | null | null | null | setup.py | silver-dragon/hy | c7b2f47681f54b365da22ec8d65c7dbc59ab7501 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import glob
import importlib
import inspect
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.install import install
import fastentrypoints # Monkey-patches setuptools.
from get_version import __version__
os.chdir(os.path.split(os.path.abspath(__file__))[0])
PKG = "hy"
long_description = """Hy is a Python <--> Lisp layer. It helps
make things work nicer, and lets Python and the Hy lisp variant play
nice together. """
class Install(install):
    """Custom install command: installs the declared dependencies first,
    then byte-compiles the bundled Hy sources before finishing the install.
    """
    def __compile_hy_bytecode(self):
        for path in sorted(glob.iglob('hy/**.hy', recursive=True)):
            # NOTE(review): cache_from_source only *computes* the would-be
            # .pyc path and the result is discarded, so this loop looks like
            # a no-op; presumably actual byte-compilation was intended here.
            # Confirm against the project's history.
            importlib.util.cache_from_source(path, optimize=self.optimize)

    def run(self):
        # Don't bother messing around with deps if they wouldn't be installed anyway.
        # Code is based on setuptools's install.py.
        if not (self.old_and_unmanageable or self.single_version_externally_managed
                or not self._called_from_setup(inspect.currentframe())):
            # Install install_requires eagerly via easy_install so the Hy
            # compiler's own dependencies are importable below.
            easy_install = self.distribution.get_command_class('easy_install')
            cmd = easy_install(
                self.distribution, args="x", root=self.root, record=self.record,
            )
            cmd.ensure_finalized()
            cmd.always_copy_from = '.'
            cmd.package_index.scan(glob.glob('*.egg'))
            cmd.args = self.distribution.install_requires

            # Avoid deprecation warnings on new setuptools versions.
            if 'show_deprecation' in inspect.signature(cmd.run).parameters:
                cmd.run(show_deprecation=False)
            else:
                cmd.run()

            # Make sure any new packages get picked up.
            import site
            importlib.reload(site)
            importlib.invalidate_caches()

        self.__compile_hy_bytecode()

        # The deps won't be reinstalled because of:
        # https://github.com/pypa/setuptools/issues/456
        return install.run(self)
setup(
name=PKG,
version=__version__,
install_requires=[
'rply>=0.7.7',
'funcparserlib>=0.3.6',
'colorama',
'astor>=0.8 ; python_version < "3.9"',
'pyreadline>=2.1 ; os_name == "nt"',
],
cmdclass=dict(install=Install),
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
'hy3 = hy.cmdline:hy_main',
'hyc = hy.cmdline:hyc_main',
'hyc3 = hy.cmdline:hyc_main',
'hy2py = hy.cmdline:hy2py_main',
'hy2py3 = hy.cmdline:hy2py_main',
]
},
packages=find_packages(exclude=['tests*']),
package_data={
'hy.contrib': ['*.hy', '__pycache__/*'],
'hy.core': ['*.hy', '__pycache__/*'],
'hy.extra': ['*.hy', '__pycache__/*'],
},
data_files=[
('get_version', ['get_version.py'])
],
author="Paul Tagliamonte",
author_email="tag@pault.ag",
long_description=long_description,
description='Lisp and Python love each other.',
license="Expat",
url="http://hylang.org/",
platforms=['any'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries",
]
)
| 33.625 | 85 | 0.610657 |
b4897a82875296b3943857794fbd2ebfe8968d0a | 11,881 | py | Python | baselineTagger.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 48 | 2018-05-15T12:46:36.000Z | 2021-03-11T09:34:10.000Z | baselineTagger.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 1 | 2018-10-28T21:11:47.000Z | 2018-10-31T20:31:09.000Z | baselineTagger.py | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 | [
"MIT"
] | 6 | 2018-07-03T01:28:41.000Z | 2020-01-23T13:25:49.000Z | from __future__ import division, print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import argparse
import pdb
import numpy as np
import os
import pickle
import utils, models
parser = argparse.ArgumentParser()
parser.add_argument("--treebank_path", type=str,
default="/projects/tir2/users/cmalaviy/ud_exp/ud-treebanks-v2.1/")
parser.add_argument("--optim", type=str, default='adam', choices=["sgd","adam","adagrad","rmsprop"])
parser.add_argument("--emb_dim", type=int, default=128)
parser.add_argument("--hidden_dim", type=int, default=256)
parser.add_argument("--mlp_dim", type=int, default=128)
parser.add_argument("--n_layers", type=int, default=2)
parser.add_argument("--dropout", type=float, default=0.2)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--langs", type=str, default="uk",
help="Languages separated by delimiter '/' with last language being target language")
parser.add_argument("--tgt_size", type=int, default=None)
parser.add_argument("--model_name", type=str, default="model_pos")
parser.add_argument("--continue_train", action='store_true')
parser.add_argument("--model_type", type=str, default="baseline", choices=["universal","joint","mono","specific","baseline"])
parser.add_argument("--sum_word_char", action='store_true')
parser.add_argument("--sent_attn", action='store_true')
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--test", action='store_true')
parser.add_argument("--gpu", action='store_true')
parser.add_argument("--seed", type=int, default=42)
args = parser.parse_args()
print(args)
# Set seed
torch.manual_seed(args.seed)
# Create dictionaries for language codes, morph tags and pos tags
langs = args.langs.split("/")
args.model_name = args.model_type + "".join(["_" + l for l in langs])
if args.sum_word_char:
args.model_name += "_wc-sum"
if args.sent_attn:
args.model_name += "_sent-attn"
if args.tgt_size:
args.model_name += "-" + str(args.tgt_size)
lang_to_code, code_to_lang = utils.get_lang_code_dicts()
print("Reading training data...")
training_data_langwise, train_tgt_labels = utils.read_conll(args.treebank_path, langs, code_to_lang, tgt_size=args.tgt_size, train_or_dev="train")
training_data = []
if args.tgt_size==100 and args.model_type!="mono":
training_data_langwise[langs[-1]] = training_data_langwise[langs[-1]] * 10
elif args.tgt_size==1000 and args.model_type!="mono":
training_data_langwise[langs[-1]] = training_data_langwise[langs[-1]]
for l in langs:
training_data += training_data_langwise[l]
labels_to_ix = train_tgt_labels
# t = str(args.tgt_size) if args.tgt_size is not None else ""
# with open('labels-'+langs[0]+t+'.txt', 'w') as file:
# file.write(pickle.dumps(labels_to_ix))
# labels_to_ix = dict([(b, a) for a, b in enumerate(train_tgt_labels)])
# labels_to_ix = {v: k for k, v in ix_to_labels.iteritems()}
dev_data_langwise, dev_tgt_labels = utils.read_conll(args.treebank_path, [langs[-1]], code_to_lang, train_or_dev="dev")
dev_data = dev_data_langwise[langs[-1]]
if args.test:
test_lang = langs[-1]
test_data_langwise, test_tgt_labels = utils.read_conll(args.treebank_path, [test_lang], code_to_lang, train_or_dev="test", test=True)
test_data = test_data_langwise[test_lang]
word_to_ix = {}
char_to_ix = {}
word_freq = {}
for sent, _ in training_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
if word_to_ix[word] not in word_freq:
word_freq[word_to_ix[word]] = 1
else:
word_freq[word_to_ix[word]] += 1
for char in word:
if char not in char_to_ix:
char_to_ix[char] = len(char_to_ix)
if args.model_type=='universal':
for lang in langs:
char_to_ix[lang] = len(char_to_ix)
# training_data_langwise.sort(key=lambda x: -len(x[0]))
# test_data.sort(key=lambda x: -len(x[0]))
# train_order = [x*args.batch_size for x in range(int((len(training_data_langwise)-1)/args.batch_size + 1))]
# test_order = [x*args.batch_size for x in range(int((len(test_data)-1)/args.batch_size + 1))]
def main():
    """Train (or load) the baseline BiLSTM tagger and optionally evaluate it.

    Reads hyper-parameters and data from the module-level globals built at
    import time (args, langs, training_data_langwise, word/char/label
    vocabularies). Trains only when no saved model exists or
    --continue_train is set; saves the model after every epoch.
    """

    if not os.path.isfile(args.model_name) or args.continue_train:

        if args.continue_train:
            # Resume from the saved model (loaded onto CPU, moved to GPU if requested).
            print("Loading tagger model from " + args.model_name + "...")
            tagger_model = torch.load(args.model_name, map_location=lambda storage, loc: storage)
            if args.gpu:
                tagger_model = tagger_model.cuda()
        else:
            # Fresh model built from the vocabulary sizes collected at import time.
            tagger_model = models.BiLSTMTagger(args.model_type, args.sum_word_char, word_freq, args.sent_attn, langs, args.emb_dim, args.hidden_dim,
                                 args.mlp_dim, len(char_to_ix), len(word_to_ix), len(labels_to_ix), args.n_layers, args.dropout, args.gpu)
            if args.gpu:
                tagger_model = tagger_model.cuda()

        loss_function = nn.NLLLoss()

        # Optimizer selection per --optim; only SGD gets an explicit lr.
        if args.optim=="sgd":
            optimizer = optim.SGD(tagger_model.parameters(), lr=0.1)
        elif args.optim=="adam":
            optimizer = optim.Adam(tagger_model.parameters())
        elif args.optim=="adagrad":
            optimizer = optim.Adagrad(tagger_model.parameters())
        elif args.optim=="rmsprop":
            optimizer = optim.RMSprop(tagger_model.parameters())

        print("Training tagger model...")
        patience_counter = 0
        prev_avg_tok_accuracy = 0

        for epoch in xrange(args.epochs):
            accuracies = []
            sent = 0
            tokens = 0
            cum_loss = 0
            correct = 0
            print("Starting epoch %d .." %epoch)

            for lang in langs:
                # "universal" models mark each word with its language id token.
                lang_id = []
                if args.model_type=="universal":
                    lang_id = [lang]
                for sentence, morph in training_data_langwise[lang]:
                    sent += 1
                    if sent%100==0:
                        # NOTE: the backslash-continued string keeps the leading
                        # indentation of each continuation line in the message.
                        print("[Epoch %d] \
                            Sentence %d/%d, \
                            Tokens %d \
                            Cum_Loss: %f \
                            Average Accuracy: %f"
                            % (epoch, sent, len(training_data), tokens,
                            cum_loss/tokens, correct/tokens))

                    tagger_model.zero_grad()

                    sent_in = []
                    tokens += len(sentence)

                    # Character sequence per word, wrapped in the language id (if any).
                    for word in sentence:
                        s_appended_word = lang_id + [c for c in word] + lang_id
                        word_in = utils.prepare_sequence(s_appended_word, char_to_ix, args.gpu)
                        # targets = utils.prepare_sequence(s_appended_word[1:], char_to_ix, args.gpu)
                        sent_in.append(word_in)

                    # sent_in = torch.stack(sent_in)

                    # Reset recurrent states for every sentence.
                    tagger_model.char_hidden = tagger_model.init_hidden()
                    tagger_model.hidden = tagger_model.init_hidden()

                    targets = utils.prepare_sequence(morph, labels_to_ix, args.gpu)

                    if args.sum_word_char:
                        word_seq = utils.prepare_sequence(sentence, word_to_ix, args.gpu)
                    else:
                        word_seq = None

                    if args.model_type=="specific" or args.model_type=="joint":
                        tag_scores = tagger_model(sent_in, word_idxs=word_seq, lang=lang)
                    else:
                        tag_scores = tagger_model(sent_in, word_idxs=word_seq)

                    values, indices = torch.max(tag_scores, 1)
                    out_tags = indices.cpu().data.numpy().flatten()
                    correct += np.count_nonzero(out_tags==targets.cpu().data.numpy())

                    loss = loss_function(tag_scores, targets)
                    cum_loss += loss.cpu().data[0]
                    loss.backward()
                    optimizer.step()

            print("Loss: %f" % loss.cpu().data.numpy())
            print("Accuracy: %f" %(correct/tokens))
            print("Saving model..")
            torch.save(tagger_model, args.model_name)

            print("Evaluating on dev set...")
            #avg_tok_accuracy, f1_score = eval(tagger_model, curEpoch=epoch)

            # Early Stopping
            #if avg_tok_accuracy <= prev_avg_tok_accuracy:
            #    patience_counter += 1
            #    if patience_counter==args.patience:
            #        print("Model hasn't improved on dev set for %d epochs. Stopping Training." % patience_counter)
            #        break
            #prev_avg_tok_accuracy = avg_tok_accuracy

    else:
        print("Loading tagger model from " + args.model_name + "...")
        tagger_model = torch.load(args.model_name, map_location=lambda storage, loc: storage)
        if args.gpu:
            tagger_model = tagger_model.cuda()

    if args.test:
        avg_tok_accuracy, f1_score = eval(tagger_model, dev_or_test="test")
def eval(tagger_model, curEpoch=None, dev_or_test="dev"):
    """Evaluate the tagger on the dev or test set of the target language.

    Returns (avg_tok_accuracy, macro_f1) and writes per-run results via
    utils.computeF1 plus an accuracy line to '<prefix>_results_f1.txt'.
    NOTE: shadows the builtin ``eval``; the name is kept because main() and
    other callers use it.
    """
    eval_data = dev_data if dev_or_test=="dev" else test_data

    correct = 0
    toks = 0
    hypTags = []
    goldTags = []

    print("Starting evaluation on %s set... (%d sentences)" % (dev_or_test, len(eval_data)))

    lang_id = []
    if args.model_type=="universal":
        # BUG FIX: the original referenced an undefined name ``lang`` here
        # (it only exists as a loop variable inside main()), which raised a
        # NameError for universal models. Evaluation is always on the target
        # language, i.e. the last entry of ``langs``.
        lang_id = [langs[-1]]

    for sentence, morph in eval_data:

        tagger_model.zero_grad()
        # Reset recurrent states for every sentence.
        tagger_model.char_hidden = tagger_model.init_hidden()
        tagger_model.hidden = tagger_model.init_hidden()

        sent_in = []

        # Character sequence per word, wrapped in the language id (if any).
        for word in sentence:
            s_appended_word = lang_id + [c for c in word] + lang_id
            word_in = utils.prepare_sequence(s_appended_word, char_to_ix, args.gpu)
            sent_in.append(word_in)

        targets = utils.prepare_sequence(morph, labels_to_ix, args.gpu)

        if args.sum_word_char:
            word_seq = utils.prepare_sequence(sentence, word_to_ix, args.gpu)
        else:
            word_seq = None

        if args.model_type=="specific":
            tag_scores = tagger_model(sent_in, word_idxs=word_seq, lang=langs[-1], test=True)
        else:
            tag_scores = tagger_model(sent_in, word_idxs=word_seq, test=True)

        values, indices = torch.max(tag_scores, 1)
        out_tags = indices.cpu().data.numpy().flatten()

        # NOTE(review): indexing labels_to_ix by an integer assumes it is a
        # sequence mapping index -> label; confirm against utils.read_conll.
        hypTags += [labels_to_ix[idx] for idx in out_tags]
        goldTags.append(morph)

        targets = targets.cpu().data.numpy()
        correct += np.count_nonzero(out_tags==targets)
        toks += len(sentence)

    avg_tok_accuracy = correct / toks

    # Build the file-name prefix encoding the run configuration.
    prefix = args.model_type + "_"
    if args.sum_word_char:
        prefix += "_wc-sum"

    if dev_or_test=="dev":
        prefix += "-".join([l for l in langs]) + "_" + dev_or_test + "_" + str(curEpoch)
    else:
        prefix += "-".join([l for l in langs]) + "_" + dev_or_test

    if args.sent_attn:
        prefix += "-sent_attn"

    if args.tgt_size:
        prefix += "_" + str(args.tgt_size)

    # Flatten the per-sentence gold tag lists for F1 computation.
    finalTgts = []
    for tags in goldTags:
        for tag in tags:
            finalTgts.append(tag)

    f1_score, f1_micro_score = utils.computeF1(hypTags, finalTgts, prefix, labels_to_ix, baseline=True, write_results=True)

    print("Test Set Accuracy: %f" % avg_tok_accuracy)
    print("Test Set Avg F1 Score (Macro): %f" % f1_score)
    print("Test Set Avg F1 Score (Micro): %f" % f1_micro_score)

    with open(prefix + '_results_f1.txt', 'a') as file:
        file.write("\nAccuracy: " + str(avg_tok_accuracy) + "\n")

    return avg_tok_accuracy, f1_score
if __name__=="__main__":
main()
| 39.34106 | 149 | 0.616531 |
59353379ac8b35cf2745507680ae2ff35fb1f143 | 14,056 | py | Python | st2common/tests/unit/test_executions.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/test_executions.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 15 | 2021-02-11T22:58:54.000Z | 2021-08-06T18:03:47.000Z | st2common/tests/unit/test_executions.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 1 | 2021-07-10T15:02:29.000Z | 2021-07-10T15:02:29.000Z | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import bson
import datetime
from st2tests.fixtures.packs import executions as fixture
from st2tests import DbTestCase
from st2common.util import isotime
from st2common.util import date as date_utils
from st2common.persistence.execution import ActionExecution
from st2common.models.api.execution import ActionExecutionAPI
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from six.moves import range
class TestActionExecutionHistoryModel(DbTestCase):
    def setUp(self):
        """Build fake execution records: two subtasks plus their parent workflow."""
        super(TestActionExecutionHistoryModel, self).setUp()

        # Fake execution record for action liveactions triggered by workflow runner.
        self.fake_history_subtasks = [
            {
                'id': str(bson.ObjectId()),
                'action': copy.deepcopy(fixture.ARTIFACTS['actions']['local']),
                'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['run-local']),
                'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['task1']),
                'status': fixture.ARTIFACTS['liveactions']['task1']['status'],
                'start_timestamp': fixture.ARTIFACTS['liveactions']['task1']['start_timestamp'],
                'end_timestamp': fixture.ARTIFACTS['liveactions']['task1']['end_timestamp']
            },
            {
                'id': str(bson.ObjectId()),
                'action': copy.deepcopy(fixture.ARTIFACTS['actions']['local']),
                'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['run-local']),
                'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['task2']),
                'status': fixture.ARTIFACTS['liveactions']['task2']['status'],
                'start_timestamp': fixture.ARTIFACTS['liveactions']['task2']['start_timestamp'],
                'end_timestamp': fixture.ARTIFACTS['liveactions']['task2']['end_timestamp']
            }
        ]

        # Fake execution record for a workflow action execution triggered by rule.
        self.fake_history_workflow = {
            'id': str(bson.ObjectId()),
            'trigger': copy.deepcopy(fixture.ARTIFACTS['trigger']),
            'trigger_type': copy.deepcopy(fixture.ARTIFACTS['trigger_type']),
            'trigger_instance': copy.deepcopy(fixture.ARTIFACTS['trigger_instance']),
            'rule': copy.deepcopy(fixture.ARTIFACTS['rule']),
            'action': copy.deepcopy(fixture.ARTIFACTS['actions']['chain']),
            'runner': copy.deepcopy(fixture.ARTIFACTS['runners']['action-chain']),
            'liveaction': copy.deepcopy(fixture.ARTIFACTS['liveactions']['workflow']),
            'children': [task['id'] for task in self.fake_history_subtasks],
            'status': fixture.ARTIFACTS['liveactions']['workflow']['status'],
            'start_timestamp': fixture.ARTIFACTS['liveactions']['workflow']['start_timestamp'],
            'end_timestamp': fixture.ARTIFACTS['liveactions']['workflow']['end_timestamp']
        }

        # Assign parent to the execution records for the subtasks.
        for task in self.fake_history_subtasks:
            task['parent'] = self.fake_history_workflow['id']
def test_model_complete(self):
# Create API object.
obj = ActionExecutionAPI(**copy.deepcopy(self.fake_history_workflow))
self.assertDictEqual(obj.trigger, self.fake_history_workflow['trigger'])
self.assertDictEqual(obj.trigger_type, self.fake_history_workflow['trigger_type'])
self.assertDictEqual(obj.trigger_instance, self.fake_history_workflow['trigger_instance'])
self.assertDictEqual(obj.rule, self.fake_history_workflow['rule'])
self.assertDictEqual(obj.action, self.fake_history_workflow['action'])
self.assertDictEqual(obj.runner, self.fake_history_workflow['runner'])
self.assertEqual(obj.liveaction, self.fake_history_workflow['liveaction'])
self.assertIsNone(getattr(obj, 'parent', None))
self.assertListEqual(obj.children, self.fake_history_workflow['children'])
# Convert API object to DB model.
model = ActionExecutionAPI.to_model(obj)
self.assertEqual(str(model.id), obj.id)
self.assertDictEqual(model.trigger, self.fake_history_workflow['trigger'])
self.assertDictEqual(model.trigger_type, self.fake_history_workflow['trigger_type'])
self.assertDictEqual(model.trigger_instance, self.fake_history_workflow['trigger_instance'])
self.assertDictEqual(model.rule, self.fake_history_workflow['rule'])
self.assertDictEqual(model.action, self.fake_history_workflow['action'])
self.assertDictEqual(model.runner, self.fake_history_workflow['runner'])
doc = copy.deepcopy(self.fake_history_workflow['liveaction'])
doc['start_timestamp'] = doc['start_timestamp']
doc['end_timestamp'] = doc['end_timestamp']
self.assertDictEqual(model.liveaction, doc)
self.assertIsNone(getattr(model, 'parent', None))
self.assertListEqual(model.children, self.fake_history_workflow['children'])
# Convert DB model to API object.
obj = ActionExecutionAPI.from_model(model)
self.assertEqual(str(model.id), obj.id)
self.assertDictEqual(obj.trigger, self.fake_history_workflow['trigger'])
self.assertDictEqual(obj.trigger_type, self.fake_history_workflow['trigger_type'])
self.assertDictEqual(obj.trigger_instance, self.fake_history_workflow['trigger_instance'])
self.assertDictEqual(obj.rule, self.fake_history_workflow['rule'])
self.assertDictEqual(obj.action, self.fake_history_workflow['action'])
self.assertDictEqual(obj.runner, self.fake_history_workflow['runner'])
self.assertDictEqual(obj.liveaction, self.fake_history_workflow['liveaction'])
self.assertIsNone(getattr(obj, 'parent', None))
self.assertListEqual(obj.children, self.fake_history_workflow['children'])
def test_crud_complete(self):
    """Round-trip a fully populated execution document through the DB layer.

    Covers create, read-back field equality, update of ``children`` and
    delete (which must make subsequent lookups raise).
    """
    # Create the DB record.
    obj = ActionExecutionAPI(**copy.deepcopy(self.fake_history_workflow))
    ActionExecution.add_or_update(ActionExecutionAPI.to_model(obj))
    model = ActionExecution.get_by_id(obj.id)
    self.assertEqual(str(model.id), obj.id)
    self.assertDictEqual(model.trigger, self.fake_history_workflow['trigger'])
    self.assertDictEqual(model.trigger_type, self.fake_history_workflow['trigger_type'])
    self.assertDictEqual(model.trigger_instance, self.fake_history_workflow['trigger_instance'])
    self.assertDictEqual(model.rule, self.fake_history_workflow['rule'])
    self.assertDictEqual(model.action, self.fake_history_workflow['action'])
    self.assertDictEqual(model.runner, self.fake_history_workflow['runner'])
    # Deep-copy so comparisons can never mutate the shared fixture.
    # (The original also reassigned doc['start_timestamp'] / doc['end_timestamp']
    # to themselves -- dead no-op lines, removed here.)
    doc = copy.deepcopy(self.fake_history_workflow['liveaction'])
    self.assertDictEqual(model.liveaction, doc)
    self.assertIsNone(getattr(model, 'parent', None))
    self.assertListEqual(model.children, self.fake_history_workflow['children'])
    # Update the DB record.
    children = [str(bson.ObjectId()), str(bson.ObjectId())]
    model.children = children
    ActionExecution.add_or_update(model)
    model = ActionExecution.get_by_id(obj.id)
    self.assertListEqual(model.children, children)
    # Delete the DB record.
    ActionExecution.delete(model)
    self.assertRaises(StackStormDBObjectNotFoundError, ActionExecution.get_by_id, obj.id)
def test_model_partial(self):
    """API <-> DB model conversion for a partially populated (subtask) execution."""
    # Create API object.
    obj = ActionExecutionAPI(**copy.deepcopy(self.fake_history_subtasks[0]))
    # Fields absent from the fixture must not exist on the API object.
    self.assertIsNone(getattr(obj, 'trigger', None))
    self.assertIsNone(getattr(obj, 'trigger_type', None))
    self.assertIsNone(getattr(obj, 'trigger_instance', None))
    self.assertIsNone(getattr(obj, 'rule', None))
    self.assertDictEqual(obj.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(obj.runner, self.fake_history_subtasks[0]['runner'])
    self.assertDictEqual(obj.liveaction, self.fake_history_subtasks[0]['liveaction'])
    self.assertEqual(obj.parent, self.fake_history_subtasks[0]['parent'])
    self.assertIsNone(getattr(obj, 'children', None))
    # Convert API object to DB model.
    model = ActionExecutionAPI.to_model(obj)
    self.assertEqual(str(model.id), obj.id)
    # Missing dict fields default to {} on the DB model.
    self.assertDictEqual(model.trigger, {})
    self.assertDictEqual(model.trigger_type, {})
    self.assertDictEqual(model.trigger_instance, {})
    self.assertDictEqual(model.rule, {})
    self.assertDictEqual(model.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(model.runner, self.fake_history_subtasks[0]['runner'])
    doc = copy.deepcopy(self.fake_history_subtasks[0]['liveaction'])
    # NOTE(review): the next two lines are no-op self-assignments -- likely
    # residue of a removed timestamp parse/format round-trip; confirm and remove.
    doc['start_timestamp'] = doc['start_timestamp']
    doc['end_timestamp'] = doc['end_timestamp']
    self.assertDictEqual(model.liveaction, doc)
    self.assertEqual(model.parent, self.fake_history_subtasks[0]['parent'])
    # Missing list fields default to [] on the DB model.
    self.assertListEqual(model.children, [])
    # Convert DB model to API object.
    obj = ActionExecutionAPI.from_model(model)
    self.assertEqual(str(model.id), obj.id)
    # Empty dicts on the model round-trip back to absent attributes on the API object.
    self.assertIsNone(getattr(obj, 'trigger', None))
    self.assertIsNone(getattr(obj, 'trigger_type', None))
    self.assertIsNone(getattr(obj, 'trigger_instance', None))
    self.assertIsNone(getattr(obj, 'rule', None))
    self.assertDictEqual(obj.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(obj.runner, self.fake_history_subtasks[0]['runner'])
    self.assertDictEqual(obj.liveaction, self.fake_history_subtasks[0]['liveaction'])
    self.assertEqual(obj.parent, self.fake_history_subtasks[0]['parent'])
    self.assertIsNone(getattr(obj, 'children', None))
def test_crud_partial(self):
    """Round-trip a partially populated (subtask) execution through the DB layer.

    Same create / read / update / delete cycle as ``test_crud_complete`` but
    verifies the defaulting of missing fields ({} for dicts, [] for lists).
    """
    # Create the DB record.
    obj = ActionExecutionAPI(**copy.deepcopy(self.fake_history_subtasks[0]))
    ActionExecution.add_or_update(ActionExecutionAPI.to_model(obj))
    model = ActionExecution.get_by_id(obj.id)
    self.assertEqual(str(model.id), obj.id)
    self.assertDictEqual(model.trigger, {})
    self.assertDictEqual(model.trigger_type, {})
    self.assertDictEqual(model.trigger_instance, {})
    self.assertDictEqual(model.rule, {})
    self.assertDictEqual(model.action, self.fake_history_subtasks[0]['action'])
    self.assertDictEqual(model.runner, self.fake_history_subtasks[0]['runner'])
    # Deep-copy so comparisons can never mutate the shared fixture.
    # (The original also reassigned doc['start_timestamp'] / doc['end_timestamp']
    # to themselves -- dead no-op lines, removed here.)
    doc = copy.deepcopy(self.fake_history_subtasks[0]['liveaction'])
    self.assertDictEqual(model.liveaction, doc)
    self.assertEqual(model.parent, self.fake_history_subtasks[0]['parent'])
    self.assertListEqual(model.children, [])
    # Update the DB record.
    children = [str(bson.ObjectId()), str(bson.ObjectId())]
    model.children = children
    ActionExecution.add_or_update(model)
    model = ActionExecution.get_by_id(obj.id)
    self.assertListEqual(model.children, children)
    # Delete the DB record.
    ActionExecution.delete(model)
    self.assertRaises(StackStormDBObjectNotFoundError, ActionExecution.get_by_id, obj.id)
def test_datetime_range(self):
    """Query executions by a start_timestamp range expressed as 'A..B'."""
    base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Insert 60 records one second apart.
    for i in range(60):
        timestamp = base + datetime.timedelta(seconds=i)
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc['id'] = str(bson.ObjectId())
        doc['start_timestamp'] = isotime.format(timestamp)
        obj = ActionExecutionAPI(**doc)
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(obj))
    # Inclusive window of 10 seconds -> 10 records.
    dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    objs = ActionExecution.query(start_timestamp=dt_range)
    self.assertEqual(len(objs), 10)
    # NOTE(review): the reversed bounds are expected to return the same 10
    # records, i.e. the query appears to be order-insensitive -- confirm
    # against the query implementation.
    dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    objs = ActionExecution.query(start_timestamp=dt_range)
    self.assertEqual(len(objs), 10)
def test_sort_by_start_timestamp(self):
    """Query with order_by and verify ascending vs. descending ordering."""
    base = date_utils.add_utc_tz(datetime.datetime(2014, 12, 25, 0, 0, 0))
    # Insert 60 records one second apart.
    for i in range(60):
        timestamp = base + datetime.timedelta(seconds=i)
        doc = copy.deepcopy(self.fake_history_subtasks[0])
        doc['id'] = str(bson.ObjectId())
        doc['start_timestamp'] = isotime.format(timestamp)
        obj = ActionExecutionAPI(**doc)
        ActionExecution.add_or_update(ActionExecutionAPI.to_model(obj))
    # Ascending order: first record strictly earlier than the last of the 10.
    dt_range = '2014-12-25T00:00:10Z..2014-12-25T00:00:19Z'
    objs = ActionExecution.query(start_timestamp=dt_range,
                                 order_by=['start_timestamp'])
    self.assertLess(objs[0]['start_timestamp'],
                    objs[9]['start_timestamp'])
    # Descending order ('-' prefix): last record strictly earlier than the first.
    dt_range = '2014-12-25T00:00:19Z..2014-12-25T00:00:10Z'
    objs = ActionExecution.query(start_timestamp=dt_range,
                                 order_by=['-start_timestamp'])
    self.assertLess(objs[9]['start_timestamp'],
                    objs[0]['start_timestamp'])
| 53.444867 | 100 | 0.683552 |
6e1bdaed34ec244a1caf86e8731264a5c9f03a15 | 15,516 | py | Python | tests/test_timeseries.py | sixy6e/PyRate | 8203989e7d9b675491e79610cff04728d8dc3973 | [
"Apache-2.0"
] | 145 | 2017-01-20T08:47:37.000Z | 2022-03-28T07:37:28.000Z | tests/test_timeseries.py | sixy6e/PyRate | 8203989e7d9b675491e79610cff04728d8dc3973 | [
"Apache-2.0"
] | 215 | 2017-01-24T22:42:05.000Z | 2022-03-16T00:03:34.000Z | tests/test_timeseries.py | sixy6e/PyRate | 8203989e7d9b675491e79610cff04728d8dc3973 | [
"Apache-2.0"
] | 63 | 2017-02-13T12:52:16.000Z | 2022-03-30T06:47:38.000Z | # This Python module is part of the PyRate software package.
#
# Copyright 2021 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
This Python module contains tests for the timeseries.py PyRate module.
"""
import os
import shutil
from copy import deepcopy
import pytest
from datetime import date, timedelta
from numpy import nan, asarray, where, array
import numpy as np
from numpy.testing import assert_array_almost_equal
import pyrate.constants as C
import pyrate.core.orbital
import pyrate.core.prepifg_helper
import pyrate.core.ref_phs_est
import pyrate.core.refpixel
import tests.common as common
from pyrate.core import mst, covariance
from pyrate import correct, prepifg, conv2tif
from pyrate.configuration import Configuration
from pyrate.core.timeseries import time_series, linear_rate_pixel, linear_rate_array, TimeSeriesError
def default_params():
    """Build and return a fresh default configuration dict for time series tests."""
    config = {
        C.TIME_SERIES_METHOD: 1,
        C.TIME_SERIES_PTHRESH: 0,
        C.TIME_SERIES_SM_ORDER: 2,
        C.TIME_SERIES_SM_FACTOR: -0.25,
        C.PARALLEL: 0,
        C.PROCESSES: 1,
        C.NAN_CONVERSION: 1,
        C.NO_DATA_VALUE: 0,
    }
    return config
class SinglePixelIfg(object):
    """Minimal stand-in for an interferogram covering exactly one pixel.

    Used solely by the unit tests; mimics the small subset of the real
    Ifg interface that ``time_series`` touches.
    """

    def __init__(self, first, second, phase, nan_fraction):
        # Fixed 1x1 raster dimensions.
        self.nrows = 1
        self.ncols = 1
        # Epoch dates spanned by this interferogram.
        self.first = first
        self.second = second
        # Single phase observation wrapped in a 1x1 array.
        self.phase_data = asarray([[phase]])
        self.nan_fraction = asarray([nan_fraction])

    def convert_to_nans(self, val=0):
        """Replace every occurrence of ``val`` in the phase data with NaN.

        val - value to convert, default is 0
        """
        matches = self.phase_data == val
        self.phase_data = where(matches, nan, self.phase_data)
        self.nan_converted = True
class TestTimeSeries:
    """Verifies error checking capabilities of the time_series function"""

    @classmethod
    def setup_class(cls):
        # Shared fixtures: real small-test interferograms plus their
        # variance-covariance matrix, reused by every test in the class.
        cls.ifgs = common.small_data_setup()
        cls.params = default_params()
        cls.mstmat = mst.mst_boolean_array(cls.ifgs)
        r_dist = covariance.RDist(cls.ifgs[0])()
        cls.maxvar = [covariance.cvd(i.data_path, cls.params, r_dist)[0]
                      for i in cls.ifgs]
        cls.vcmt = covariance.get_vcmt(cls.ifgs, cls.maxvar)

    def test_time_series_unit(self):
        """
        Checks that the code works the same as the calculated example
        """
        # Hand-crafted 6-epoch network of 8 single-pixel interferograms.
        ifirst = asarray([1, 1, 2, 2, 3, 3, 4, 5])
        isecond = asarray([2, 4, 3, 4, 5, 6, 6, 6])
        timeseries = asarray([0.0, 0.1, 0.6, 0.8, 1.1, 1.3])
        phase = asarray([0.5, 4, 2.5, 3.5, 2.5, 3.5, 2.5, 1])
        nan_fraction = asarray([0.5, 0.4, 0.2, 0.3, 0.1, 0.3, 0.2, 0.1])

        # Convert the fractional-year offsets into concrete dates.
        now = date.today()
        dates = [now + timedelta(days=(t*365.25)) for t in timeseries]
        dates.sort()
        first = [dates[m_num - 1] for m_num in ifirst]
        second = [dates[s_num - 1] for s_num in isecond]

        # NOTE(review): this replaces cls.ifgs from setup_class for this test
        # but still uses cls.vcmt built from the real ifgs -- confirm the
        # dimensions are intentionally compatible.
        self.ifgs = [SinglePixelIfg(m, s, p, n) for m, s, p, n in
                     zip(first, second, phase, nan_fraction)]
        tsincr, tscum, tsvel = time_series(
            self.ifgs, params=self.params, vcmt=self.vcmt, mst=None)
        expected = asarray([[[0.50, 3.0, 4.0, 5.5, 6.5]]])
        assert_array_almost_equal(tscum, expected, decimal=2)
class TestLegacyTimeSeriesEquality:
    """Compares PyRate time series (method 1) against legacy reference output."""

    @classmethod
    def setup_class(cls):
        # Run the full pre-processing pipeline once for the whole class:
        # ROI_PAC conversion, multi-looking, orbit/reference-phase correction.
        params = Configuration(common.TEST_CONF_ROIPAC).__dict__
        params[C.TEMP_MLOOKED_DIR] = os.path.join(params[C.OUT_DIR],
                                                  C.TEMP_MLOOKED_DIR)
        conv2tif.main(params)
        prepifg.main(params)
        params[C.REF_EST_METHOD] = 2
        xlks, _, crop = pyrate.core.prepifg_helper.transform_params(params)
        dest_paths, headers = common.repair_params_for_correct_tests(params[C.INTERFEROGRAM_DIR], params)
        correct._copy_mlooked(params)
        # Work on copies in the temp dir so the originals are untouched.
        copied_dest_paths = [os.path.join(params[C.TEMP_MLOOKED_DIR], os.path.basename(d)) for d in dest_paths]
        del dest_paths
        # start run_pyrate copy
        ifgs = common.pre_prepare_ifgs(copied_dest_paths, params)
        mst_grid = common.mst_calculation(copied_dest_paths, params)
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(params)
        params[C.REFX] = refx
        params[C.REFY] = refy
        params[C.ORBFIT_OFFSET] = True
        # Estimate and remove orbit errors
        pyrate.core.orbital.remove_orbital_error(ifgs, params)
        ifgs = common.prepare_ifgs_without_phase(copied_dest_paths, params)
        for ifg in ifgs:
            ifg.close()
        correct._update_params_with_tiles(params)
        _, ifgs = pyrate.core.ref_phs_est.ref_phase_est_wrapper(params)
        ifgs[0].open()
        r_dist = covariance.RDist(ifgs[0])()
        ifgs[0].close()
        maxvar = [covariance.cvd(i, params, r_dist)[0] for i in copied_dest_paths]
        for ifg in ifgs:
            ifg.open()
        vcmt = covariance.get_vcmt(ifgs, maxvar)
        # Re-open each ifg with an explicit nodata value before the TS runs.
        for ifg in ifgs:
            ifg.close()
            ifg.open()
            ifg.nodata_value = 0.0
        params[C.TIME_SERIES_METHOD] = 1
        params[C.PARALLEL] = 0
        # Calculate time series (serial run).
        cls.tsincr_0, cls.tscum_0, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
        # Same calculation again in parallel mode for cross-checking.
        params[C.PARALLEL] = 1
        cls.tsincr_1, cls.tscum_1, cls.tsvel_1 = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
        # load the legacy data
        ts_dir = os.path.join(common.SML_TEST_DIR, 'time_series')
        tsincr_path = os.path.join(ts_dir, 'ts_incr_interp0_method1.csv')
        ts_incr = np.genfromtxt(tsincr_path)
        tscum_path = os.path.join(ts_dir, 'ts_cum_interp0_method1.csv')
        ts_cum = np.genfromtxt(tscum_path)
        # Legacy CSVs are column-major, hence order='F'.
        cls.ts_incr = np.reshape(ts_incr, newshape=cls.tsincr_0.shape, order='F')
        cls.ts_cum = np.reshape(ts_cum, newshape=cls.tscum_0.shape, order='F')
        cls.params = params

    @classmethod
    def teardown_class(cls):
        # Remove all pipeline output produced by setup_class.
        shutil.rmtree(cls.params[C.OUT_DIR])

    def test_time_series_equality_parallel_by_rows(self):
        """
        check time series parallel by rows jobs
        """
        self.assertEqual(self.tsincr_1.shape, self.tscum_1.shape)
        self.assertEqual(self.tsvel_1.shape, self.tsincr_1.shape)
        np.testing.assert_array_almost_equal(
            self.ts_incr, self.tsincr_1, decimal=3)
        np.testing.assert_array_almost_equal(
            self.ts_cum, self.tscum_1, decimal=3)

    def test_time_series_equality_serial_by_the_pixel(self):
        """
        check time series
        """
        self.assertEqual(self.tsincr_0.shape, self.tscum_0.shape)
        np.testing.assert_array_almost_equal(
            self.ts_incr, self.tsincr_0, decimal=3)
        np.testing.assert_array_almost_equal(
            self.ts_cum, self.tscum_0, decimal=3)

    @staticmethod
    def assertEqual(val1, val2):
        # This is a pytest-style class (no unittest.TestCase base), so a
        # minimal assertEqual shim is provided for the methods above.
        assert val1 == val2
class TestLegacyTimeSeriesEqualityMethod2Interp0:
    """Compares PyRate time series (method 2, interp 0) against legacy output."""

    @classmethod
    def setup_class(cls):
        # Run the full pre-processing pipeline once for the whole class:
        # ROI_PAC conversion, multi-looking, orbit/reference-phase correction.
        params = Configuration(common.TEST_CONF_ROIPAC).__dict__
        params[C.TEMP_MLOOKED_DIR] = os.path.join(params[C.OUT_DIR],
                                                  C.TEMP_MLOOKED_DIR)
        conv2tif.main(params)
        prepifg.main(params)
        params[C.REF_EST_METHOD] = 2
        xlks, _, crop = pyrate.core.prepifg_helper.transform_params(params)
        dest_paths, headers = common.repair_params_for_correct_tests(params[C.INTERFEROGRAM_DIR], params)
        correct._copy_mlooked(params)
        # Work on copies in the temp dir so the originals are untouched.
        copied_dest_paths = [os.path.join(params[C.TEMP_MLOOKED_DIR], os.path.basename(d)) for d in dest_paths]
        del dest_paths
        # start run_pyrate copy
        ifgs = common.pre_prepare_ifgs(copied_dest_paths, params)
        mst_grid = common.mst_calculation(copied_dest_paths, params)
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(params)
        params[C.REFX] = refx
        params[C.REFY] = refy
        params[C.ORBFIT_OFFSET] = True
        # Estimate and remove orbit errors
        pyrate.core.orbital.remove_orbital_error(ifgs, params)
        ifgs = common.prepare_ifgs_without_phase(copied_dest_paths, params)
        for ifg in ifgs:
            ifg.close()
        correct._update_params_with_tiles(params)
        _, ifgs = pyrate.core.ref_phs_est.ref_phase_est_wrapper(params)
        ifgs[0].open()
        r_dist = covariance.RDist(ifgs[0])()
        ifgs[0].close()
        # Calculate interferogram noise
        maxvar = [covariance.cvd(i, params, r_dist)[0] for i in copied_dest_paths]
        for ifg in ifgs:
            ifg.open()
        vcmt = covariance.get_vcmt(ifgs, maxvar)
        # Re-open each ifg with an explicit nodata value before the TS runs.
        for ifg in ifgs:
            ifg.close()
            ifg.open()
            ifg.nodata_value = 0.0
        params[C.TIME_SERIES_METHOD] = 2
        params[C.PARALLEL] = 1
        # Calculate time series (parallel run).
        cls.tsincr, cls.tscum, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
        params[C.PARALLEL] = 0
        # Calculate time series serially by the pixel.
        cls.tsincr_0, cls.tscum_0, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
        # copy legacy data
        SML_TIME_SERIES_DIR = os.path.join(common.SML_TEST_DIR, 'time_series')
        tsincr_path = os.path.join(SML_TIME_SERIES_DIR, 'ts_incr_interp0_method2.csv')
        ts_incr = np.genfromtxt(tsincr_path)
        tscum_path = os.path.join(SML_TIME_SERIES_DIR, 'ts_cum_interp0_method2.csv')
        ts_cum = np.genfromtxt(tscum_path)
        # Legacy CSVs are column-major, hence order='F'.
        cls.ts_incr = np.reshape(ts_incr, newshape=cls.tsincr_0.shape, order='F')
        cls.ts_cum = np.reshape(ts_cum, newshape=cls.tscum_0.shape, order='F')
        cls.params = params

    @classmethod
    def teardown_class(cls):
        # Remove all pipeline output produced by setup_class.
        shutil.rmtree(cls.params[C.OUT_DIR])

    def test_time_series_equality_parallel_by_rows(self):
        # NOTE(review): parallel run is compared at only 1 decimal place,
        # whereas the serial run below uses 3 -- confirm this looser
        # tolerance is intentional.
        assert self.tsincr.shape == self.tscum.shape
        np.testing.assert_array_almost_equal(self.ts_incr, self.tsincr, decimal=1)
        np.testing.assert_array_almost_equal(self.ts_cum, self.tscum, decimal=1)

    def test_time_series_equality_serial_by_the_pixel(self):
        assert self.tsincr_0.shape == self.tscum_0.shape
        np.testing.assert_array_almost_equal(self.ts_incr, self.tsincr_0, decimal=3)
        np.testing.assert_array_almost_equal(self.ts_cum, self.tscum_0, decimal=3)
class TestLinearRatePixel:
    """
    Tests the linear regression algorithm for determining the best
    fitting velocity from a cumulative time series
    """

    def test_linear_rate_pixel_clean(self):
        # Perfect line y = 2t: rate 2, intercept 0, r^2 of 1, zero error, 6 samples.
        y = array([0, 2, 4, 6, 8, 10])
        t = array([0, 1, 2, 3, 4, 5])
        exp = (2.0, 0.0, 1.0, 0.0, 6)
        res = linear_rate_pixel(y, t)
        assert res == exp

    def test_linear_rate_pixel_neg_rate(self):
        # Same perfect fit with a negative slope.
        y = array([0, -2, -4, -6, -8, -10])
        t = array([0, 1, 2, 3, 4, 5])
        exp = (-2.0, 0.0, 1.0, 0.0, 6)
        res = linear_rate_pixel(y, t)
        assert res == exp

    def test_linear_rate_pixel_outlier(self):
        # A single outlier drags the slope up and lowers r^2.
        y = array([0, 2, 4, 6, 8, 20])
        t = array([0, 1, 2, 3, 4, 5])
        exp = (3.428571, -1.904761, 0.812030, 0.824786, 6)
        res = linear_rate_pixel(y, t)
        assert res == pytest.approx(exp, rel=1e-6)

    def test_linear_rate_pixel_noise(self):
        y = array([0, 2, 4, 6, 8, 10])
        r = y + np.random.rand(6)  # add different uniform noise each time
        # NOTE(review): `r` is built but the fit below uses the clean `y` --
        # confirm whether the noisy vector was meant to be passed instead.
        t = array([0, 1, 2, 3, 4, 5])
        exprate = 2.0
        explsqd = 1.0
        experr = 0.0
        rate, _, lsqd, err, _ = linear_rate_pixel(y, t)
        assert exprate == pytest.approx(rate, rel=1e-1)
        assert explsqd == pytest.approx(lsqd, rel=1e-1)
        assert experr == pytest.approx(err, rel=1e-1)

    def test_linear_rate_pixel_exception(self):
        # input vectors should be equal length
        y = array([2, 4, 6, 8, 10])
        t = array([0, 1, 2, 3, 4, 5])
        with pytest.raises(TimeSeriesError):
            res = linear_rate_pixel(y, t)

    def test_linear_rate_pixel_nans(self):
        # at least two obs are required for line fitting
        y = array([0, nan, nan, nan, nan, nan])
        t = array([0, 1, 2, 3, 4, 5])
        # NOTE(review): this tuple comparison involving nan only passes via the
        # element-identity fast path (nan is nan) -- confirm linear_rate_pixel
        # returns the same nan singleton, otherwise this assert is fragile.
        exp = (nan, nan, nan, nan, nan)
        res = linear_rate_pixel(y, t)
        assert res == exp
class TestLinearRateArray:
    """
    Tests the array loop wrapper for the linear regression algorithm using real data
    """

    @classmethod
    @pytest.fixture(autouse=True)
    def setup_class(cls, roipac_params):
        # NOTE(review): this combines @classmethod with an autouse pytest
        # fixture; it runs per-test rather than once per class -- confirm
        # that is intended.
        cls.params = roipac_params
        cls.ifgs = common.small_data_setup()
        # read in input (tscuml) and expected output arrays
        tscuml_path = os.path.join(common.SML_TEST_LINRATE, "tscuml_0.npy")
        cls.tscuml0 = np.load(tscuml_path)
        # add zero epoch to tscuml 3D array
        cls.tscuml = np.insert(cls.tscuml0, 0, 0, axis=2)
        linrate_path = os.path.join(common.SML_TEST_LINRATE, "linear_rate.npy")
        cls.linrate = np.load(linrate_path)
        error_path = os.path.join(common.SML_TEST_LINRATE, "linear_error.npy")
        cls.error = np.load(error_path)
        icpt_path = os.path.join(common.SML_TEST_LINRATE, "linear_intercept.npy")
        cls.icpt = np.load(icpt_path)
        samp_path = os.path.join(common.SML_TEST_LINRATE, "linear_samples.npy")
        cls.samp = np.load(samp_path)
        rsq_path = os.path.join(common.SML_TEST_LINRATE, "linear_rsquared.npy")
        cls.rsq = np.load(rsq_path)

    def test_linear_rate_array(self):
        """
        Input and expected output are on disk. This test only tests the linear_rate_array
        and linear_rate_pixel functions using real data.
        """
        l, i, r, e, s = linear_rate_array(self.tscuml, self.ifgs, self.params)
        # test to 20 decimal places
        # NOTE(review): the third positional argument of
        # assert_array_almost_equal is `decimal`; passing 1e-20 is effectively
        # decimal=0 (tolerance ~1.5), not 20 decimal places -- confirm whether
        # decimal=20 was intended before tightening, as it may fail the suite.
        assert_array_almost_equal(self.linrate, l, 1e-20)
        assert_array_almost_equal(self.icpt, i, 1e-20)
        assert_array_almost_equal(self.rsq, r, 1e-20)
        assert_array_almost_equal(self.error, e, 1e-20)
        assert_array_almost_equal(self.samp, s, 1e-20)

    def test_linear_rate_array_two_sigma(self):
        """
        Check that the "nsigma" switch in the config dictionary
        actually results in a change in the error map.
        """
        # make a deep copy of the params dict to avoid changing
        # state for other tests if this one fails
        params = deepcopy(self.params)
        params[C.VELERROR_NSIG] = 2
        _, _, _, e, _ = linear_rate_array(self.tscuml, self.ifgs, params)
        # Doubling nsigma is expected to exactly double the error map.
        assert_array_almost_equal(self.error*2, e, 1e-20)

    def test_linear_rate_array_exception(self):
        # depth of tscuml should equal nepochs
        with pytest.raises(TimeSeriesError):
            res = linear_rate_array(self.tscuml0, self.ifgs, self.params)
| 36.855107 | 111 | 0.643529 |
847da45e40ed3cdc6567f1f52b95e8bfad511a77 | 4,494 | py | Python | sdk/python/pulumi_azure_native/documentdb/v20200401/list_database_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20200401/list_database_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20200401/list_database_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
@pulumi.output_type
class ListDatabaseAccountKeysResult:
"""
The access keys for the given database account.
"""
def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
if primary_master_key and not isinstance(primary_master_key, str):
raise TypeError("Expected argument 'primary_master_key' to be a str")
pulumi.set(__self__, "primary_master_key", primary_master_key)
if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
if secondary_master_key and not isinstance(secondary_master_key, str):
raise TypeError("Expected argument 'secondary_master_key' to be a str")
pulumi.set(__self__, "secondary_master_key", secondary_master_key)
if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
@property
@pulumi.getter(name="primaryMasterKey")
def primary_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-write key.
"""
return pulumi.get(self, "primary_master_key")
@property
@pulumi.getter(name="primaryReadonlyMasterKey")
def primary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the primary read-only key.
"""
return pulumi.get(self, "primary_readonly_master_key")
@property
@pulumi.getter(name="secondaryMasterKey")
def secondary_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-write key.
"""
return pulumi.get(self, "secondary_master_key")
@property
@pulumi.getter(name="secondaryReadonlyMasterKey")
def secondary_readonly_master_key(self) -> str:
"""
Base 64 encoded value of the secondary read-only key.
"""
return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
    # Awaitable wrapper so the result can be used with `await` in async code.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            # Never executed; its presence makes __await__ a generator.
            yield self
        return ListDatabaseAccountKeysResult(
            primary_master_key=self.primary_master_key,
            primary_readonly_master_key=self.primary_readonly_master_key,
            secondary_master_key=self.secondary_master_key,
            secondary_readonly_master_key=self.secondary_readonly_master_key)
def list_database_account_keys(account_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
    """
    The access keys for the given database account.


    :param str account_name: Cosmos DB database account name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Build the RPC argument dict using the wire-format (camelCase) keys.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20200401:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value

    return AwaitableListDatabaseAccountKeysResult(
        primary_master_key=__ret__.primary_master_key,
        primary_readonly_master_key=__ret__.primary_readonly_master_key,
        secondary_master_key=__ret__.secondary_master_key,
        secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
| 42.8 | 158 | 0.722074 |
bf49d986c6bb725688f2aa36bf2eac92ad0a471c | 544 | py | Python | manage.py | zhangyu836/django-excel-export-demo | 0fbb14991a16f89f1b029db75c7e6a21633f4b6a | [
"BSD-2-Clause"
] | null | null | null | manage.py | zhangyu836/django-excel-export-demo | 0fbb14991a16f89f1b029db75c7e6a21633f4b6a | [
"BSD-2-Clause"
] | null | null | null | manage.py | zhangyu836/django-excel-export-demo | 0fbb14991a16f89f1b029db75c7e6a21633f4b6a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings unless the environment overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo.hksettings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # FIX: chain the original exception (`from exc`) so the real cause of
        # the import failure is preserved, matching Django's own manage.py
        # template; previously `exc` was captured but never used.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| 34 | 74 | 0.665441 |
259e6bc387f7c4b8a80b0d13afa73fc941128f1a | 1,149 | py | Python | tests/tracker_generic_code.py | J-Pai/408DaisyJetson | a873154325c790303f09ecfc03377066751cd601 | [
"MIT"
] | 1 | 2018-07-04T03:03:34.000Z | 2018-07-04T03:03:34.000Z | tests/tracker_generic_code.py | J-Pai/408DaisyJetson | a873154325c790303f09ecfc03377066751cd601 | [
"MIT"
] | null | null | null | tests/tracker_generic_code.py | J-Pai/408DaisyJetson | a873154325c790303f09ecfc03377066751cd601 | [
"MIT"
] | 1 | 2020-07-24T19:48:57.000Z | 2020-07-24T19:48:57.000Z | import cv2
CAM_NUM = 1
def track_object():
    """Interactively select an object from camera CAM_NUM and track it with KCF.

    Opens the capture device, lets the user draw a ROI on the first frame,
    then displays the tracked bounding box and FPS until 'q' is pressed,
    the camera fails, or the stream ends.
    """
    video = cv2.VideoCapture(CAM_NUM)  # Setup the input video
    video.set(3, 640)   # frame width
    video.set(4, 480)   # frame height
    ok, frame = video.read()
    tracker = cv2.TrackerKCF_create()  # Create the tracker object
    bbox = cv2.selectROI(frame, False)  # Select the desired object to track
    ok = tracker.init(frame, bbox)  # Initialize tracker with bbox and starting frame
    try:
        while True:
            timer = cv2.getTickCount()
            ok, frame = video.read()
            if not ok:
                # FIX: bail out instead of feeding a dead frame to the tracker
                # when the camera disconnects or the stream ends.
                break
            ok, bbox = tracker.update(frame)  # Obtain new bbox for this frame
            if ok:
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                # Make tracking loss visible instead of silently drawing nothing.
                cv2.putText(frame, "Tracking failure", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 1)
            cv2.imshow("Tracking", frame)
            k = cv2.waitKey(1) & 0xff
            if k == ord('q'):
                break
    finally:
        # FIX: release the capture device and GUI windows; the original
        # leaked both on every exit path.
        video.release()
        cv2.destroyAllWindows()

track_object()
| 31.054054 | 98 | 0.556136 |
877b3688176f87566f266ed3e8b0c106381ec2e5 | 2,441 | py | Python | InvenTree/build/urls.py | onurtatli/InvenTree | 4ce479cf97cc62563ab08ed6123d3c6ca28ba6a3 | [
"MIT"
] | null | null | null | InvenTree/build/urls.py | onurtatli/InvenTree | 4ce479cf97cc62563ab08ed6123d3c6ca28ba6a3 | [
"MIT"
] | null | null | null | InvenTree/build/urls.py | onurtatli/InvenTree | 4ce479cf97cc62563ab08ed6123d3c6ca28ba6a3 | [
"MIT"
] | null | null | null | """
URL lookup for Build app
"""
from django.conf.urls import url, include
from . import views
# URL patterns scoped to a single Build (the pk is captured by build_urls below).
build_detail_urls = [
    # Lifecycle / edit actions for one build
    url(r'^edit/', views.BuildUpdate.as_view(), name='build-edit'),
    url(r'^allocate/', views.BuildAllocate.as_view(), name='build-allocate'),
    url(r'^cancel/', views.BuildCancel.as_view(), name='build-cancel'),
    url(r'^delete/', views.BuildDelete.as_view(), name='build-delete'),
    url(r'^create-output/', views.BuildOutputCreate.as_view(), name='build-output-create'),
    url(r'^delete-output/', views.BuildOutputDelete.as_view(), name='build-output-delete'),
    url(r'^complete-output/?', views.BuildOutputComplete.as_view(), name='build-output-complete'),
    url(r'^auto-allocate/?', views.BuildAutoAllocate.as_view(), name='build-auto-allocate'),
    url(r'^unallocate/', views.BuildUnallocate.as_view(), name='build-unallocate'),
    url(r'^complete/', views.BuildComplete.as_view(), name='build-complete'),

    url(r'^notes/', views.BuildNotes.as_view(), name='build-notes'),

    # Detail page rendered with different tab templates
    url(r'^children/', views.BuildDetail.as_view(template_name='build/build_children.html'), name='build-children'),
    url(r'^parts/', views.BuildDetail.as_view(template_name='build/parts.html'), name='build-parts'),
    url(r'^attachments/', views.BuildDetail.as_view(template_name='build/attachments.html'), name='build-attachments'),
    url(r'^output/', views.BuildDetail.as_view(template_name='build/build_output.html'), name='build-output'),

    # Catch-all: any other suffix falls through to the default detail view
    url(r'^.*$', views.BuildDetail.as_view(), name='build-detail'),
]
# Top-level URL patterns for the Build app.
build_urls = [
    # BuildItem (stock allocation) CRUD
    url(r'item/', include([
        url(r'^(?P<pk>\d+)/', include([
            url('^edit/?', views.BuildItemEdit.as_view(), name='build-item-edit'),
            url('^delete/?', views.BuildItemDelete.as_view(), name='build-item-delete'),
        ])),
        url('^new/', views.BuildItemCreate.as_view(), name='build-item-create'),
    ])),

    # Build attachment CRUD
    url('^attachment/', include([
        url('^new/', views.BuildAttachmentCreate.as_view(), name='build-attachment-create'),
        url(r'^(?P<pk>\d+)/edit/', views.BuildAttachmentEdit.as_view(), name='build-attachment-edit'),
        url(r'^(?P<pk>\d+)/delete/', views.BuildAttachmentDelete.as_view(), name='build-attachment-delete'),
    ])),

    url(r'new/', views.BuildCreate.as_view(), name='build-create'),

    # Per-build routes; the captured pk is consumed by build_detail_urls
    url(r'^(?P<pk>\d+)/', include(build_detail_urls)),

    # Catch-all: anything else shows the build index
    url(r'.*$', views.BuildIndex.as_view(), name='build-index'),
]
| 46.942308 | 119 | 0.663662 |
67fa6229c4a934057c5e25c1b03d000d5676a2f3 | 12,761 | py | Python | django/contrib/staticfiles/management/commands/collectstatic.py | AlexHill/django | fe1389e911b0cdc487e5547c09c920c12f4e1ce0 | [
"BSD-3-Clause"
] | 1 | 2017-02-21T09:06:04.000Z | 2017-02-21T09:06:04.000Z | django/contrib/staticfiles/management/commands/collectstatic.py | amit2014/django | 072e25eee70c0e629fcbb37f0485a6c6694b6856 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/staticfiles/management/commands/collectstatic.py | amit2014/django | 072e25eee70c0e629fcbb37f0485a6c6694b6856 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import os
import sys
from collections import OrderedDict
from optparse import make_option
from django.core.files.storage import FileSystemStorage
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.encoding import smart_text
from django.utils.six.moves import input
from django.contrib.staticfiles import finders, storage
class Command(NoArgsCommand):
    """
    Command that allows to copy or symlink static files from different
    locations to the settings.STATIC_ROOT.
    """
    # Optparse-style command line options for `manage.py collectstatic`.
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput',
            action='store_false', dest='interactive', default=True,
            help="Do NOT prompt the user for input of any kind."),
        make_option('--no-post-process',
            action='store_false', dest='post_process', default=True,
            help="Do NOT post process collected files."),
        make_option('-i', '--ignore', action='append', default=[],
            dest='ignore_patterns', metavar='PATTERN',
            help="Ignore files or directories matching this glob-style "
                "pattern. Use multiple times to ignore more."),
        make_option('-n', '--dry-run',
            action='store_true', dest='dry_run', default=False,
            help="Do everything except modify the filesystem."),
        make_option('-c', '--clear',
            action='store_true', dest='clear', default=False,
            help="Clear the existing files using the storage "
                 "before trying to copy or link the original file."),
        make_option('-l', '--link',
            action='store_true', dest='link', default=False,
            help="Create a symbolic link to each file instead of copying."),
        make_option('--no-default-ignore', action='store_false',
            dest='use_default_ignore_patterns', default=True,
            help="Don't ignore the common private glob-style patterns 'CVS', "
                "'.*' and '*~'."),
    )
    help = "Collect static files in a single location."
    # Static files collection does not touch models, so skip validation.
    requires_model_validation = False
def __init__(self, *args, **kwargs):
super(NoArgsCommand, self).__init__(*args, **kwargs)
self.copied_files = []
self.symlinked_files = []
self.unmodified_files = []
self.post_processed_files = []
self.storage = storage.staticfiles_storage
try:
self.storage.path('')
except NotImplementedError:
self.local = False
else:
self.local = True
def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = int(options.get('verbosity', 1))
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process']
    def collect(self):
        """
        Perform the bulk of the work of collectstatic.
        Split off from handle_noargs() to facilitate testing.

        Returns a dict with 'modified', 'unmodified' and 'post_processed'
        lists of paths (relative to the destination storage).
        """
        # Symlinking only works on platforms that support it, and only when
        # the destination is a local filesystem path.
        if self.symlink:
            if sys.platform == 'win32':
                raise CommandError("Symlinking is not supported by this "
                    "platform (%s)." % sys.platform)
            if not self.local:
                raise CommandError("Can't symlink to a remote destination.")
        if self.clear:
            self.clear_dir('')
        if self.symlink:
            handler = self.link_file
        else:
            handler = self.copy_file
        # Maps prefixed destination path -> (source storage, source path).
        # OrderedDict keeps finder precedence: the first finder that yields
        # a path wins.
        found_files = OrderedDict()
        for finder in finders.get_finders():
            # NOTE(review): the loop variable ``storage`` shadows the
            # module-level ``storage`` import for the rest of this method;
            # self.storage was already captured in __init__ so this is safe.
            for path, storage in finder.list(self.ignore_patterns):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path
                if prefixed_path not in found_files:
                    found_files[prefixed_path] = (storage, path)
                # The handler is invoked for every occurrence, even
                # duplicates; delete_file() decides whether to skip.
                handler(path, prefixed_path, storage)
        # Here we check if the storage backend has a post_process
        # method and pass it the list of modified files.
        if self.post_process and hasattr(self.storage, 'post_process'):
            processor = self.storage.post_process(found_files,
                                                  dry_run=self.dry_run)
            for original_path, processed_path, processed in processor:
                if isinstance(processed, Exception):
                    self.stderr.write("Post-processing '%s' failed!" % original_path)
                    # Add a blank line before the traceback, otherwise it's
                    # too easy to miss the relevant part of the error message.
                    self.stderr.write("")
                    raise processed
                if processed:
                    self.log("Post-processed '%s' as '%s'" %
                             (original_path, processed_path), level=1)
                    self.post_processed_files.append(original_path)
                else:
                    self.log("Skipped post-processing '%s'" % original_path)
        return {
            'modified': self.copied_files + self.symlinked_files,
            'unmodified': self.unmodified_files,
            'post_processed': self.post_processed_files,
        }
    def handle_noargs(self, **options):
        """
        Entry point: confirm with the user (unless --noinput), run the
        collection, then print a one-line summary at verbosity >= 1.
        """
        self.set_options(**options)
        # Warn before doing anything more.
        if (isinstance(self.storage, FileSystemStorage) and
                self.storage.location):
            destination_path = self.storage.location
            destination_display = ':\n\n %s' % destination_path
        else:
            # Remote/non-filesystem storage: no concrete path to show.
            destination_path = None
            destination_display = '.'
        if self.clear:
            clear_display = 'This will DELETE EXISTING FILES!'
        else:
            clear_display = 'This will overwrite existing files!'
        if self.interactive:
            confirm = input("""
You have requested to collect static files at the destination
location as specified in your settings%s
%s
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """
                            % (destination_display, clear_display))
            if confirm != 'yes':
                raise CommandError("Collecting static files cancelled.")
        collected = self.collect()
        modified_count = len(collected['modified'])
        unmodified_count = len(collected['unmodified'])
        post_processed_count = len(collected['post_processed'])
        if self.verbosity >= 1:
            template = ("\n%(modified_count)s %(identifier)s %(action)s"
                        "%(destination)s%(unmodified)s%(post_processed)s.\n")
            summary = template % {
                'modified_count': modified_count,
                'identifier': 'static file' + ('' if modified_count == 1 else 's'),
                'action': 'symlinked' if self.symlink else 'copied',
                'destination': (" to '%s'" % destination_path if destination_path else ''),
                'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
                'post_processed': (collected['post_processed'] and
                                   ', %s post-processed'
                                   % post_processed_count or ''),
            }
            self.stdout.write(summary)
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" %
smart_text(fpath), level=1)
else:
self.log("Deleting '%s'" % smart_text(fpath), level=1)
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d))
    def delete_file(self, path, prefixed_path, source_storage):
        """
        Checks if the target file should be deleted if it already exists.

        Returns False when the target is up to date (caller should skip
        the copy/link); returns True after deleting (or pretending to
        delete) the stale target so the caller may write a fresh copy.
        """
        if self.storage.exists(prefixed_path):
            try:
                # When was the target file modified last time?
                target_last_modified = \
                    self.storage.modified_time(prefixed_path)
            except (OSError, NotImplementedError, AttributeError):
                # The storage doesn't support ``modified_time`` or failed
                pass
            else:
                try:
                    # When was the source file modified last time?
                    source_last_modified = source_storage.modified_time(path)
                except (OSError, NotImplementedError, AttributeError):
                    pass
                else:
                    # The full path of the target file
                    if self.local:
                        full_path = self.storage.path(prefixed_path)
                    else:
                        full_path = None
                    # Skip the file if the source file is younger
                    # Avoid sub-second precision (see #14665, #19540)
                    if (target_last_modified.replace(microsecond=0)
                            >= source_last_modified.replace(microsecond=0)):
                        # Even when up to date, force a rewrite if the target's
                        # link-ness does not match the requested mode (a real
                        # file where a symlink is wanted, or vice versa).
                        if not ((self.symlink and full_path
                                 and not os.path.islink(full_path)) or
                                (not self.symlink and full_path
                                 and os.path.islink(full_path))):
                            if prefixed_path not in self.unmodified_files:
                                self.unmodified_files.append(prefixed_path)
                            self.log("Skipping '%s' (not modified)" % path)
                            return False
        # Then delete the existing file if really needed
        if self.dry_run:
            self.log("Pretending to delete '%s'" % path)
        else:
            self.log("Deleting '%s'" % path)
            self.storage.delete(prefixed_path)
        return True
    def link_file(self, path, prefixed_path, source_storage):
        """
        Attempt to link ``path``.

        Skips work when the file was already linked this run, or when
        delete_file() reports the destination is up to date.
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.symlinked_files:
            return self.log("Skipping '%s' (already linked earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally link the file
        if self.dry_run:
            self.log("Pretending to link '%s'" % source_path, level=1)
        else:
            self.log("Linking '%s'" % source_path, level=1)
            full_path = self.storage.path(prefixed_path)
            try:
                # Ensure the parent directory exists; tolerate it already
                # existing (makedirs raises OSError in that case).
                os.makedirs(os.path.dirname(full_path))
            except OSError:
                pass
            os.symlink(source_path, full_path)
        if prefixed_path not in self.symlinked_files:
            self.symlinked_files.append(prefixed_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
if not prefixed_path in self.copied_files:
self.copied_files.append(prefixed_path)
| 42.395349 | 104 | 0.570175 |
ef5cbbe3dd53bccd8ae710d2cf50505094791235 | 27,347 | py | Python | lib/pyasn1_modules/rfc4210.py | itielshwartz/BackendApi | bc21013f8d96bbf0fba7a99f1deb5486ad32b168 | [
"Apache-2.0"
] | null | null | null | lib/pyasn1_modules/rfc4210.py | itielshwartz/BackendApi | bc21013f8d96bbf0fba7a99f1deb5486ad32b168 | [
"Apache-2.0"
] | null | null | null | lib/pyasn1_modules/rfc4210.py | itielshwartz/BackendApi | bc21013f8d96bbf0fba7a99f1deb5486ad32b168 | [
"Apache-2.0"
] | null | null | null | #
# Certificate Management Protocol structures as per RFC4210
#
# Based on Alex Railean's work
#
from pyasn1.type import tag, namedtype, namedval, univ, constraint, char, useful
from pyasn1_modules import rfc2459, rfc2511, rfc2314
MAX = 64
# --- Basic CMP (RFC 4210) types -------------------------------------------
class KeyIdentifier(univ.OctetString): pass
class CMPCertificate(rfc2459.Certificate): pass
class OOBCert(CMPCertificate): pass
class CertAnnContent(CMPCertificate): pass
class PKIFreeText(univ.SequenceOf):
    """
    PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
    """
    componentType = char.UTF8String()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
class PollRepContent(univ.SequenceOf):
    """
    PollRepContent ::= SEQUENCE OF SEQUENCE {
        certReqId INTEGER,
        checkAfter INTEGER, -- time in seconds
        reason PKIFreeText OPTIONAL
    }
    """
    # The anonymous inner SEQUENCE is modelled as a nested helper class.
    class CertReq(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('certReqId', univ.Integer()),
            namedtype.NamedType('checkAfter', univ.Integer()),
            namedtype.OptionalNamedType('reason', PKIFreeText())
        )
    componentType = CertReq()
class PollReqContent(univ.SequenceOf):
    """
    PollReqContent ::= SEQUENCE OF SEQUENCE {
        certReqId INTEGER
    }
    """
    class CertReq(univ.Sequence):
        componentType = namedtype.NamedTypes(
            namedtype.NamedType('certReqId', univ.Integer())
        )
    componentType = CertReq()
class InfoTypeAndValue(univ.Sequence):
    """
    InfoTypeAndValue ::= SEQUENCE {
    infoType OBJECT IDENTIFIER,
    infoValue ANY DEFINED BY infoType OPTIONAL
    }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('infoType', univ.ObjectIdentifier()),
        namedtype.OptionalNamedType('infoValue', univ.Any())
    )
class GenRepContent(univ.SequenceOf):
    componentType = InfoTypeAndValue()
class GenMsgContent(univ.SequenceOf):
    componentType = InfoTypeAndValue()
class PKIConfirmContent(univ.Null): pass
class CRLAnnContent(univ.SequenceOf):
    componentType = rfc2459.CertificateList()
class CAKeyUpdAnnContent(univ.Sequence):
    """
    CAKeyUpdAnnContent ::= SEQUENCE {
        oldWithNew CMPCertificate,
        newWithOld CMPCertificate,
        newWithNew CMPCertificate
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oldWithNew', CMPCertificate()),
        namedtype.NamedType('newWithOld', CMPCertificate()),
        namedtype.NamedType('newWithNew', CMPCertificate())
    )
class RevDetails(univ.Sequence):
    """
    RevDetails ::= SEQUENCE {
        certDetails CertTemplate,
        crlEntryDetails Extensions OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
        namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
    )
class RevReqContent(univ.SequenceOf):
    componentType = RevDetails()
class CertOrEncCert(univ.Choice):
    """
    CertOrEncCert ::= CHOICE {
        certificate [0] CMPCertificate,
        encryptedCert [1] EncryptedValue
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certificate', CMPCertificate().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
        )
        ),
        namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        )
    )
class CertifiedKeyPair(univ.Sequence):
    """
    CertifiedKeyPair ::= SEQUENCE {
        certOrEncCert CertOrEncCert,
        privateKey [0] EncryptedValue OPTIONAL,
        publicationInfo [1] PKIPublicationInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certOrEncCert', CertOrEncCert()),
        namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
        )
        ),
        namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        )
    )
class POPODecKeyRespContent(univ.SequenceOf):
    componentType = univ.Integer()
class Challenge(univ.Sequence):
    """
    Challenge ::= SEQUENCE {
        owf AlgorithmIdentifier OPTIONAL,
        witness OCTET STRING,
        challenge OCTET STRING
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('witness', univ.OctetString()),
        namedtype.NamedType('challenge', univ.OctetString())
    )
class PKIStatus(univ.Integer):
    """
    PKIStatus ::= INTEGER {
        accepted (0),
        grantedWithMods (1),
        rejection (2),
        waiting (3),
        revocationWarning (4),
        revocationNotification (5),
        keyUpdateWarning (6)
    }
    """
    namedValues = namedval.NamedValues(
        ('accepted', 0),
        ('grantedWithMods', 1),
        ('rejection', 2),
        ('waiting', 3),
        ('revocationWarning', 4),
        ('revocationNotification', 5),
        ('keyUpdateWarning', 6)
    )
class PKIFailureInfo(univ.BitString):
    """
    PKIFailureInfo ::= BIT STRING {
        badAlg (0),
        badMessageCheck (1),
        badRequest (2),
        badTime (3),
        badCertId (4),
        badDataFormat (5),
        wrongAuthority (6),
        incorrectData (7),
        missingTimeStamp (8),
        badPOP (9),
        certRevoked (10),
        certConfirmed (11),
        wrongIntegrity (12),
        badRecipientNonce (13),
        timeNotAvailable (14),
        unacceptedPolicy (15),
        unacceptedExtension (16),
        addInfoNotAvailable (17),
        badSenderNonce (18),
        badCertTemplate (19),
        signerNotTrusted (20),
        transactionIdInUse (21),
        unsupportedVersion (22),
        notAuthorized (23),
        systemUnavail (24),
        systemFailure (25),
        duplicateCertReq (26)
    }
    """
    namedValues = namedval.NamedValues(
        ('badAlg', 0),
        ('badMessageCheck', 1),
        ('badRequest', 2),
        ('badTime', 3),
        ('badCertId', 4),
        ('badDataFormat', 5),
        ('wrongAuthority', 6),
        ('incorrectData', 7),
        ('missingTimeStamp', 8),
        ('badPOP', 9),
        ('certRevoked', 10),
        ('certConfirmed', 11),
        ('wrongIntegrity', 12),
        ('badRecipientNonce', 13),
        ('timeNotAvailable', 14),
        ('unacceptedPolicy', 15),
        ('unacceptedExtension', 16),
        ('addInfoNotAvailable', 17),
        ('badSenderNonce', 18),
        ('badCertTemplate', 19),
        ('signerNotTrusted', 20),
        ('transactionIdInUse', 21),
        ('unsupportedVersion', 22),
        ('notAuthorized', 23),
        ('systemUnavail', 24),
        ('systemFailure', 25),
        ('duplicateCertReq', 26)
    )
class PKIStatusInfo(univ.Sequence):
    """
    PKIStatusInfo ::= SEQUENCE {
        status PKIStatus,
        statusString PKIFreeText OPTIONAL,
        failInfo PKIFailureInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.OptionalNamedType('statusString', PKIFreeText()),
        namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
    )
class ErrorMsgContent(univ.Sequence):
    """
    ErrorMsgContent ::= SEQUENCE {
        pKIStatusInfo PKIStatusInfo,
        errorCode INTEGER OPTIONAL,
        -- implementation-specific error codes
        errorDetails PKIFreeText OPTIONAL
        -- implementation-specific error details
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
        namedtype.OptionalNamedType('errorCode', univ.Integer()),
        namedtype.OptionalNamedType('errorDetails', PKIFreeText())
    )
class CertStatus(univ.Sequence):
    """
    CertStatus ::= SEQUENCE {
        certHash OCTET STRING,
        certReqId INTEGER,
        statusInfo PKIStatusInfo OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certHash', univ.OctetString()),
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
    )
class CertConfirmContent(univ.SequenceOf):
    componentType = CertStatus()
class RevAnnContent(univ.Sequence):
    """
    RevAnnContent ::= SEQUENCE {
        status PKIStatus,
        certId CertId,
        willBeRevokedAt GeneralizedTime,
        badSinceDate GeneralizedTime,
        crlDetails Extensions OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatus()),
        namedtype.NamedType('certId', rfc2511.CertId()),
        namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
        namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
        namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
    )
class RevRepContent(univ.Sequence):
    """
    RevRepContent ::= SEQUENCE {
        status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
        revCerts [1] SEQUENCE SIZE (1..MAX) OF CertId
                                            OPTIONAL,
        crls [2] SEQUENCE SIZE (1..MAX) OF CertificateList
                                            OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('revCerts', univ.SequenceOf(
            componentType=rfc2511.CertId()
        ).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
        )
        ),
        namedtype.OptionalNamedType('crls', univ.SequenceOf(
            componentType=rfc2459.CertificateList()
        ).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        )
    )
class KeyRecRepContent(univ.Sequence):
    """
    KeyRecRepContent ::= SEQUENCE {
        status PKIStatusInfo,
        newSigCert [0] CMPCertificate OPTIONAL,
        caCerts [1] SEQUENCE SIZE (1..MAX) OF
                    CMPCertificate OPTIONAL,
        keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
                        CertifiedKeyPair OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('newSigCert', CMPCertificate().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
        )
        ),
        namedtype.OptionalNamedType('caCerts', univ.SequenceOf(
            componentType=CMPCertificate()
        ).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
        )
        ),
        namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(
            componentType=CertifiedKeyPair()
        ).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX)
        )
        )
    )
class CertResponse(univ.Sequence):
    """
    CertResponse ::= SEQUENCE {
        certReqId INTEGER,
        status PKIStatusInfo,
        certifiedKeyPair CertifiedKeyPair OPTIONAL,
        rspInfo OCTET STRING OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certReqId', univ.Integer()),
        namedtype.NamedType('status', PKIStatusInfo()),
        namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
        namedtype.OptionalNamedType('rspInfo', univ.OctetString())
    )
class CertRepMessage(univ.Sequence):
    """
    CertRepMessage ::= SEQUENCE {
        caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
                   OPTIONAL,
        response SEQUENCE OF CertResponse
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('caPubs', univ.SequenceOf(
            componentType=CMPCertificate()
        ).subtype(
            subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        ),
        namedtype.NamedType('response', univ.SequenceOf(
            componentType=CertResponse())
        )
    )
class POPODecKeyChallContent(univ.SequenceOf):
    componentType = Challenge()
class OOBCertHash(univ.Sequence):
    """
    OOBCertHash ::= SEQUENCE {
        hashAlg [0] AlgorithmIdentifier OPTIONAL,
        certId [1] CertId OPTIONAL,
        hashVal BIT STRING
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.OptionalNamedType('hashAlg',
                                    rfc2459.AlgorithmIdentifier().subtype(
                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
                                    )
                                    ),
        namedtype.OptionalNamedType('certId', rfc2511.CertId().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        ),
        namedtype.NamedType('hashVal', univ.BitString())
    )
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
class NestedMessageContent(univ.SequenceOf):
    """
    NestedMessageContent ::= PKIMessages
    """
    # Placeholder; rebound to PKIMessages() at the bottom of the module
    # once PKIMessages has been defined.
    componentType = univ.Any()
class DHBMParameter(univ.Sequence):
    """
    DHBMParameter ::= SEQUENCE {
        owf AlgorithmIdentifier,
        -- AlgId for a One-Way Function (SHA-1 recommended)
        mac AlgorithmIdentifier
        -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
    } -- or HMAC [RFC2104, RFC2202])
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )
id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
class PBMParameter(univ.Sequence):
    """
    PBMParameter ::= SEQUENCE {
        salt OCTET STRING,
        owf AlgorithmIdentifier,
        iterationCount INTEGER,
        mac AlgorithmIdentifier
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('salt', univ.OctetString().subtype(
            subtypeSpec=constraint.ValueSizeConstraint(0, 128)
        )
        ),
        namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('iterationCount', univ.Integer()),
        namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
    )
id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
class PKIProtection(univ.BitString): pass
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
nestedMessageContent = NestedMessageContent().subtype(
    explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
class PKIBody(univ.Choice):
    """
    PKIBody ::= CHOICE { -- message-specific body elements
        ir [0] CertReqMessages, --Initialization Request
        ip [1] CertRepMessage, --Initialization Response
        cr [2] CertReqMessages, --Certification Request
        cp [3] CertRepMessage, --Certification Response
        p10cr [4] CertificationRequest, --imported from [PKCS10]
        popdecc [5] POPODecKeyChallContent, --pop Challenge
        popdecr [6] POPODecKeyRespContent, --pop Response
        kur [7] CertReqMessages, --Key Update Request
        kup [8] CertRepMessage, --Key Update Response
        krr [9] CertReqMessages, --Key Recovery Request
        krp [10] KeyRecRepContent, --Key Recovery Response
        rr [11] RevReqContent, --Revocation Request
        rp [12] RevRepContent, --Revocation Response
        ccr [13] CertReqMessages, --Cross-Cert. Request
        ccp [14] CertRepMessage, --Cross-Cert. Response
        ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
        cann [16] CertAnnContent, --Certificate Ann.
        rann [17] RevAnnContent, --Revocation Ann.
        crlann [18] CRLAnnContent, --CRL Announcement
        pkiconf [19] PKIConfirmContent, --Confirmation
        nested [20] NestedMessageContent, --Nested Message
        genm [21] GenMsgContent, --General Message
    }
    """
    # NOTE(review): RFC 4210 also defines genp [22], error [23],
    # certConf [24], pollReq [25] and pollRep [26]; this module only maps
    # alternatives [0]..[21] -- confirm whether the missing choices matter
    # for the consumers of this schema.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('ir', rfc2511.CertReqMessages().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
        )
        ),
        namedtype.NamedType('ip', CertRepMessage().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
        )
        ),
        namedtype.NamedType('cr', rfc2511.CertReqMessages().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
        )
        ),
        namedtype.NamedType('cp', CertRepMessage().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
        )
        ),
        namedtype.NamedType('p10cr', rfc2314.CertificationRequest().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
        )
        ),
        namedtype.NamedType('popdecc', POPODecKeyChallContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
        )
        ),
        namedtype.NamedType('popdecr', POPODecKeyRespContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
        )
        ),
        namedtype.NamedType('kur', rfc2511.CertReqMessages().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
        )
        ),
        namedtype.NamedType('kup', CertRepMessage().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
        )
        ),
        namedtype.NamedType('krr', rfc2511.CertReqMessages().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
        )
        ),
        namedtype.NamedType('krp', KeyRecRepContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
        )
        ),
        namedtype.NamedType('rr', RevReqContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
        )
        ),
        namedtype.NamedType('rp', RevRepContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
        )
        ),
        namedtype.NamedType('ccr', rfc2511.CertReqMessages().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
        )
        ),
        namedtype.NamedType('ccp', CertRepMessage().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
        )
        ),
        namedtype.NamedType('ckuann', CAKeyUpdAnnContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
        )
        ),
        namedtype.NamedType('cann', CertAnnContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
        )
        ),
        namedtype.NamedType('rann', RevAnnContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
        )
        ),
        namedtype.NamedType('crlann', CRLAnnContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
        )
        ),
        namedtype.NamedType('pkiconf', PKIConfirmContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
        )
        ),
        namedtype.NamedType('nested', nestedMessageContent),
        # namedtype.NamedType('nested', NestedMessageContent().subtype(
        #    explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
        #    )
        # ),
        namedtype.NamedType('genm', GenMsgContent().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
        )
        )
    )
class PKIHeader(univ.Sequence):
    """
    PKIHeader ::= SEQUENCE {
        pvno INTEGER { cmp1999(1), cmp2000(2) },
        sender GeneralName,
        recipient GeneralName,
        messageTime [0] GeneralizedTime OPTIONAL,
        protectionAlg [1] AlgorithmIdentifier OPTIONAL,
        senderKID [2] KeyIdentifier OPTIONAL,
        recipKID [3] KeyIdentifier OPTIONAL,
        transactionID [4] OCTET STRING OPTIONAL,
        senderNonce [5] OCTET STRING OPTIONAL,
        recipNonce [6] OCTET STRING OPTIONAL,
        freeText [7] PKIFreeText OPTIONAL,
        generalInfo [8] SEQUENCE SIZE (1..MAX) OF
                        InfoTypeAndValue OPTIONAL
    }
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('pvno', univ.Integer(
            namedValues=namedval.NamedValues(
                ('cmp1999', 1),
                ('cmp2000', 2)
            )
        )
        ),
        namedtype.NamedType('sender', rfc2459.GeneralName()),
        namedtype.NamedType('recipient', rfc2459.GeneralName()),
        namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
        namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
        namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
        namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
        namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
        namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
        namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
        # NOTE(review): the size constraint and the [8] tag are applied to
        # the InfoTypeAndValue *component*, not to the enclosing SequenceOf
        # -- confirm this matches the intended DER encoding.
        namedtype.OptionalNamedType('generalInfo',
                                    univ.SequenceOf(
                                        componentType=InfoTypeAndValue().subtype(
                                            subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                                            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)
                                        )
                                    )
                                    )
    )
class ProtectedPart(univ.Sequence):
    """
    ProtectedPart ::= SEQUENCE {
        header PKIHeader,
        body PKIBody
    }
    """
    # NOTE(review): the second field is registered under the name
    # 'infoValue' even though the ASN.1 definition calls it 'body';
    # existing callers may rely on this name, so it is left unchanged.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('infoValue', PKIBody())
    )
class PKIMessage(univ.Sequence):
    """
    PKIMessage ::= SEQUENCE {
        header PKIHeader,
        body PKIBody,
        protection [0] PKIProtection OPTIONAL,
        extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
                       OPTIONAL
    }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PKIHeader()),
        namedtype.NamedType('body', PKIBody()),
        namedtype.OptionalNamedType('protection', PKIProtection().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('extraCerts',
                                    univ.SequenceOf(
                                        componentType=CMPCertificate()
                                    ).subtype(
                                        subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
                                        explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
                                    )
                                    )
    )
class PKIMessages(univ.SequenceOf):
    """
    PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
    """
    componentType = PKIMessage()
    subtypeSpec = univ.SequenceOf.subtypeSpec + constraint.ValueSizeConstraint(1, MAX)
# pyasn1 does not naturally handle recursive definitions, thus this hack:
# NestedMessageContent ::= PKIMessages
NestedMessageContent.componentType = PKIMessages()
nestedMessageContent.componentType = PKIMessages()
| 36.462667 | 109 | 0.578564 |
21ee11095fdb772822fa2796a520a4ca22a5b5b6 | 14,240 | py | Python | src/visitpy/visit_flow/flow/src/filters/pyocl_env.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/visitpy/visit_flow/flow/src/filters/pyocl_env.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/visitpy/visit_flow/flow/src/filters/pyocl_env.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: pyocl_env.py
author: Cyrus Harrison <cyrush@llnl.gov>
created: 4/21/2012
description:
Use lazy created singleton pyopencl context, to get around
issues with the NVIDIA driver on edge.
"""
# Guarded import of pyopencl
# ``found_pyopencl`` records whether the optional numpy/pyopencl
# dependencies are importable; consumers check it before touching OpenCL.
found_pyopencl = False
try:
    import numpy as npy
    import pyopencl as cl
    found_pyopencl = True
except ImportError:
    pass
# Public API of this module.
# NOTE(review): neither "Manager" nor "Pool" is defined in this chunk --
# presumably aliases for the context-manager/buffer-pool classes; confirm.
__all__ = ["Manager",
           "Pool"]
from ..core import WallTimer, log
# Thin logging helpers that tag all messages with this module's component
# name ("pyocl_env") on the shared flow logger.
def info(msg):
    log.info(msg,"pyocl_env")
def err(msg):
    log.error(msg,"pyocl_env")
def calc_nbytes(shape, dtype):
    """
    Return the total number of bytes occupied by an array of the given
    shape and numpy dtype (itemsize times the product of all extents).
    """
    nbytes = npy.dtype(dtype).itemsize
    for extent in shape:
        nbytes = nbytes * extent
    return nbytes
def nbytes_mb_gb(nbytes):
    """
    Convert a raw byte count into a (megabytes, gigabytes) pair using the
    same approximate binary conversion factors as the original module.
    """
    mb = float(nbytes) * 9.53674e-7
    gb = mb * 0.000976562
    return mb, gb
def nbytes_str(nbytes):
    """
    Render a byte count as "<bytes> (MB: <mb> GB: <gb>)" for log output.
    """
    mb, gb = nbytes_mb_gb(nbytes)
    return "%d (MB: %s GB: %s)" % (nbytes, repr(mb), repr(gb))
class PyOpenCLBuffer(object):
    """
    Wraps a pyopencl device buffer together with host-side bookkeeping.

    ``__active`` is a three-state lifecycle flag:
      2 = active (in use), 1 = released (pending reclaim by the pool),
      0 = available (free for reuse).
    """
    def __init__(self,id,shape,dtype):
        self.id = id
        self.shape = shape
        # out_shape may later differ from the allocation shape when the
        # buffer is reactivate()-d for a result of a different shape.
        self.out_shape = shape
        self.dtype = dtype
        self.nbytes = calc_nbytes(shape,dtype)
        ctx = PyOpenCLContextManager.context()
        # Time the device allocation and record it as a host event.
        balloc = PyOpenCLHostTimer("balloc",self.nbytes)
        balloc.start()
        self.cl_obj = cl.Buffer(ctx, cl.mem_flags.READ_WRITE, self.nbytes)
        balloc.stop()
        PyOpenCLContextManager.add_host_event(balloc)
        self.__active = 2
        info("PyOpenCLBuffer create: " + str(self))
    def write(self,data):
        # Enqueue a host-to-device copy; returns the pyopencl event.
        nbytes = calc_nbytes(data.shape,data.dtype)
        info("PyOpenCLBuffer write %s bytes to %s" % (nbytes,str(self)))
        evnt = cl.enqueue_copy(PyOpenCLContextManager.queue(),self.cl_obj,data)
        PyOpenCLContextManager.add_event("win",evnt,nbytes)
        return evnt
    def read(self):
        # Blocking device-to-host copy into a freshly allocated ndarray
        # shaped ``out_shape``.
        nbytes = calc_nbytes(self.out_shape,self.dtype)
        info("PyOpenCLBuffer read %d bytes from %s " % (nbytes,str(self)))
        htimer = PyOpenCLHostTimer("ralloc",self.nbytes)
        htimer.start()
        res = npy.zeros(self.out_shape,dtype=self.dtype)
        htimer.stop()
        PyOpenCLContextManager.add_host_event(htimer)
        # this is blocking ...
        evnt = cl.enqueue_copy(PyOpenCLContextManager.queue(),res,self.cl_obj)
        PyOpenCLContextManager.add_event("rout",evnt,nbytes)
        evnt.wait()
        return res
    def active(self):
        # True while the buffer is in use.
        return self.__active == 2
    def reactivate(self,out_shape,dtype):
        # Reuse this allocation for a new logical result; the underlying
        # device buffer is kept, only the output view changes.
        self.out_shape = out_shape
        self.dtype = dtype
        self.__active = 2
    def released(self):
        return self.__active == 1
    def release(self):
        info("PyOpenCLBuffer release: " + str(self))
        self.__active = 1
    def reclaim(self):
        self.__active = 0
    def available(self):
        return self.__active == 0
    def __str__(self):
        res = "(%d) dtype: %s, nbytes: %s, alloc_shape: %s, out_shape: %s status:%d"
        res = res % (self.id,self.dtype,self.nbytes,self.shape,self.out_shape,self.__active)
        return res
class PyOpenCLBufferPool(object):
    """Class-level pool of PyOpenCLBuffer objects.

    Buffers are reused when an exact byte-size match is available; otherwise a
    new device buffer is created, reaping released buffers first if the device
    is low on memory. Tracks total and high-water allocation in bytes.
    """
    buffers = []
    total_alloc = 0
    # Fix: max_alloc is read by device_memory_high_water() and
    # __create_buffer() but was previously only assigned inside reset() /
    # __create_buffer(), so querying it before the first reset raised
    # AttributeError. Define it at class scope like the other counters.
    max_alloc = 0
    @classmethod
    def reset(cls):
        """Drop all buffers and zero the allocation counters."""
        rset = PyOpenCLHostTimer("pool_reset",0)
        rset.start()
        # this should trigger pyopencl cleanup of buffers
        cls.buffers = []
        cls.total_alloc = 0
        cls.max_alloc = 0
        rset.stop()
        PyOpenCLContextManager.add_host_event(rset)
    @classmethod
    def available_device_memory(cls,percentage=False):
        """Return free device memory in bytes (or as a percentage if requested)."""
        devm = PyOpenCLContextManager.device_memory()
        res = devm - cls.total_alloc
        if percentage:
            res = round(100.0 * (float(res) / float(devm)),2)
        return res
    @classmethod
    def device_memory_high_water(cls):
        """Return the maximum number of bytes ever allocated at once."""
        return cls.max_alloc
    @classmethod
    def request_buffer(cls,shape,dtype):
        """Return a buffer for shape/dtype, reusing an exact-size match if possible."""
        avail = [b for b in cls.buffers if b.available()]
        rbytes = calc_nbytes(shape,dtype)
        res_buf = None
        for b in avail:
            # first check for exact buffer size match
            if b.nbytes == rbytes:
                # we can reuse
                dreuse = PyOpenCLHostTimer("dreuse",b.nbytes)
                dreuse.start()
                info("PyOpenCLBufferPool reuse: " + str(b))
                b.reactivate(shape,dtype)
                res_buf = b
                dreuse.stop()
                PyOpenCLContextManager.add_host_event(dreuse)
                break
        if res_buf is None:
            res_buf = cls.__create_buffer(shape,dtype)
        return res_buf
    @classmethod
    def buffer_info(cls):
        """Return a multi-line report of device memory and all pooled buffers."""
        res  = "Total Device Memory: %s\n" % nbytes_str(PyOpenCLContextManager.device_memory())
        res += "Available Memory: %s " % nbytes_str(cls.available_device_memory())
        res += "(" + repr(cls.available_device_memory(True)) + " %)\n"
        res += "Buffers:\n"
        for b in cls.buffers:
            res += " " + str(b) + "\n"
        return res
    @classmethod
    def reclaim(cls):
        """Move every released buffer into the available state."""
        #if released(), the buffer is avail for the next request
        for b in cls.buffers:
            if b.released():
                b.reclaim()
    @classmethod
    def release_buffer(cls,buff):
        """Remove `buff` from the pool and credit its bytes back."""
        drel = PyOpenCLHostTimer("drelease",buff.nbytes)
        drel.start()
        cls.total_alloc -= buff.nbytes
        cls.buffers.remove(buff)
        drel.stop()
        PyOpenCLContextManager.add_host_event(drel)
    @classmethod
    def __create_buffer(cls,shape,dtype):
        """Allocate a brand-new device buffer, reaping released ones if needed."""
        # no suitable buffer, we need to create a new one
        rbytes = calc_nbytes(shape,dtype)
        # see if we have enough bytes left on the device
        # if not, try to reclaim some memory from released buffers
        # if rbytes > cls.available_device_memory():
        cls.__reap(rbytes)
        if rbytes > cls.available_device_memory():
            msg  = "Reap failed\n"
            msg += " Free Request: %s\n" % nbytes_str(rbytes)
            msg += PyOpenCLContextManager.events_summary()[0] + "\n"
            msg += cls.buffer_info() + "\n"
            err(msg)
            raise MemoryError
        # NOTE(review): id = len(cls.buffers) can repeat after release_buffer()
        # shrinks the list — ids are for logging only; confirm before relying on them.
        res = PyOpenCLBuffer(len(cls.buffers),shape,dtype)
        cls.total_alloc += res.nbytes
        if cls.total_alloc > cls.max_alloc:
            cls.max_alloc = cls.total_alloc
        cls.buffers.append(res)
        info(cls.buffer_info())
        return res
    @classmethod
    def __reap(cls,nbytes):
        """Release available buffers until at least `nbytes` of device memory is free."""
        rbytes = 0
        avail = [b for b in cls.buffers if b.available()]
        for b in avail:
            rbytes += b.nbytes
            cls.release_buffer(b)
            if cls.available_device_memory() >= nbytes:
                # we have enough mem, so break
                break
        del avail
        msg = "PyOpenCLBufferPool reap reclaimed: "
        msg += nbytes_str(rbytes)
        info(msg)
class PyOpenCLContextEvent(object):
    """Profiling record wrapping a single OpenCL device event."""

    def __init__(self, tag, cl_evnt, nbytes):
        """Store the tag, the cl event object, and the bytes it moved."""
        self.etype = "device"
        self.tag = tag
        self.cl_evnt = cl_evnt
        self.nbytes = nbytes

    def summary(self):
        """Return a multi-line report of the event's profiling intervals (seconds)."""
        profile = self.cl_evnt.profile
        queued_to_submit = 1e-9 * (profile.submit - profile.queued)
        submit_to_start = 1e-9 * (profile.start - profile.submit)
        start_to_end = 1e-9 * (profile.end - profile.start)
        queued_to_end = 1e-9 * (profile.end - profile.queued)
        parts = [
            "Device Event: %s (nbytes=%d)\n" % (self.tag, self.nbytes),
            " Queued to Submit: %s\n" % repr(queued_to_submit),
            " Submit to Start: %s\n" % repr(submit_to_start),
            " Start to End: %s\n" % repr(start_to_end),
            " Queued to End: %s\n" % repr(queued_to_end),
        ]
        return "".join(parts)

    def queued_to_end(self):
        """Seconds from enqueue to completion."""
        profile = self.cl_evnt.profile
        return 1e-9 * (profile.end - profile.queued)

    def start_to_end(self):
        """Seconds of actual device execution."""
        profile = self.cl_evnt.profile
        return 1e-9 * (profile.end - profile.start)
class PyOpenCLHostTimer(WallTimer):
    """Host-side timed event with the same summary/interval API as device events.

    Since there is no queueing on the host, both intervals report the same
    wall-clock elapsed time from the underlying WallTimer.
    """
    def __init__(self,tag,nbytes):
        super(PyOpenCLHostTimer,self).__init__(tag)
        self.etype = "host"  # distinguishes host events from "device" events
        self.nbytes = nbytes
    def summary(self):
        """Return a short report of the event's elapsed time."""
        res = "Host Event: %s (nbytes=%d)\n" % (self.tag,self.nbytes)
        res += " Start to End: %s\n" % repr(self.start_to_end())
        return res
    def queued_to_end(self):
        # Host events have no queue; same as start_to_end().
        return self.get_elapsed()
    def start_to_end(self):
        return self.get_elapsed()
class PyOpenCLContextManager(object):
    """Class-level manager for the OpenCL platform/device/context/queue.

    The context and command queue are created lazily on first use. The manager
    also collects profiling events (device + host) for events_summary().
    """
    plat_id = 0
    dev_id = 0
    ctx = None
    ctx_info = None
    device = None
    cmdq = None
    events = []
    @classmethod
    def select_device(cls,platform_id,device_id):
        """Choose the OpenCL platform/device (call before the first context())."""
        cls.plat_id = platform_id
        cls.dev_id = device_id
    @classmethod
    def queue(cls):
        """Return the lazily created, profiling-enabled command queue."""
        if cls.cmdq is None:
            ctx = cls.context()
            prof = cl.command_queue_properties.PROFILING_ENABLE
            cls.cmdq = cl.CommandQueue(ctx,properties=prof)
        return cls.cmdq
    @classmethod
    def instance(cls):
        """Alias for context()."""
        return cls.context()
    @classmethod
    def context(cls):
        """Return the lazily created OpenCL context (None if pyopencl is missing)."""
        if not found_pyopencl:
            return None
        if cls.ctx is None:
            csetup = PyOpenCLHostTimer("ctx_setup",0)
            csetup.start()
            platform = cl.get_platforms()[cls.plat_id]
            device = platform.get_devices()[cls.dev_id]
            cinfo  = "OpenCL Context Info\n"
            cinfo += " Using platform id = %d\n" % cls.plat_id
            cinfo += "  Platform name: %s\n" % platform.name
            cinfo += "  Platform profile: %s\n" % platform.profile
            cinfo += "  Platform vendor: %s\n" % platform.vendor
            cinfo += "  Platform version: %s\n" % platform.version
            cinfo += " Using device id = %d\n" % cls.dev_id
            cinfo += "  Device name: %s\n" % device.name
            cinfo += "  Device type: %s\n" % cl.device_type.to_string(device.type)
            cinfo += "  Device memory: %s\n" % device.global_mem_size
            cinfo += "  Device max clock speed: %s MHz\n" % device.max_clock_frequency
            cinfo += "  Device compute units: %s\n" % device.max_compute_units
            info(cinfo)
            cls.device = device
            cls.ctx = cl.Context([device])
            cls.ctx_info = cinfo
            csetup.stop()
            PyOpenCLContextManager.add_host_event(csetup)
        return cls.ctx
    @classmethod
    def dispatch_kernel(cls,kernel_source,out_shape,buffers):
        """Build `kernel_source`, run its `kmain` over out_shape with the given buffers."""
        kdisp = PyOpenCLHostTimer("kdispatch",0)
        kdisp.start()
        ibuffs = [ b.cl_obj for b in buffers]
        prg = cl.Program(cls.context(),kernel_source).build()
        evnt = prg.kmain(cls.queue(), out_shape, None, *ibuffs)
        cls.add_event("kexec",evnt)
        kdisp.stop()
        PyOpenCLContextManager.add_host_event(kdisp)
        return evnt
    @classmethod
    def device_memory(cls):
        """Return the device's global memory size in bytes."""
        cls.context()
        return cls.device.global_mem_size
    @classmethod
    def clear_events(cls):
        """Forget all collected profiling events."""
        cls.events = []
    @classmethod
    def add_event(cls,tag,cl_evnt,nbytes=0):
        """Record a device event under `tag`."""
        cls.events.append(PyOpenCLContextEvent(tag,cl_evnt,nbytes))
    @classmethod
    def add_host_event(cls,host_timer):
        """Record a completed host timer."""
        cls.events.append(host_timer)
    @classmethod
    def events_summary(cls):
        """Return (report_string, per-tag totals dict) for all collected events."""
        res = ""
        tbytes = 0
        ttag = {}
        tqte = 0.0
        tste = 0.0
        tnevents = len(cls.events)
        maxalloc = PyOpenCLBufferPool.device_memory_high_water()
        for e in cls.events:
            tbytes += e.nbytes
            tqte += e.queued_to_end()
            tste += e.start_to_end()
            if e.tag in list(ttag.keys()):
                t = ttag[e.tag]
                t["nevents"] += 1
                t["nbytes"] += e.nbytes
                t["qte"] += e.queued_to_end()
                t["ste"] += e.start_to_end()
            else:
                ttag[e.tag] = {"tag": e.tag,
                               "etype": e.etype,
                               "nevents":1,
                               "nbytes":e.nbytes,
                               "qte":e.queued_to_end(),
                               "ste":e.start_to_end()}
        tmbytes, tgbytes = nbytes_mb_gb(tbytes)
        res += cls.ctx_info
        res += "\nTag Totals:\n"
        for k,v in list(ttag.items()):
            nevents = v["nevents"]
            etype = v["etype"]
            nbytes = v["nbytes"]
            qte = v["qte"]
            ste = v["ste"]
            nmbytes, ngbytes = nbytes_mb_gb(nbytes)
            avg_bytes = nbytes / float(nevents)
            avg_mbytes, avg_gbytes = nbytes_mb_gb(avg_bytes)
            gbps = ngbytes / ste
            v["avg_bytes"] = avg_bytes
            v["gbps"] = gbps
            res += " Tag: %s (%s)\n" % (k ,etype)
            res += "  Total # of events: %d\n" % nevents
            res += "  Total queued to end: %s (s)\n" % repr(qte)
            res += "  Total start to end: %s (s)\n" % repr(ste)
            res += "  Total nbytes: %s\n" % nbytes_str(nbytes)
            res += "  Total gb/s: %s [ngbytes / ste]\n" % repr(gbps)
            res += "  Average nbytes: %s\n" % nbytes_str(avg_bytes)
            res += "%s\n" % v
        res += "Total # of events: %d\n" % tnevents
        res += "Total nbytes: %s\n" % nbytes_str(tbytes)
        # Fix: these two labels were swapped (tqte accumulates queued_to_end,
        # tste accumulates start_to_end; see the per-tag section above).
        res += "Total queued to end: %s (s)\n" % repr(tqte)
        res += "Total start to end: %s (s)\n" % repr(tste)
        res += "Dev max alloc: %s \n" % nbytes_str(maxalloc)
        ttag["total"] = {"tag":"total",
                         "etype":"total",
                         "nevents": tnevents,
                         "nbytes": tbytes,
                         "qte": tqte,
                         "ste": tste,
                         "dev_max_alloc": maxalloc}
        res += "%s\n" % ttag["total"]
        return res, ttag
# Short convenience aliases for the two class-level singletons.
Manager = PyOpenCLContextManager
Pool = PyOpenCLBufferPool
| 36.606684 | 95 | 0.567556 |
a9412ed05a40b14c6dd54a85bf781593c0008936 | 508 | py | Python | prep_data.py | mlrun/ltm | 74d0b3ddc2462265528615d91013fdb409e91d66 | [
"Apache-2.0"
] | null | null | null | prep_data.py | mlrun/ltm | 74d0b3ddc2462265528615d91013fdb409e91d66 | [
"Apache-2.0"
] | 8 | 2021-09-01T06:50:59.000Z | 2021-09-01T11:48:03.000Z | prep_data.py | mlrun/ltm | 74d0b3ddc2462265528615d91013fdb409e91d66 | [
"Apache-2.0"
] | null | null | null | import mlrun
def prep_data(context, source_url: mlrun.DataItem, label_column='label'):
    """Clean the dataset at `source_url` and log results to the MLRun context.

    Loads the DataItem as a DataFrame, integer-encodes `label_column`,
    records the row count as 'num_rows', and stores the cleaned data as a
    CSV artifact named 'cleaned_data'.
    """
    # Convert the DataItem to a pandas DataFrame
    df = source_url.as_df()
    print("data url:", source_url.url)
    # Integer-encode the label column via pandas category codes
    df[label_column] = df[label_column].astype('category').cat.codes
    # Record the DataFrame length after the run
    context.log_result('num_rows', df.shape[0])
    # Store the data set in your artifacts database
    context.log_dataset('cleaned_data', df=df, index=False, format='csv')
a7b07e61870397dea9d60d450e2ca633cc123e2c | 203 | py | Python | tccli/services/sms/__init__.py | hapsyou/tencentcloud-cli-intl-en | fa8ba71164484f9a2be4b983080a1de08606c0b0 | [
"Apache-2.0"
] | null | null | null | tccli/services/sms/__init__.py | hapsyou/tencentcloud-cli-intl-en | fa8ba71164484f9a2be4b983080a1de08606c0b0 | [
"Apache-2.0"
] | null | null | null | tccli/services/sms/__init__.py | hapsyou/tencentcloud-cli-intl-en | fa8ba71164484f9a2be4b983080a1de08606c0b0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from tccli.services.sms.sms_client import register_arg
from tccli.services.sms.sms_client import get_actions_info
from tccli.services.sms.sms_client import AVAILABLE_VERSION_LIST
| 40.6 | 64 | 0.827586 |
71fe4d9f10a66dda9899f7f659594c967a8b7908 | 1,054 | py | Python | setup.py | trendels/fieldmarshal | d3d1803d62d84fc1efc4613cd5b86e5ea976d2f4 | [
"MIT"
] | null | null | null | setup.py | trendels/fieldmarshal | d3d1803d62d84fc1efc4613cd5b86e5ea976d2f4 | [
"MIT"
] | null | null | null | setup.py | trendels/fieldmarshal | d3d1803d62d84fc1efc4613cd5b86e5ea976d2f4 | [
"MIT"
] | null | null | null | import os
import re
from setuptools import setup, find_packages

# Extract __version__ from the package source so it is defined in one place.
with open(os.path.join('src', 'fieldmarshal', '__init__.py')) as f:
    VERSION = re.search(
        r'^__version__\s*=\s*[\'"](.*)[\'"]', f.read(), re.M
    ).group(1)

# Use the README as the long description shown on PyPI.
with open('README.md') as f:
    README = f.read()

setup(
    name='fieldmarshal',
    version=VERSION,
    author='Stanis Trendelenburg',
    author_email='stanis.trendelenburg@gmail.com',
    url='https://github.com/trendels/fieldmarshal',
    license='MIT',
    description='Marshal/unmarshal attrs-based data classes to and from JSON',
    long_description=README,
    long_description_content_type='text/markdown',
    classifiers = [
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=['attrs>=17.4.0'],
    zip_safe=False,
)
| 29.277778 | 78 | 0.63093 |
8ef2cfc35c28b79e9aaf6ac289fd8a3d0dc4ce8c | 6,828 | py | Python | main.py | rajeshsaini2115/pyDF-Bot | 752f7fd49037936947b0db63fb812ecbdeac8a17 | [
"MIT"
] | 2 | 2021-09-06T02:11:46.000Z | 2021-09-06T02:13:08.000Z | main.py | rajeshsaini2115/pyDF-Bot | 752f7fd49037936947b0db63fb812ecbdeac8a17 | [
"MIT"
] | null | null | null | main.py | rajeshsaini2115/pyDF-Bot | 752f7fd49037936947b0db63fb812ecbdeac8a17 | [
"MIT"
] | null | null | null | import os
from os import error, system, name
import logging
import pyrogram
import PyPDF2
import time
from decouple import config
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ForceReply
from pyrogram.types import User, Message, Document
# Pyrogram client; credentials come from the environment (Heroku config vars).
bughunter0 = Client(
    "PyDF-BOT",
    bot_token = os.environ["BOT_TOKEN"],
    api_id = int(os.environ["API_ID"]),
    api_hash = os.environ["API_HASH"]
)

# Message templates (Telegram Markdown). START_STR is .format()-ed with the
# user's mention.
START_STR = """
Hi **{}**, I'm PyDF Bot. I can Provide all Help regarding PDF file
"""

ABOUT = """
**BOT:** `PYDF BOT`
**AUTHOR :** [bughunter0](https://t.me/bughunter0)
**SERVER :** `Heroku`
**LIBRARY :** `Pyrogram`
**SOURCE :** [BugHunterBots](https://t.me/bughunterbots)
**LANGUAGE :** `Python 3.9`
"""

HELP = """
Send me a pdf file to Move on
"""

# Where incoming PDFs are downloaded before processing.
DOWNLOAD_LOCATION = os.environ.get("DOWNLOAD_LOCATION", "./DOWNLOADS/PyDF/")
# TXT_LOCATION = os.environ.get("TXT_LOCATION", "./DOWNLOADS/txt/")
path = './DOWNLOADS/txt/bughunter0.txt'
Disclaimer = """ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE """

# Inline keyboards: callback_data values are dispatched in cb_data().
START_BUTTON = InlineKeyboardMarkup(
    [[
        InlineKeyboardButton('ABOUT',callback_data='cbabout'),
        InlineKeyboardButton('HELP',callback_data='cbhelp')
    ],
    [
        InlineKeyboardButton('↗ Join Here ↗', url='https://t.me/BughunterBots'),
    ]]
)

CLOSE_BUTTON = InlineKeyboardMarkup(
    [[
        InlineKeyboardButton('Back',callback_data='cbclose'),
    ]]
)
@bughunter0.on_callback_query() # callbackQuery()
async def cb_data(bot, update):
    """Dispatch inline-keyboard callbacks: help/about panels or back to start."""
    if update.data == "cbhelp":
        await update.message.edit_text(
            text=HELP,
            reply_markup=CLOSE_BUTTON,
            disable_web_page_preview=True
        )
    elif update.data == "cbabout":
        await update.message.edit_text(
            text=ABOUT,
            reply_markup=CLOSE_BUTTON,
            disable_web_page_preview=True
        )
    else:
        # any other callback (e.g. 'cbclose') returns to the start screen
        await update.message.edit_text(
            text=START_STR.format(update.from_user.mention),
            disable_web_page_preview=True,
            reply_markup=START_BUTTON
        )
@bughunter0.on_message(filters.command(["start"])) # StartCommand
async def start(bot, update):
    """Reply to /start with the greeting and the main inline keyboard."""
    await update.reply_text(
        text=START_STR.format(update.from_user.mention),
        disable_web_page_preview=True,
        reply_markup=START_BUTTON
    )
@bughunter0.on_message(filters.document | (filters.document & filters.forwarded))
async def document(bot, message):
    """When any document (incl. forwarded) arrives, suggest the PDF commands."""
    message_id=int(message.message_id)
    chat_id=int(message.chat.id)
    await bot.send_message(text=" ◆ /pdf2txt - Extract text to Txt file \n ◆ /info to Get PDF information",reply_to_message_id=message_id,chat_id=chat_id)
@bughunter0.on_message(filters.command(["pdf2txt"])) # PdfToText
async def pdf_to_text(bot, message):
    """Extract the text of a replied-to PDF into a .txt file and send it back.

    Fixes over the original: the except-branch could hit NameError when `txt`
    or `pdf_path` was never assigned; the PDF handle and the per-page
    `file_write` handles were never closed; cleanup used os.remove on files
    that may not exist. Files are now opened with `with` and cleanup is
    best-effort in `finally`.
    """
    if not message.reply_to_message:
        await message.reply("Please Reply to PDF file")
        return
    pdf_path = DOWNLOAD_LOCATION + f"{message.chat.id}.pdf"
    txt_path = f"{message.chat.id}.txt"
    status = await message.reply("Downloading.....")
    try:
        await message.reply_to_message.download(pdf_path)
        await status.edit("Downloaded File")
        with open(pdf_path, 'rb') as pdf:  # pdfFileObject
            pdf_reader = PyPDF2.PdfFileReader(pdf)  # pdfReaderObject
            await status.edit("Getting Number of Pages....")
            num_of_pages = pdf_reader.getNumPages()
            await status.edit(f"Found {num_of_pages} Page")
            await status.edit("Extracting Text from PDF...")
            # single output handle for all pages (was re-opened per page)
            with open(txt_path, 'a+') as text_file:
                for page in range(0, num_of_pages):
                    page_content = pdf_reader.getPage(page).extractText()
                    text_file.write(f"\n page number - {page} \n")  # page header
                    text_file.write(f" {page_content} ")            # page text
                    text_file.write("\n © BugHunterBots \n ")       # page footer
        await message.reply_document(txt_path, caption="©@BugHunterBots")
    except Exception as error:
        await status.delete()
        await message.reply_text(f"{error}")
    finally:
        # best-effort cleanup: either file may not have been created
        for leftover in (pdf_path, txt_path):
            try:
                os.remove(leftover)
            except OSError:
                pass
@bughunter0.on_message(filters.command(["info"]))
async def info(bot, message):
    """Reply with metadata (author, creator, pages, ...) of a replied-to PDF.

    Fixes over the original: the PDF handle was never closed; cleanup ran
    os.remove on a file that may not exist; the local variable shadowed the
    handler name `info`.
    """
    if not message.reply_to_message:
        await message.reply_text("Please Reply to a Pdf File")
        return
    pdf_path = DOWNLOAD_LOCATION + f"{message.chat.id}.pdf"
    try:
        txt = await message.reply_text("Validating Pdf ")
        await txt.edit("Downloading.....")
        await message.reply_to_message.download(pdf_path)
        await txt.edit("Downloaded File")
        with open(pdf_path, 'rb') as pdf:  # pdfFileObject
            pdf_reader = PyPDF2.PdfFileReader(pdf)  # pdfReaderObject
            await txt.edit("Getting Number of Pages....")
            num_of_pages = pdf_reader.getNumPages()
            await txt.edit(f"Found {num_of_pages} Page")
            await txt.edit("Getting PDF info..")
            doc_info = pdf_reader.getDocumentInfo()
        await txt.edit(f"""
**Author :** `{doc_info.author}`
**Creator :** `{doc_info.creator}`
**Producer :** `{doc_info.producer}`
**Subject :** `{doc_info.subject}`
**Title :** `{doc_info.title}`
**Pages :** `{num_of_pages}`""")
    except Exception as error:
        await message.reply_text(f"Oops , {error}")
    finally:
        # best-effort cleanup: download may have failed before the file existed
        try:
            os.remove(pdf_path)
        except OSError:
            pass
# @bughunter0.on_message(filters.command(["merge"])) # Under Maintenance
# Start the client and block until stopped.
bughunter0.run()
| 41.634146 | 482 | 0.618483 |
7aa08e29665560b03d1400060f6b4e952f3f612b | 621 | py | Python | tests/test_sitk2vtk.py | paulin1C/dicom2stl | b3e7eaae2b6ec7797c617966e88f2ad4e1e1cf47 | [
"Apache-2.0"
] | 1 | 2021-03-29T17:23:02.000Z | 2021-03-29T17:23:02.000Z | tests/test_sitk2vtk.py | paulin1C/dicom2stl | b3e7eaae2b6ec7797c617966e88f2ad4e1e1cf47 | [
"Apache-2.0"
] | null | null | null | tests/test_sitk2vtk.py | paulin1C/dicom2stl | b3e7eaae2b6ec7797c617966e88f2ad4e1e1cf47 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import unittest
from utils import sitk2vtk
import vtk
import SimpleITK as sitk
import platform
class TestSITK2VTK(unittest.TestCase):
    """Unit test for the SimpleITK-to-VTK image conversion helper."""
    def test_sitk2vtk(self):
        """Convert a Gaussian test image and check dimensions and one voxel value."""
        print("Testing sitk2vtk")
        dims = [102, 102, 102]
        img = sitk.GaussianSource(sitk.sitkUInt8, dims)
        vol = sitk2vtk.sitk2vtk(img, True)
        # converted volume must keep the source dimensions
        self.assertTupleEqual(vol.GetDimensions(), tuple(dims))
        print("Accessing VTK image")
        val = vol.GetScalarComponentAsFloat(5, 5, 5, 0)
        print(val)
        # expected intensity at (5,5,5) for this Gaussian source
        self.assertAlmostEqual(val, 3.0)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 23 | 63 | 0.663446 |
9bbd4164b7a8a9fcfe1e664329ee1ad2e8a46ca5 | 13,941 | py | Python | rlcard/agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_updater.py | 196693/rlcard | 81f8b579a2efcce7f195c8c2c8906b574c0e8abb | [
"MIT"
] | 1,735 | 2019-09-05T12:49:43.000Z | 2022-03-30T12:02:07.000Z | rlcard/agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_updater.py | Taidy-zy/rlcard | cd4645d70eacc6016fcaf96873d6bc6869fcf2d9 | [
"MIT"
] | 197 | 2019-09-14T05:59:02.000Z | 2022-03-03T19:21:19.000Z | rlcard/agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_updater.py | Taidy-zy/rlcard | cd4645d70eacc6016fcaf96873d6bc6869fcf2d9 | [
"MIT"
] | 476 | 2019-09-13T15:25:32.000Z | 2022-03-29T01:41:29.000Z | '''
Project: Gui Gin Rummy
File name: game_canvas_updater.py
Author: William Hale
Date created: 3/14/2020
'''
# from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .game_canvas import GameCanvas
from rlcard.envs.gin_rummy import GinRummyEnv
from ..gin_rummy_human_agent import HumanAgent
from typing import List
from rlcard.games.gin_rummy.utils.action_event import ActionEvent
from rlcard.games.gin_rummy.utils.move import GinRummyMove
from rlcard.games.gin_rummy.utils.move import PlayerMove
from rlcard.games.gin_rummy.utils.move import DealHandMove
from rlcard.games.gin_rummy.utils.move import DrawCardMove, PickupDiscardMove, DeclareDeadHandMove
from rlcard.games.gin_rummy.utils.move import DiscardMove, KnockMove, GinMove
from rlcard.games.gin_rummy.utils.move import ScoreNorthMove, ScoreSouthMove
from rlcard.games.gin_rummy.utils.gin_rummy_error import GinRummyProgramError
from . import configurations
from . import handling_tap_discard_pile
from . import handling_tap_held_pile
from . import starting_new_game
from . import status_messaging
from . import utils
from .player_type import PlayerType
import rlcard.games.gin_rummy.utils.utils as gin_rummy_utils
class GameCanvasUpdater(object):
    """Drives the Gin Rummy GUI: replays moves from the env thread onto the canvas.

    Polls the environment thread's move sheet (via `mark`) and animates each
    pending move. `busy_body_id` is the player whose move is currently being
    animated; human action ids chosen through the GUI are queued in
    `pending_human_action_ids` and handed to the human agent.
    """

    def __init__(self, game_canvas: 'GameCanvas'):
        self.game_canvas = game_canvas
        self.env_thread = None
        self.pending_human_action_ids = []  # type: List[int]  # FIFO of human-chosen actions
        self.busy_body_id = None  # type: int or None  # player currently being animated
        self.is_stopped = False

    @property
    def mark(self) -> int:  # convenience property to briefly get mark
        return self.env_thread.mark

    @property
    def moves(self) -> List[GinRummyMove]:  # convenience property to briefly get moves
        return self.env.game.round.move_sheet

    @property
    def env(self) -> 'GinRummyEnv':
        return self.env_thread.gin_rummy_env

    @property
    def human_agent(self) -> 'HumanAgent' or None:
        """Return the south agent if it is human, else None."""
        # Fix: the top-level HumanAgent import is under TYPE_CHECKING only,
        # so it is not defined at runtime; import it here for the isinstance.
        from ..gin_rummy_human_agent import HumanAgent
        south_agent = self.env_thread.gin_rummy_env.agents[1]
        return south_agent if isinstance(south_agent, HumanAgent) else None

    def apply_canvas_updates(self):
        """Polling loop: advance the mark, then reschedule until the env thread stops."""
        if not self.env_thread.is_stopped:
            self._advance_mark()
            delay_ms = 1
            self.game_canvas.after(delay_ms, func=self.apply_canvas_updates)
        else:
            self.is_stopped = True

    def did_perform_actions(self, actions: List[int]):
        """Record finished actions (queue human ones), bump the mark, free the actor."""
        for action_id in actions:
            if self.game_canvas.player_types[self.busy_body_id] == PlayerType.human_player:
                self.pending_human_action_ids.append(action_id)
            self.env_thread.mark += 1
        self.busy_body_id = None

    #
    #   Private methods
    #

    def _advance_mark(self):
        """Advance one step: animate the next computer move, deliver a queued
        human action id, or prompt the waiting human player."""
        if self.env.game.round is None:
            return
        if self.game_canvas.query.is_game_over() and self.mark >= len(self.moves):
            return
        if self.busy_body_id is not None:
            return  # an animation is already in flight
        if self.human_agent and not self.human_agent.is_choosing_action_id:
            return
        if self.human_agent and self.human_agent.chosen_action_id is not None:
            return
        if self.env_thread.is_action_id_available():
            move = self.moves[self.mark]
            thinking_time_in_ms = 0  # type: int
            if isinstance(move, DealHandMove):
                if not self.mark == 0:
                    raise GinRummyProgramError("mark={} must be 0.".format(self.mark))
                self.busy_body_id = move.player_dealing.player_id
            elif isinstance(move, ScoreNorthMove) or isinstance(move, ScoreSouthMove):
                self.busy_body_id = move.player.player_id
            elif isinstance(move, PlayerMove):
                self.busy_body_id = move.player.player_id
                thinking_time_in_ms = 1000  # type: int  # simulate the computer thinking
            else:
                # Fix: message previously printed the literal "{move}".
                raise GinRummyProgramError("GameCanvasUpdater advance_mark: unknown move={}".format(move))
            if self.mark > 0:
                if self.game_canvas.player_types[self.busy_body_id] == PlayerType.human_player:
                    raise GinRummyProgramError("busy_body_id={} must not be human player.".format(self.busy_body_id))
                if not self.busy_body_id == self.game_canvas.getter.get_current_player_id():
                    raise GinRummyProgramError("busy_body_id={} must equal current_player_id={}".format(self.busy_body_id, self.game_canvas.getter.get_current_player_id()))
            self._show_prolog_messages_on_computer_turn()
            self.game_canvas.after(thinking_time_in_ms, self._advance_mark_for_computer_player)
            return
        if self.pending_human_action_ids:
            action_id = self.pending_human_action_ids.pop(0)  # pending_human_action_ids is queue
            if utils.is_debug():
                action_event = ActionEvent.decode_action(action_id=action_id)
                print("S {}".format(action_event))  # FIXME: South may not always be actor
            self.human_agent.chosen_action_id = action_id
            return
        if not self.mark >= len(self.moves):  # FIXME: should be no pending computer moves
            raise GinRummyProgramError("Should be no pending computer moves.")
        waiting_player_id = self.env_thread.get_waiting_player_id()
        if waiting_player_id is None:
            return
        # FIXME: should be no pending computer moves
        if self.human_agent.chosen_action_id is not None:
            raise GinRummyProgramError("self.human_agent.chosen_action_id must not be None.")
        if self.busy_body_id is not None:
            raise GinRummyProgramError("busy_body_id={} must be None.".format(self.busy_body_id))
        if not waiting_player_id == self.game_canvas.getter.get_current_player_id():
            raise GinRummyProgramError("waiting_player_id={} must be current_player_id.".format(waiting_player_id))
        self.busy_body_id = waiting_player_id
        if not self.game_canvas.player_types[self.busy_body_id] == PlayerType.human_player:
            raise GinRummyProgramError("busy_body_id={} must be human player.".format(self.busy_body_id))
        legal_actions = self.human_agent.state['legal_actions']
        if self.game_canvas.query.is_scoring(legal_actions=legal_actions):
            # 'boss' performs this, not human
            if not len(legal_actions) == 1:
                raise GinRummyProgramError("len(legal_actions)={} must be 1.".format(len(legal_actions)))
            action_id = legal_actions[0]
            self._perform_score_action_id(action_id=action_id)
            return
        self._show_prolog_messages_on_human_turn()

    #
    #   Private methods to handle 'boss' play
    #
    #   The computer is the 'boss' who handles artifact moves injected into the move stream.
    #

    def _perform_deal_hand_move(self, move: DealHandMove):
        """Animate dealing a fresh hand, then mark the move as done."""
        if utils.is_debug():
            print("{}".format(move))
        starting_new_game.show_new_game(game_canvas=self.game_canvas)
        self.env_thread.mark += 1
        self.busy_body_id = None

    def _perform_score_action_id(self, action_id: int):
        """Perform a scoring action on behalf of the busy player ('boss' move)."""
        if utils.is_debug():
            if self.busy_body_id == 0:
                if not action_id == configurations.SCORE_PLAYER_0_ACTION_ID:
                    raise GinRummyProgramError("action_id={} must be SCORE_PLAYER_0_ACTION_ID".format(action_id))
            else:
                if not action_id == configurations.SCORE_PLAYER_1_ACTION_ID:
                    raise GinRummyProgramError("action_id={} must be SCORE_PLAYER_1_ACTION_ID".format(action_id))
        self.game_canvas.after_idle(self.did_perform_actions, [action_id])

    #
    #   Private methods to handle human play
    #

    def _show_prolog_messages_on_human_turn(self):
        """Show the status message prompting the human player's choice."""
        legal_actions = self.human_agent.state['legal_actions']
        status_messaging.show_prolog_message(player_id=self.busy_body_id,
                                             legal_actions=legal_actions,
                                             game_canvas=self.game_canvas)

    #
    #   Private methods to handle computer play
    #

    def _show_prolog_messages_on_computer_turn(self):
        """Show the status message for the computer player's upcoming move."""
        legal_actions = self.game_canvas.getter.get_legal_actions(player_id=self.busy_body_id)
        status_messaging.show_prolog_message(player_id=self.busy_body_id,
                                             legal_actions=legal_actions,
                                             game_canvas=self.game_canvas)

    def _advance_mark_for_computer_player(self):
        """Dispatch the current move to its type-specific animation handler."""
        if not self.mark < len(self.moves):
            raise GinRummyProgramError("mark={} must be less than len(moves)={}.".format(self.mark, len(self.moves)))
        move = self.moves[self.mark]
        if isinstance(move, DealHandMove):
            self._perform_deal_hand_move(move=move)
        elif isinstance(move, DrawCardMove):
            self._perform_draw_card_move(move=move)
        elif isinstance(move, PickupDiscardMove):
            self._perform_pick_up_discard_move(move=move)
        elif isinstance(move, DeclareDeadHandMove):
            self._do_perform_declare_dead_hand_move(move=move)
        elif isinstance(move, DiscardMove):
            self._perform_discard_move(move=move)
        elif isinstance(move, KnockMove):
            self._perform_knock_move(move=move)
        elif isinstance(move, GinMove):
            self._perform_gin_move(move=move)
        elif isinstance(move, ScoreNorthMove) or isinstance(move, ScoreSouthMove):
            if utils.is_debug():
                print("{}".format(move))
            self._perform_score_action_id(action_id=move.action.action_id)

    def _perform_draw_card_move(self, move: DrawCardMove):
        """Animate drawing the top stock-pile card into the player's held pile."""
        if utils.is_debug():
            print("{}".format(move))
        player = move.player
        player_id = player.player_id
        card = move.card
        source_item_id = self.game_canvas.getter.get_top_stock_pile_item_id()
        source_card_item_id = self.game_canvas.getter.get_card_id(card_item_id=source_item_id)
        if not source_card_item_id == gin_rummy_utils.get_card_id(card=card):
            raise GinRummyProgramError("source_card_item_id={} doesn't match with card={}.".format(source_card_item_id, card))
        self.game_canvas.addtag_withtag(configurations.DRAWN_TAG, source_item_id)
        target_item_id = self.game_canvas.getter.get_held_pile_item_ids(player_id=player_id)[-1]
        target_item = self.game_canvas.canvas_item_by_item_id[target_item_id]
        handling_tap_held_pile.handle_tap_held_pile(hit_item=target_item, game_canvas=self.game_canvas)

    def _perform_pick_up_discard_move(self, move: PickupDiscardMove):
        """Animate picking up the top discard-pile card into the held pile."""
        if utils.is_debug():
            print("{}".format(move))
        player = move.player
        player_id = player.player_id
        card = move.card
        source_item_id = self.game_canvas.getter.get_top_discard_pile_item_id()
        source_card_item_id = self.game_canvas.getter.get_card_id(card_item_id=source_item_id)
        if not source_card_item_id == gin_rummy_utils.get_card_id(card=card):
            raise GinRummyProgramError("source_card_item_id={} doesn't match with card={}.".format(source_card_item_id, card))
        self.game_canvas.addtag_withtag(configurations.DRAWN_TAG, source_item_id)
        target_item_id = self.game_canvas.getter.get_held_pile_item_ids(player_id=player_id)[-1]
        target_item = self.game_canvas.canvas_item_by_item_id[target_item_id]
        handling_tap_held_pile.handle_tap_held_pile(hit_item=target_item, game_canvas=self.game_canvas)

    def _do_perform_declare_dead_hand_move(self, move: DeclareDeadHandMove):
        """Animate a dead-hand declaration and mark the action as performed."""
        if utils.is_debug():
            print("{}".format(move))
        self.game_canvas.post_doing_action.post_do_declare_dead_hand_action(player_id=self.busy_body_id)
        self.game_canvas.after_idle(self.did_perform_actions, [move.action.action_id])

    def _perform_discard_move(self, move: DiscardMove):
        """Animate discarding the move's card onto the discard pile."""
        if utils.is_debug():
            print("{}".format(move))
        action_id = move.action.action_id
        if self.busy_body_id is None:
            raise GinRummyProgramError("busy_body_id cannot be None.")
        card_id = utils.get_action_card_id(action_id)
        source_item_id = self.game_canvas.card_item_ids[card_id]
        self.game_canvas.addtag_withtag(configurations.SELECTED_TAG, source_item_id)
        target_item_id = self.game_canvas.getter.get_top_discard_pile_item_id()
        if target_item_id is None:
            target_item_id = self.game_canvas.discard_pile_box_item
        if not self.game_canvas.is_treating_as_human(player_id=self.busy_body_id):
            # move source_item_id to end of held_pile invisibly
            self.game_canvas.tag_raise(source_item_id)
            utils.fan_held_pile(player_id=self.busy_body_id, game_canvas=self.game_canvas)
        handling_tap_discard_pile.handle_tap_discard_pile(hit_item=target_item_id, game_canvas=self.game_canvas)

    def _perform_knock_move(self, move: KnockMove):
        """Animate a knock with the move's card."""
        if utils.is_debug():
            print("{}".format(move))
        action_id = move.action.action_id
        card_id = utils.get_action_card_id(action_id)
        source_item_id = self.game_canvas.card_item_ids[card_id]
        self.game_canvas.addtag_withtag(configurations.SELECTED_TAG, source_item_id)
        self.game_canvas.post_doing_action.post_do_knock_action(source_item_id)

    def _perform_gin_move(self, move: GinMove):
        """Animate going gin with the move's card."""
        if utils.is_debug():
            print("{}".format(move))
        action_id = move.action.action_id
        card_id = utils.get_action_card_id(action_id)
        source_item_id = self.game_canvas.card_item_ids[card_id]
        self.game_canvas.addtag_withtag(configurations.SELECTED_TAG, source_item_id)
        self.game_canvas.post_doing_action.post_do_going_out_action(source_item_id)
| 48.915789 | 168 | 0.693853 |
3fc6ea625182569bf59302d34e338ce8017086f0 | 626 | py | Python | tests/sample_runbooks/simple_runbook.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 37 | 2019-12-23T15:23:20.000Z | 2022-03-15T11:12:11.000Z | tests/sample_runbooks/simple_runbook.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 144 | 2020-03-09T11:22:09.000Z | 2022-03-28T21:34:09.000Z | tests/sample_runbooks/simple_runbook.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 46 | 2020-01-23T14:28:04.000Z | 2022-03-09T04:17:10.000Z | """
Calm DSL Sample Runbook used for testing runbook pause and play
"""
from calm.dsl.runbooks import runbook, runbook_json
from calm.dsl.runbooks import RunbookTask as Task
code = '''print "Start"
sleep(20)
print "End"'''
@runbook
def DslSimpleRunbook():
    """Runbook with five sequential escript tasks that each print and sleep;
    long enough to exercise runbook pause/play."""
    Task.Exec.escript(name="Task1", script=code)
    Task.Exec.escript(name="Task2", script=code)
    Task.Exec.escript(name="Task3", script=code)
    Task.Exec.escript(name="Task4", script=code)
    Task.Exec.escript(name="Task5", script=code)
def main():
    """Compile DslSimpleRunbook and print its JSON payload."""
    print(runbook_json(DslSimpleRunbook))
if __name__ == "__main__":
    main()
| 19.5625 | 63 | 0.702875 |
ba0bd9b179a2a5dca882d8a99401474925d770e0 | 726 | py | Python | empresa.py | bernardolorenzini/Python_Projects | cf2b196de10a90f767cd227fa34e649c8d681dc5 | [
"MIT"
] | null | null | null | empresa.py | bernardolorenzini/Python_Projects | cf2b196de10a90f767cd227fa34e649c8d681dc5 | [
"MIT"
] | null | null | null | empresa.py | bernardolorenzini/Python_Projects | cf2b196de10a90f767cd227fa34e649c8d681dc5 | [
"MIT"
] | null | null | null | class Funcionario:
id = 0
def __init__(self, nome, sobrenome, idade, data_entrada):
self.nome = nome
self.sobrenome = sobrenome
self.idade = idade
self.data_entrada = data_entrada
class Gerente(Funcionario):
    # Flat deduction applied to every manager's salary by pagamento().
    juros = 800
    def __init__(self, nome, sobrenome, idade, data_entrada, salario, setor, cargo):
        super().__init__(nome, sobrenome, idade, data_entrada)
        self.salario = salario
        self.setor = setor
        self.cargo = cargo
    def pagamento(self):
        # Net payment: gross salary minus the class-wide deduction.
        return self.salario - Gerente.juros
f1 = Funcionario("Ademir", "Silva", 55, "23/04/1992")
# NOTE(review): `"adm" "pesq"` below is implicit string concatenation and
# yields the single argument "admpesq". A missing comma looks likely, but
# adding one would exceed Gerente's parameter list -- confirm the intended
# `cargo` value with the author before changing it.
g1 = Gerente("Ademir", "Silva", 55, "23/04/1992", 3000, "cont", "adm" "pesq")
print(g1.pagamento())
| 27.923077 | 84 | 0.636364 |
1ff8718a8b65b738eb536f85eb2b5e0906b73ccb | 504 | py | Python | chapter-11/exercise05.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | chapter-11/exercise05.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | chapter-11/exercise05.py | krastin/pp-cs3.0 | 502be9aac2d84215db176864e443c219e5e26591 | [
"MIT"
] | null | null | null | from typing import Dict
def count_values(sample_dict: Dict) -> int:
    """takes a single dictionary as an argument and returns the number of distinct values it contains
    >>> sample_dict = {'red': 1, 'green': 1, 'blue': 2}
    >>> count_values(sample_dict)
    2
    """
    # A set deduplicates in O(n) instead of the original O(n^2)
    # list-membership scan. Values must be hashable (true for the data
    # this exercise works with).
    return len(set(sample_dict.values()))
if __name__ == "__main__":
    # Run the embedded doctest examples when executed directly.
    import doctest
    doctest.testmod()
1a5f6f73c2c3bb548f4aee616afc86e41ab844f4 | 16,725 | py | Python | artssat/atmosphere/atmosphere.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | 3 | 2020-09-02T08:20:42.000Z | 2020-12-18T17:19:38.000Z | artssat/atmosphere/atmosphere.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | null | null | null | artssat/atmosphere/atmosphere.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | null | null | null | """
The model atmosphere
====================
"""
import numpy as np
from artssat.atmosphere.cloud_box import CloudBox
from artssat.jacobian import JacobianBase
from artssat.retrieval import RetrievalBase, RetrievalQuantity
from artssat.atmosphere.catalogs import LineCatalog, Perrin
class TemperatureJacobian(JacobianBase):
    """Jacobian entry for the atmospheric temperature field.

    Optionally retrieves on coarser pressure / latitude / longitude grids;
    an empty grid means "use the corresponding workspace grid".
    """
    def __init__(self,
                 quantity,
                 index,
                 p_grid=None,
                 lat_grid=None,
                 lon_grid=None,
                 hse="on"):
        super().__init__(quantity, index)
        # Normalize to numpy arrays so the `.size` checks in
        # _make_setup_kwargs work even for the defaults or plain lists
        # (the old `[]` defaults had no `.size` attribute and crashed).
        self.p_grid = np.asarray([] if p_grid is None else p_grid)
        self.lat_grid = np.asarray([] if lat_grid is None else lat_grid)
        self.lon_grid = np.asarray([] if lon_grid is None else lon_grid)
        self.hse = hse

    def _make_setup_kwargs(self, ws):
        # Fall back to the workspace grids when no retrieval grid was given.
        g1 = ws.p_grid if self.p_grid.size == 0 else self.p_grid
        g2 = ws.lat_grid if self.lat_grid.size == 0 else self.lat_grid
        g3 = ws.lon_grid if self.lon_grid.size == 0 else self.lon_grid
        return {"g1": g1, "g2": g2, "g3": g3,
                "hse": self.hse}

    def setup(self, ws, data_provider, *args, **kwargs):
        """Register the temperature Jacobian on the ARTS workspace."""
        kwargs = self._make_setup_kwargs(ws)
        ws.jacobianAddTemperature(**kwargs)
class TemperatureRetrieval(RetrievalBase, TemperatureJacobian):
    # Combines the retrieval bookkeeping of RetrievalBase with the grid
    # handling inherited from TemperatureJacobian.
    def __init__(self,
                 quantity,
                 index,
                 p_grid = [],
                 lat_grid = [],
                 lon_grid = [],
                 hse = "on"):
        RetrievalBase.__init__(self)
        TemperatureJacobian.__init__(self, quantity, index,
                                     p_grid, lat_grid, lon_grid, hse)
    def add(self, ws):
        # Register temperature as a retrieval quantity on the workspace,
        # reusing the same grid kwargs as the Jacobian setup.
        ws.retrievalAddTemperature(**self._make_setup_kwargs(ws))
class Temperature(RetrievalQuantity):
    # Retrieval quantity wrapping the atmosphere's temperature field.
    def __init__(self, atmosphere):
        super().__init__()
        self.atmosphere = atmosphere
    def get_data(self, ws, data_provider, *args, **kwargs):
        # Fetch the temperature field from the data provider, validate its
        # shape against the atmosphere's dimensions and store it on the
        # workspace.
        t = data_provider.get_temperature(*args, **kwargs)
        self.atmosphere.__check_dimensions__(t, "temperature")
        ws.t_field = self.atmosphere.__reshape__(t)
    def set_from_x(self, ws, xa):
        # NOTE(review): `ws` is unused here and the result lands on
        # `self.t_field`, which nothing else reads; `ws.t_field = x` looks
        # like the intended target -- confirm before changing.
        x = self.transformation.invert(xa)
        self.t_field = x
    @property
    def name(self):
        return "temperature"
    @property
    def jacobian_class(self):
        return TemperatureJacobian
    @property
    def retrieval_class(self):
        return TemperatureRetrieval
class Atmosphere:
    """
    The model atmosphere: dimensions, absorbing species, scattering
    species, a surface model and an optional absorption-line catalog.
    Provides the setup / get_data / run_checks steps that configure an
    ARTS workspace for a simulation.
    """
    def __init__(self,
                 dimensions,
                 absorbers = [],
                 scatterers = [],
                 surface = None,
                 catalog = None):
        """
        Args:
            dimensions: Tuple of 1-3 non-negative ints; a 0 entry means the
                extent of that axis is taken from the data at runtime.
            absorbers: List of absorbing species.
            scatterers: List of scattering species.
            surface: Surface model providing ``required_data``.
            catalog: Optional LineCatalog with absorption-line data.
        """
        self.__set_dimensions__(dimensions)
        self._required_data = [("p_grid", dimensions[:1], False),
                               ("temperature", dimensions, False),
                               ("altitude", dimensions, False),
                               ("surface_altitude", dimensions[1:], True)]
        self.absorbers = absorbers
        self.scatterers = scatterers
        self.scattering = len(scatterers) > 0
        self._dimensions = dimensions
        self._cloud_box = CloudBox(n_dimensions = len(dimensions),
                                   scattering = self.scattering)
        self._surface_data_indices = []
        self._surface = surface

        self.temperature = Temperature(self)

        if not surface is None:
            nd = len(self._required_data)
            self._required_data += surface.required_data
            # Remember which required-data entries belong to the surface so
            # they can be swapped out when a new surface is assigned.
            # (Previously stored under the misspelled public name
            # `surface_data_indices` and never read.)
            self._surface_data_indices = range(nd, len(self._required_data))
        self._catalog = catalog

    #
    # Dimensions
    #

    def __set_dimensions__(self, dimensions):
        """Validate the dimension tuple and store it."""
        if not type(dimensions) == tuple or not type(dimensions[0]) == int:
            raise Exception("Dimensions of atmosphere must be given as a tuple "
                            "of integers.")
        if not len(dimensions) in [1, 2, 3]:
            raise Exception("The number of dimensions of the atmosphere "
                            "must be 1, 2 or 3.")
        if not all([n >= 0 for n in dimensions]):
            raise Exception("The dimension tuple must contain only positive "
                            "integers.")
        else:
            self._dimensions = dimensions

    @property
    def dimensions(self):
        return self._dimensions

    #
    # Absorbers
    #

    @property
    def absorbers(self):
        return self._absorbers

    @absorbers.setter
    def absorbers(self, absorbers):
        # Each absorber is also exposed as an attribute under its own name.
        for a in absorbers:
            self.__dict__[a.name] = a
            self._required_data += [(a.name, self._dimensions, False)]
        # Copy so later in-place additions (add_absorber) cannot mutate the
        # caller's list or the shared default argument.
        self._absorbers = list(absorbers)

    def add_absorber(self, absorber):
        self.__dict__[absorber.name] = absorber
        self._required_data += [(absorber.name, self._dimensions, False)]
        # Was `+= absorber`, which tried to iterate the absorber object.
        self._absorbers += [absorber]

    #
    # Cloud box
    #

    @property
    def cloud_box(self):
        return self._cloud_box

    #
    # Catalog
    #

    @property
    def catalog(self):
        """
        Line catalog from which to read absorption line data.
        """
        return self._catalog

    @catalog.setter
    def catalog(self, c):
        if isinstance(c, LineCatalog) or c is None:
            self._catalog = c
        else:
            raise ValueError("Line catalog must be of type LineCatalog.")

    #
    # Jacobian
    #

    def has_jacobian(self):
        """Whether any absorber or scatterer moment has a Jacobian attached."""
        for a in self.absorbers:
            if not a.jacobian is None:
                return True
        for b in self.scatterers:
            for m in b.moments:
                # Was `a.jacobian`, which re-checked the last absorber
                # instead of the scatterer moment.
                if not m.jacobian is None:
                    return True
        return False

    #
    # Scatterers
    #

    @property
    def scatterers(self):
        return self._scatterers

    @scatterers.setter
    def scatterers(self, scatterers):
        if not type(scatterers) is list:
            raise ValueError("The 'scatterers' property can only be set to a list.")
        for s in scatterers:
            self.__dict__[s.name] = s
            self._required_data += [(n, self._dimensions, False) \
                                    for n in s.moment_names]
        # Copy for the same reason as in the absorbers setter.
        self._scatterers = list(scatterers)
        self.scattering = True
        self._cloud_box = CloudBox(n_dimensions = len(self.dimensions),
                                   scattering = self.scattering)

    def add_scatterer(self, scatterer):
        self.__dict__[scatterer.name] = scatterer
        self._required_data += [(n, self._dimensions, False) \
                                for n in scatterer.moment_names]
        self._scatterers += [scatterer]
        self.scattering = True
        self._cloud_box = CloudBox(n_dimensions = len(self.dimensions),
                                   scattering = self.scattering)

    #
    # Surface
    #

    @property
    def surface(self):
        return self._surface

    @surface.setter
    def surface(self, s):
        # Drop the previous surface's data requirements (if any), then
        # append the new surface's requirements and remember their index
        # range. (The original setter referenced an undefined name
        # `surface` and a misspelled index attribute, so it raised
        # NameError whenever it was used.)
        rd = [d for i, d in enumerate(self._required_data) \
              if i not in self._surface_data_indices]
        nd = len(rd)
        if s is not None:
            rd += s.required_data
        self._required_data = rd
        self._surface_data_indices = range(nd, len(rd))
        self._surface = s

    @property
    def required_data(self):
        return self._required_data

    #
    # Setup
    #

    def __setup_absorption__(self, ws, sensors):
        """Configure absorption species, line data and agendas on *ws*."""
        species = []
        for i, a in enumerate(self._absorbers):
            a.setup(ws, i)
            species += [a.get_tag_string()]
        ws.abs_speciesSet(species = species)

        # Read line data from the catalog when one is available; otherwise
        # every absorber must be catalog-free.
        if not self.catalog is None:
            self.catalog.setup(ws, sensors)
            ws.abs_lines_per_speciesCreateFromLines()
            ws.abs_lines_per_speciesSetMirroring(option = "Same")
        else:
            for a in self._absorbers:
                if a.from_catalog:
                    raise Exception("Absorber {} has from_catalog set to true "
                                    "but no catalog is provided".format(a.name))
            ws.abs_lines_per_speciesSetEmpty()

        for i, a in enumerate(self._absorbers):
            tag = a.get_tag_string()

            cutoff = np.float32(a.cutoff)
            cutoff_type = a.cutoff_type
            #ws.abs_lines_per_speciesSetCutoffForSpecies(option = cutoff_type,
            #                                            value = cutoff,
            #                                            species_tag = tag)

            lineshape = a.lineshape
            ws.abs_lines_per_speciesSetLineShapeTypeForSpecies(option = lineshape,
                                                               species_tag = tag)
            normalization = a.normalization
            ws.abs_lines_per_speciesSetNormalizationForSpecies(option = normalization,
                                                               species_tag = tag)

        ws.Copy(ws.abs_xsec_agenda, ws.abs_xsec_agenda__noCIA)
        ws.Copy(ws.propmat_clearsky_agenda,
                ws.propmat_clearsky_agenda__OnTheFly)
        ws.lbl_checkedCalc()

    def __setup_scattering__(self, ws):
        """Register scattering species and their bulk-property names."""
        ws.ScatSpeciesInit()
        pb_names = []
        for s in self._scatterers:
            s.setup(ws, len(pb_names))
            pb_names += s.moment_names
        ws.particle_bulkprop_names = pb_names

    def setup(self, ws, sensors):
        """One-time workspace setup for this atmosphere."""
        if len(self.dimensions) == 1:
            ws.AtmosphereSet1D()
        if len(self.dimensions) == 2:
            ws.AtmosphereSet2D()
        if len(self.dimensions) == 3:
            ws.AtmosphereSet3D()

        self.__setup_absorption__(ws, sensors)
        self.__setup_scattering__(ws)

        self.surface.setup(ws)
        self.cloud_box.setup(ws)

    def setup_jacobian(self, ws):
        for a in self.absorbers:
            a.setup_jacobian(ws)

        for s in self.scatterers:
            for m in s.moments:
                m.setup_jacobian(ws)

    #
    # Data
    #

    def __check_dimensions__(self, f, name):
        """Raise if field *f* disagrees with the atmosphere's dimensions."""
        s = f.shape

        err = "Provided atmospheric " + name + " field"
        err += " is inconsistent with the dimensions of the atmosphere."

        if len(s) != len(self.dimensions):
            raise Exception(err)
        # A 0 entry in `dimensions` matches any extent.
        if not all([i == j or j == 0 for i,j \
                    in zip(s, self.dimensions)]):
            raise Exception(err)

    def __reshape__(self, f):
        """Reshape *f* to the 3D (p, lat, lon) layout ARTS expects."""
        s = [1, 1, 1]
        for i in range(len(self.dimensions)):
            # Fixed axes use the declared extent; free axes (0) take the
            # extent from the data. (Was `self.dimensions[0]`, which tested
            # the pressure axis for every dimension.)
            if self.dimensions[i] > 0:
                s[i] = self.dimensions[i]
            else:
                s[i] = f.shape[i]
        return np.reshape(f, tuple(s))

    def __get_pressure__(self, ws, provider, *args, **kwargs):
        p = provider.get_pressure(*args, **kwargs).ravel()
        if self.dimensions[0] != 0 and p.size != self.dimensions[0]:
            raise Exception("Provided pressure grid is inconsistent with"
                            " dimensions of the atmosphere.")
        ws.p_grid = p

    def __get_altitude__(self, ws, provider, *args, **kwargs):
        dimensions = ws.t_field.value.shape
        z = provider.get_altitude(*args, **kwargs)
        self.__check_dimensions__(z, "altitude")
        z = self.__reshape__(z)
        if not z.shape == dimensions:
            raise Exception("Dimensions of altitude field inconsistent"
                            " with dimensions of temperature field.")
        ws.z_field = z

        # Surface altitude: taken from the provider when available,
        # otherwise the lowest altitude level is used.
        dimensions = ws.t_field.value.shape
        if hasattr(provider, "get_surface_altitude"):
            zs = provider.get_surface_altitude(*args, **kwargs)
            try:
                zs = zs.reshape(dimensions[1:])
                ws.z_surface = zs
            except:
                raise Exception("Shape " + str(zs.shape) + "of provided "
                                "surface altitude is inconsistent with "
                                "the horizontal dimensions of the "
                                "atmosphere " + str(dimensions) + ".")
        else:
            ws.z_surface = ws.z_field.value[0, :, :]

    def __get_latitude__(self, ws, provider, *args, **kwargs):
        if len(self.dimensions) > 1:
            lats = provider.get_latitude(*args, **kwargs)
            # lat_grid is a plain index grid; the true latitudes go into
            # lat_true.
            ws.lat_grid = np.arange(lats.size)
            ws.lat_true = lats

    def __get_longitude__(self, ws, provider, *args, **kwargs):
        if len(self.dimensions) > 1:
            lons = provider.get_longitude(*args, **kwargs)
            ws.lon_true = lons
        if len(self.dimensions) < 3:
            ws.lon_grid = []

    def __get_absorbers__(self, ws, provider, *args, **kwargs):
        """Fill vmr_field from the provider (one slab per absorber)."""
        dimensions = ws.t_field.value.shape
        ws.vmr_field = np.zeros((len(self.absorbers),) + dimensions)

        for i, a in enumerate(self.absorbers):
            if a.retrieval is None:
                fname = "get_" + a.name
                f = provider.__getattribute__(fname)
                x = f(*args, **kwargs)
                self.__check_dimensions__(x, a.name)
                x = self.__reshape__(x)

                if not x.shape == dimensions:
                    raise Exception("Dimensions of " + a.name + " VMR field "
                                    "inconcistent with dimensions of temperature "
                                    "field.")
                ws.vmr_field.value[i, :, :, :] = x

        # Pre-allocate the particle bulk-property field for all scatterer
        # moments; __get_scatterers__ fills it.
        n_moments = sum([len(s.moment_names) for s in self.scatterers])
        ws.particle_bulkprop_field = np.zeros(((n_moments,)
                                               + ws.t_field.value.shape))

    def __get_scatterers__(self, ws, provider, *args, **kwargs):
        dimensions = ws.t_field.value.shape
        # Cloud box spans the full atmosphere.
        ws.cloudbox_on = 1
        ws.cloudbox_limits = [0, dimensions[0] - 1,
                              0, dimensions[1] - 1,
                              0, dimensions[2] - 1]

        #if not self.scatterers is None and len(self.scatterers) > 0:
        #    ws.cloudboxSetFullAtm()

        for s in self.scatterers:
            s.get_data(ws, provider, *args, **kwargs)

    def get_data(self, ws, provider, *args, **kwargs):
        """Pull all atmospheric fields for one simulation from *provider*."""
        self.__get_pressure__(ws, provider, *args, **kwargs)
        self.temperature.get_data(ws, provider, *args, **kwargs)
        self.__get_altitude__(ws, provider, *args, **kwargs)
        self.__get_latitude__(ws, provider, *args, **kwargs)
        self.__get_longitude__(ws, provider, *args, **kwargs)
        self.__get_absorbers__(ws, provider, *args, **kwargs)
        self.__get_scatterers__(ws, provider, *args, **kwargs)

        self.cloud_box.get_data(ws, provider, *args, **kwargs)
        self.surface.get_data(ws, provider, *args, **kwargs)

    #
    # Checks
    #

    def run_checks(self, ws):
        """Run the ARTS consistency checks for atmosphere, agendas and surface."""
        ws.atmgeom_checkedCalc()
        ws.atmfields_checkedCalc(bad_partition_functions_ok = 1)
        # (The original called propmat_clearsky_agenda_checkedCalc twice;
        # once is sufficient.)
        ws.propmat_clearsky_agenda_checkedCalc()
        ws.abs_xsec_agenda_checkedCalc()

        self.cloud_box.run_checks(ws)
        self.surface.run_checks(ws)
class Atmosphere1D(Atmosphere):
    """1D atmosphere: a single pressure column."""

    def __init__(self,
                 absorbers = [],
                 scatterers = [],
                 surface = None,
                 levels = None,
                 catalog = None):
        if levels is None:
            # Free vertical dimension: extent taken from the data.
            dimensions = (0,)
        else:
            if not type(levels) == int:
                raise Exception("The number of levels of the 1D atmosphere "
                                "must be given by an integer.")
            else:
                # Was `(level, )`, which referenced an undefined name and
                # raised NameError whenever `levels` was provided.
                dimensions = (levels, )
        super().__init__(dimensions,
                         absorbers = absorbers,
                         scatterers = scatterers,
                         surface = surface,
                         catalog = catalog)
class Atmosphere2D(Atmosphere):
    """2D atmosphere: pressure plus one horizontal dimension."""

    def __init__(self,
                 absorbers=[],
                 scatterers=[],
                 surface=None,
                 levels=None,
                 catalog=None):
        if levels is None:
            dimensions = (0, 0)
        else:
            # The original left `dimensions` undefined in this branch
            # (NameError). Mirroring Atmosphere1D: a fixed number of
            # pressure levels, free horizontal extent.
            if not type(levels) == int:
                raise Exception("The number of levels of the 2D atmosphere "
                                "must be given by an integer.")
            dimensions = (levels, 0)
        super().__init__(dimensions,
                         absorbers=absorbers,
                         scatterers=scatterers,
                         surface=surface,
                         catalog=catalog)
| 31.378987 | 86 | 0.540269 |
89e7adf6f059ddc6e289252a52a737441d16d030 | 11,435 | py | Python | model/converted_condensenetv2.py | undol26/CondenseNetV2 | 9fb144ea8010a0e152b09b5224d2b343da3b624b | [
"MIT"
] | null | null | null | model/converted_condensenetv2.py | undol26/CondenseNetV2 | 9fb144ea8010a0e152b09b5224d2b343da3b624b | [
"MIT"
] | 2 | 2021-10-15T07:44:35.000Z | 2021-10-15T07:50:01.000Z | model/converted_condensenetv2.py | undol26/CondenseNetV2 | 9fb144ea8010a0e152b09b5224d2b343da3b624b | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from utils import Conv, CondenseLGC, CondenseSFR, HS, SELayer, ResNet
__all__ = ['ConvertedCondenseNetV2', 'converted_cdnv2_a', 'converted_cdnv2_b', 'converted_cdnv2_c', 'converted_cdnv2_d']
class _SFR_DenseLayer(nn.Module):
    # One CondenseNetV2 dense layer with sparse feature reactivation (SFR):
    # 1x1 LGC bottleneck -> 3x3 group conv -> SFR branch that updates the
    # input features before concatenation.
    def __init__(self, in_channels, growth_rate, args, activation, use_se=False):
        super(_SFR_DenseLayer, self).__init__()
        self.group_1x1 = args.group_1x1
        self.group_3x3 = args.group_3x3
        self.group_trans = args.group_trans
        self.use_se = use_se
        ### 1x1 conv i --> b*k
        self.conv_1 = CondenseLGC(in_channels, args.bottleneck * growth_rate,
                                  kernel_size=1, groups=self.group_1x1,
                                  activation=activation)
        ### 3x3 conv b*k --> k
        self.conv_2 = Conv(args.bottleneck * growth_rate, growth_rate,
                           kernel_size=3, padding=1, groups=self.group_3x3,
                           activation=activation)
        ### 1x1 res conv k(8-16-32)--> i (k*l)
        self.sfr = CondenseSFR(growth_rate, in_channels, kernel_size=1,
                               groups=self.group_trans, activation=activation)
        if self.use_se:
            self.se = SELayer(inplanes=growth_rate, reduction=1)
    def forward(self, x):
        # Keep the untouched input for the SFR residual update.
        x_ = x
        x = self.conv_1(x)
        x = self.conv_2(x)
        if self.use_se:
            x = self.se(x)
        # Reactivate the input features, then concatenate reactivated input
        # with the new growth features.
        sfr_feature = self.sfr(x)
        y = x_ + sfr_feature
        return torch.cat([y, x], 1)
class _SFR_DenseLayerLTDN(nn.Module):
    """Multi-path (LTDN) variant of the SFR dense layer.

    The channel dimension is split into `path` equal parts; each part runs
    its own 1x1 LGC -> 3x3 conv -> SFR pipeline (submodules named
    ``path_<i>1`` / ``path_<i>2`` / ``path_<i>3``), and the outputs of
    adjacent path pairs are interleaved before concatenation.
    """
    def __init__(self, in_channels, growth_rate, path, args, activation, use_se=False):
        super(_SFR_DenseLayerLTDN, self).__init__()
        self.group_1x1 = args.group_1x1
        self.group_3x3 = args.group_3x3
        self.path = path
        self.group_trans = args.group_trans
        self.use_se = use_se
        for i in range(path):
            ### 1x1 conv i --> b*k
            layer1 = CondenseLGC(int(in_channels/path), int(args.bottleneck * growth_rate/path),
                                 kernel_size=1, groups=self.group_1x1,
                                 activation=activation)
            self.add_module('path_%d%d' % ((i + 1), 1), layer1)
            ### 3x3 conv b*k --> k
            layer2 = Conv(int(args.bottleneck * growth_rate/path), int(growth_rate/path),
                          kernel_size=3, padding=1, groups=self.group_3x3,
                          activation=activation)
            self.add_module('path_%d%d' % ((i + 1), 2), layer2)
            ### 1x1 res conv k --> i
            layer3 = CondenseSFR(int(growth_rate/path), int(in_channels/path), kernel_size=1,
                                 groups=self.group_trans, activation=activation)
            self.add_module('path_%d%d' % ((i + 1), 3), layer3)
        if self.use_se:
            self.se = SELayer(inplanes=int(growth_rate/path), reduction=1)
    def forward(self, x):
        path = self.path
        part_channels = int(x.shape[1]) // path
        inputs = []
        outputs = []
        for i in range(path):
            xp = x[:, i * part_channels:(i + 1) * part_channels, :, :]
            # getattr instead of the original eval(): identical submodule
            # lookup without executing dynamically-built source strings.
            y = getattr(self, 'path_%d1' % (i + 1))(xp)
            y = getattr(self, 'path_%d2' % (i + 1))(y)
            if self.use_se:
                y = self.se(y)
            # SFR residual update of this path's input slice.
            xp = xp + getattr(self, 'path_%d3' % (i + 1))(y)
            inputs.append(xp)
            outputs.append(y)
        out = []
        for i in range(0, path, 2):
            # Interleave adjacent path pairs: input_i, output_{i+1},
            # input_{i+1}, output_i (assumes `path` is even).
            out += [inputs[i], outputs[i + 1], inputs[i + 1], outputs[i]]
        return torch.cat(out, 1)
class _SFR_DenseBlock(nn.Sequential):
    # Stack of dense layers; input channels grow by `growth_rate` per layer.
    # The LTDN (multi-path) layer type is selected via args.ltdn_model.
    def __init__(self, num_layers, in_channels, growth_rate, path, args, activation, use_se):
        super(_SFR_DenseBlock, self).__init__()
        if args.ltdn_model:
            for i in range(num_layers):
                layer = _SFR_DenseLayerLTDN(in_channels + i * growth_rate, growth_rate, path, args, activation, use_se)
                self.add_module('denselayer_%d' % (i + 1), layer)
        else:
            for i in range(num_layers):
                layer = _SFR_DenseLayer(in_channels + i * growth_rate, growth_rate, args, activation, use_se)
                self.add_module('denselayer_%d' % (i + 1), layer)
class _Transition(nn.Module):
def __init__(self):
super(_Transition, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = self.pool(x)
return x
class ConvertedCondenseNetV2(nn.Module):
    # Full CondenseNetV2 backbone: stem conv, a sequence of SFR dense blocks
    # separated by average-pool transitions, then FC head + classifier.
    # All hyper-parameters come from the `args` namespace (see the
    # converted_cdnv2_* factory functions below).
    def __init__(self, args):
        super(ConvertedCondenseNetV2, self).__init__()
        self.stages = args.stages
        self.growth = args.growth
        self.paths = args.paths
        assert len(self.stages) == len(self.growth)
        self.args = args
        self.progress = 0.0
        # CIFAR inputs are 32x32: keep stride 1 in the stem and pool 8x8
        # at the end; ImageNet-style inputs use stride 2 and a 7x7 pool.
        if args.dataset in ['cifar10', 'cifar100']:
            self.init_stride = 1
            self.pool_size = 8
        else:
            self.init_stride = 2
            self.pool_size = 7
        self.features = nn.Sequential()
        ### Initial nChannels should be 3
        self.num_features = 2 * self.growth[0]
        ### Dense-block 1 (224x224)
        self.features.add_module('init_conv', nn.Conv2d(3, self.num_features,
                                                        kernel_size=3,
                                                        stride=self.init_stride,
                                                        padding=1,
                                                        bias=False))
        if args.ltdn_model:
            resnet = ResNet(int(self.num_features/2), int(self.num_features/2),
                            kernel_size=[1,3,1])
            self.features.add_module('resnet', resnet)
        for i in range(len(self.stages)):
            # HS activation and SE blocks are enabled from the configured
            # block index onwards.
            activation = 'HS' if i >= args.HS_start_block else 'ReLU'
            use_se = True if i >= args.SE_start_block else False
            ### Dense-block i
            self.add_block(i, activation, use_se)
        self.fc = nn.Linear(self.num_features, args.fc_channel)
        if not args.ltdn_model:
            self.fc_act = HS()
        ### Classifier layer
        self.classifier = nn.Linear(args.fc_channel, args.num_classes)
        self._initialize()
    def add_block(self, i, activation, use_se):
        # Append dense block i; the last block is followed by the
        # BN/ReLU/pool (and optional SE) head instead of a transition.
        ### Check if ith is the last one
        last = (i == len(self.stages) - 1)
        block = _SFR_DenseBlock(
            num_layers=self.stages[i],
            in_channels=self.num_features,
            growth_rate=self.growth[i],
            path=self.paths[i],
            args=self.args,
            activation=activation,
            use_se=use_se,
        )
        self.features.add_module('denseblock_%d' % (i + 1), block)
        self.num_features += self.stages[i] * self.growth[i]
        print('DenseBlock {} output channel {}'.format(i, self.num_features))
        if not last:
            trans = _Transition()
            self.features.add_module('transition_%d' % (i + 1), trans)
        else:
            self.features.add_module('norm_last',
                                     nn.BatchNorm2d(self.num_features))
            self.features.add_module('relu_last',
                                     nn.ReLU(inplace=True))
            self.features.add_module('pool_last',
                                     nn.AvgPool2d(self.pool_size))
            if use_se:
                self.features.add_module('se_last',
                                         SELayer(self.num_features, reduction=self.args.last_se_reduction))
    def forward(self, x):
        features = self.features(x)
        # Flatten the pooled feature map before the FC head.
        out = features.view(features.size(0), -1)
        out = self.fc(out)
        if not self.args.ltdn_model:
            out = self.fc_act(out)
        out = self.classifier(out)
        return out
    def _initialize(self):
        ### initialize
        # Kaiming init for convs, constant init for batch-norm parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
def converted_cdnv2_a(args):
    # CondenseNetV2-A: smallest variant (group size 8, 828-dim FC head).
    # Mutates `args` in place with the architecture hyper-parameters.
    args.stages = '1-1-4-6-8'
    args.growth = '8-8-16-32-64'
    print('Stages: {}, Growth: {}'.format(args.stages, args.growth))
    args.stages = list(map(int, args.stages.split('-')))
    args.growth = list(map(int, args.growth.split('-')))
    args.condense_factor = 8
    args.trans_factor = 8
    args.group_1x1 = 8
    args.group_3x3 = 8
    args.group_trans = 8
    args.bottleneck = 4
    args.last_se_reduction = 16
    args.HS_start_block = 2
    args.SE_start_block = 3
    args.fc_channel = 828
    return ConvertedCondenseNetV2(args)
def converted_cdnv2_b(args):
    # CondenseNetV2-B: mid-size variant (group size 6, 1024-dim FC head).
    # Mutates `args` in place with the architecture hyper-parameters.
    args.stages = '2-4-6-8-6'
    args.growth = '6-12-24-48-96'
    print('Stages: {}, Growth: {}'.format(args.stages, args.growth))
    args.stages = list(map(int, args.stages.split('-')))
    args.growth = list(map(int, args.growth.split('-')))
    args.condense_factor = 6
    args.trans_factor = 6
    args.group_1x1 = 6
    args.group_3x3 = 6
    args.group_trans = 6
    args.bottleneck = 4
    args.last_se_reduction = 16
    args.HS_start_block = 2
    args.SE_start_block = 3
    args.fc_channel = 1024
    return ConvertedCondenseNetV2(args)
def converted_cdnv2_c(args):
    # CondenseNetV2-C: largest variant (group size 8, 1024-dim FC head).
    # Mutates `args` in place with the architecture hyper-parameters.
    args.stages = '4-6-8-10-8'
    args.growth = '8-16-32-64-128'
    # Log the configuration like the a/b/d factories do (was missing here).
    print('Stages: {}, Growth: {}'.format(args.stages, args.growth))
    args.stages = list(map(int, args.stages.split('-')))
    args.growth = list(map(int, args.growth.split('-')))
    args.condense_factor = 8
    args.trans_factor = 8
    args.group_1x1 = 8
    args.group_3x3 = 8
    args.group_trans = 8
    args.bottleneck = 4
    args.last_se_reduction = 16
    args.HS_start_block = 2
    args.SE_start_block = 3
    args.fc_channel = 1024
    return ConvertedCondenseNetV2(args)
def converted_cdnv2_d(args):
    # CondenseNetV2-D: small 3-stage LTDN-style variant (group size 4).
    # The only factory that also parses `args.paths` (paths per block for
    # the multi-path dense layers). Mutates `args` in place.
    args.stages = '4-5-6'
    args.growth = '8-16-32'  # 2/4/
    print('Stages: {}, Growth: {}'.format(args.stages, args.growth))
    args.stages = list(map(int, args.stages.split('-')))
    args.growth = list(map(int, args.growth.split('-')))
    args.paths = list(map(int, args.paths.split('-')))
    args.condense_factor = 4
    args.trans_factor = 4
    args.group_1x1 = 4
    args.group_3x3 = 4
    args.group_trans = 4
    args.bottleneck = 4
    # args.last_se_reduction = 16
    # HS/SE start blocks beyond the stage count: effectively disabled.
    args.HS_start_block = 10
    args.SE_start_block = 10
    args.fc_channel = 1024
    return ConvertedCondenseNetV2(args)
| 39.431034 | 126 | 0.570529 |
b2726e263a14fa5cb0d5df92ef55266b383cf804 | 354 | py | Python | solutions/2017/prob_12.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | 1 | 2021-02-27T09:46:06.000Z | 2021-02-27T09:46:06.000Z | solutions/2017/prob_12.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | null | null | null | solutions/2017/prob_12.py | PolPtoAmo/HPCodeWarsBCN | 8a98b1feb6d8b7d2d5b8b4dace3e02af9e6bb4e8 | [
"MIT"
] | 1 | 2021-02-27T12:03:33.000Z | 2021-02-27T12:03:33.000Z | import datetime
entrada = input()
while entrada != '#':
nombre, salida, final = entrada.split()
salida = float(salida)
final = float(final)
salidaseg = datetime.datetime.fromtimestamp(salida).isoformat()
finalseg = datetime.datetime.fromtimestamp(final).isoformat()
tiempo = int(salidaseg)-(finalseg)
print(nombre + tiempo)
| 27.230769 | 67 | 0.694915 |
f5e89bd15f104b59977c2640e56767481e56f1eb | 4,530 | py | Python | test/unit/driver/test_docker.py | skyscooby/molecule | a0b8127f14d34360f490b9c3e7518c89beb50677 | [
"MIT"
] | null | null | null | test/unit/driver/test_docker.py | skyscooby/molecule | a0b8127f14d34360f490b9c3e7518c89beb50677 | [
"MIT"
] | null | null | null | test/unit/driver/test_docker.py | skyscooby/molecule | a0b8127f14d34360f490b9c3e7518c89beb50677 | [
"MIT"
] | 1 | 2021-04-26T19:47:39.000Z | 2021-04-26T19:47:39.000Z | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
from molecule import config
from molecule.driver import docker
@pytest.fixture
def _instance(config_instance):
    # Fresh Docker driver wired to the shared molecule config fixture.
    return docker.Docker(config_instance)
def test_config_private_member(_instance):
    # The driver must hold a molecule Config instance internally.
    assert isinstance(_instance._config, config.Config)
def test_testinfra_options_property(_instance):
    # Testinfra connects through Ansible using the provisioner's inventory.
    assert {
        'connection': 'ansible',
        'ansible-inventory': _instance._config.provisioner.inventory_file
    } == _instance.testinfra_options
def test_name_property(_instance):
    # Driver reports its registered name.
    assert 'docker' == _instance.name
def test_options_property(_instance):
    # Default driver options: the instance lifecycle is molecule-managed.
    x = {'managed': True}
    assert x == _instance.options
def test_login_cmd_template_property(_instance):
    # Login is an interactive `docker exec` with the terminal geometry
    # forwarded via environment variables.
    x = ('docker exec '
         '-e COLUMNS={columns} '
         '-e LINES={lines} '
         '-e TERM=bash '
         '-e TERM=xterm '
         '-ti {instance} bash')
    assert x == _instance.login_cmd_template
def test_safe_files_property(_instance):
    # The generated Dockerfile in the scenario's ephemeral directory must
    # survive cleanup.
    x = [
        os.path.join(_instance._config.scenario.ephemeral_directory,
                     'Dockerfile')
    ]
    assert x == _instance.safe_files
def test_default_safe_files_property(_instance):
    # Defaults match safe_files: only the generated Dockerfile.
    x = [
        os.path.join(_instance._config.scenario.ephemeral_directory,
                     'Dockerfile')
    ]
    assert x == _instance.default_safe_files
def test_delegated_property(_instance):
    # Docker is not a delegated driver.
    assert not _instance.delegated
def test_managed_property(_instance):
    # Instances are managed by molecule itself.
    assert _instance.managed
def test_default_ssh_connection_options_property(_instance):
    # Docker connections do not use SSH, so no default options exist.
    assert [] == _instance.default_ssh_connection_options
def test_login_options(_instance):
    # login_options only needs the instance name for the exec template.
    assert {'instance': 'foo'} == _instance.login_options('foo')
def test_ansible_connection_options(_instance):
    # Ansible talks to the container via the docker connection plugin.
    x = {'ansible_connection': 'docker'}
    assert x == _instance.ansible_connection_options('foo')
def test_instance_config_property(_instance):
    # Instance state is persisted in the scenario's ephemeral directory.
    x = os.path.join(_instance._config.scenario.ephemeral_directory,
                     'instance_config.yml')
    assert x == _instance.instance_config
def test_ssh_connection_options_property(_instance):
    # No SSH options for the docker driver.
    assert [] == _instance.ssh_connection_options
def test_status(_instance):
    # status() reports one record per platform from the default scenario,
    # both uncreated and unconverged at this point.
    result = _instance.status()
    assert 2 == len(result)
    assert result[0].instance_name == 'instance-1'
    assert result[0].driver_name == 'docker'
    assert result[0].provisioner_name == 'ansible'
    assert result[0].scenario_name == 'default'
    assert result[0].created == 'false'
    assert result[0].converged == 'false'
    assert result[1].instance_name == 'instance-2'
    assert result[1].driver_name == 'docker'
    assert result[1].provisioner_name == 'ansible'
    assert result[1].scenario_name == 'default'
    assert result[1].created == 'false'
    assert result[1].converged == 'false'
def test_created(_instance):
    # Fresh driver: nothing has been created yet.
    assert 'false' == _instance._created()
def test_converged(_instance):
    # Fresh driver: nothing has been converged yet.
    assert 'false' == _instance._converged()
def test_sanity_checks_missing_docker_dependency(mocker, _instance):
    # Patch HAS_DOCKER_PY to False at whichever module path the installed
    # ansible version uses; patching the 2.8+ path raises ImportError on
    # older ansible, so fall back to the pre-2.8 location.
    try:
        # ansible >= 2.8
        target = 'ansible.module_utils.docker.common.HAS_DOCKER_PY'
        mocker.patch(target, False)
    except ImportError:
        # ansible < 2.8
        target = 'ansible.module_utils.docker_common.HAS_DOCKER_PY'
        mocker.patch(target, False)
    # Without docker-py the driver must abort with SystemExit.
    with pytest.raises(SystemExit):
        _instance.sanity_checks()
| 28.853503 | 79 | 0.715453 |
ed71bf9a2b9f856c36f934e656577ad61df8d29e | 2,831 | py | Python | software/cl_reachy/cl_reachy/view/sound/threshold/threshold.py | wisehackermonkey/CoLab-Reachy | 538f3e6d6f43f87cdf2c91e97229b20a7fb490a9 | [
"CC0-1.0"
] | 3 | 2021-03-28T19:43:57.000Z | 2021-12-19T04:42:02.000Z | software/cl_reachy/cl_reachy/view/sound/threshold/threshold.py | wisehackermonkey/CoLab-Reachy | 538f3e6d6f43f87cdf2c91e97229b20a7fb490a9 | [
"CC0-1.0"
] | 7 | 2020-09-23T00:33:53.000Z | 2021-08-11T03:15:43.000Z | software/cl_reachy/cl_reachy/view/sound/threshold/threshold.py | wisehackermonkey/CoLab-Reachy | 538f3e6d6f43f87cdf2c91e97229b20a7fb490a9 | [
"CC0-1.0"
] | 1 | 2021-01-21T02:32:15.000Z | 2021-01-21T02:32:15.000Z | import time
from ....node import NodeBase
from .meter import ThresholdMeter
from ....model.messages import AudioInputStateMessage, ThresholdStartMessage
class Threshold(NodeBase):
    """MQTT node that runs a ThresholdMeter over audio input.

    Subscribes to ``+/threshold/start`` / ``+/threshold/stop`` and runs at
    most one meter at a time; a new start request force-stops the previous
    run first.
    """

    def __init__(self, node_name="audioinput", host="127.0.0.1", port=1883,
                 username=None, password=None, subscribe_dict={}, run_sleep=0.1,
                 profile="reachy", threshold="+500"):
        super().__init__(node_name, host, port, username, password, subscribe_dict, run_sleep)
        self.profile = profile
        self.threshold = threshold
        # Active ThresholdMeter, or None when idle (see is_busy).
        self.threshold_meter = None
        self.add_subscribe('+/threshold/start', self.handle_threshold_start)
        self.add_subscribe('+/threshold/stop', self.handle_threshold_stop)

    @property
    def is_busy(self):
        # Busy whenever a meter instance exists; cleared in completed().
        return (self.threshold_meter is not None)

    def node_init(self):
        # Make sure no stale meter survives a (re-)initialization.
        if self.is_busy:
            self.stop()
        super().node_init()

    def publish_state(self):
        """Publish whether a threshold meter is currently running."""
        msg = AudioInputStateMessage(is_busy=self.is_busy, listener="threshold")
        self.publish("threshold/threshold/state", msg.to_json())

    def make_sigint_handler(self):
        def sigint_handler(signum, frame):
            self.threshold_meter.stop()
        return sigint_handler

    def completed(self):
        # Final callback from ThresholdMeter: mark idle and broadcast it.
        self.threshold_meter = None
        self.publish_state()

    def handle_threshold_start(self, client, userdata, message):
        """Start a new meter run, force-stopping any run in progress."""
        _message = str(message.payload.decode("utf-8"))
        threshold_start_msg = ThresholdStartMessage.from_json(_message)
        if self.is_busy:
            # Force stop the running meter and wait until it has cleared.
            self.stop()
            while self.is_busy:
                time.sleep(0.1)
        self.threshold_meter = ThresholdMeter(action="exec-stop", threshold=self.threshold,
                num=int(threshold_start_msg.num), publish=self.publish, verbose=True,
                profile=self.profile)
        self.threshold_meter.start(final_callback=self.completed)

    def handle_threshold_stop(self, client, userdata, message):
        # Guard: a stop request may arrive when no meter is running
        # (the original dereferenced None here).
        if self.threshold_meter is not None:
            self.threshold_meter.stop()

    def stop(self):
        """Request a graceful stop and block until the meter has finished."""
        if self.threshold_meter is not None:
            self.threshold_meter.graceful()
        while self.threshold_meter is not None and self.threshold_meter.is_running:
            # wait for threshold_meter to stop running
            time.sleep(1)

    def handle_quit(self, command_input=None):
        self.stop()
        super().handle_quit(command_input)
| 32.918605 | 105 | 0.636877 |
305ef1176b55be054dbe3bd9152ca08402a168a5 | 1,311 | py | Python | turtlebot2/kobuki_base/kobuki_ros/kobuki_node/launch/kobuki_node-composed-launch.py | RoboticsLabURJC/2021-tfg-carlos-caminero | e23991616cb971b9a0bd95b653789c54f571a930 | [
"Apache-2.0"
] | null | null | null | turtlebot2/kobuki_base/kobuki_ros/kobuki_node/launch/kobuki_node-composed-launch.py | RoboticsLabURJC/2021-tfg-carlos-caminero | e23991616cb971b9a0bd95b653789c54f571a930 | [
"Apache-2.0"
] | null | null | null | turtlebot2/kobuki_base/kobuki_ros/kobuki_node/launch/kobuki_node-composed-launch.py | RoboticsLabURJC/2021-tfg-carlos-caminero | e23991616cb971b9a0bd95b653789c54f571a930 | [
"Apache-2.0"
] | null | null | null | import os
import ament_index_python.packages
from launch import LaunchDescription
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
import yaml
def generate_launch_description():
    """Compose the kobuki driver as a component inside an rclcpp container."""
    pkg_share = ament_index_python.packages.get_package_share_directory('kobuki_node')

    # Composed nodes take parameters as a plain dict of key -> value pairs,
    # so load the YAML configuration file and extract that dict up front.
    config_path = os.path.join(pkg_share, 'config', 'kobuki_node_params.yaml')
    with open(config_path, 'r') as config_file:
        node_params = yaml.safe_load(config_file)['kobuki_ros_node']['ros__parameters']

    # The kobuki driver itself, described as a loadable component.
    kobuki_component = ComposableNode(
        package='kobuki_node',
        node_plugin='kobuki_node::KobukiRos',
        node_name='kobuki_ros_node',
        parameters=[node_params])

    # Generic component container that hosts the driver in-process.
    node_container = ComposableNodeContainer(
        node_name='kobuki_node_container',
        node_namespace='',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[kobuki_component],
        output='both',
    )

    return LaunchDescription([node_container])
| 36.416667 | 86 | 0.674294 |
402c77e8b598c14e724790f1b70df3c8088c859a | 438 | py | Python | QRSMS/actor/urls.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 4 | 2020-06-16T09:42:20.000Z | 2021-11-24T08:18:16.000Z | QRSMS/actor/urls.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2021-04-08T21:57:34.000Z | 2022-02-27T06:41:15.000Z | QRSMS/actor/urls.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2020-11-29T09:45:44.000Z | 2022-03-30T15:27:33.000Z | from django.urls import path, include
from rest_framework import routers
from . import api
from . import views
from . import models
# URL namespace label for this Django app.
actor = 'actor'

# DRF router that auto-generates CRUD routes for the Employee viewset.
router = routers.DefaultRouter()
router.register('employee', api.EmployeeViewSet, basename='employee')

urlpatterns = [
    # urls for Django Rest Framework API
    # path('api/', include(router.urls)),
]
# Plain (non-API) views.
urlpatterns += [
    path('temp_login', views.temp_login, name="temp_login"),
]
| 19.909091 | 69 | 0.721461 |
8056ef02d82e3273ca9e5ee1a0f98da83a92d57b | 8,190 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/default_security_rules_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/default_security_rules_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/default_security_rules_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE(review): this class is generated by Microsoft AutoRest; prefer
# regenerating over hand-editing, as manual changes will be overwritten.
class DefaultSecurityRulesOperations(object):
    """DefaultSecurityRulesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-09-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):

        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-09-01"

        self.config = config

    def list(
            self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all default security rules in a network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SecurityRule
        :rtype:
         ~azure.mgmt.network.v2017_09_01.models.SecurityRulePaged[~azure.mgmt.network.v2017_09_01.models.SecurityRule]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: fetches one page; a truthy next_link means a
        # continuation URL supplied by the service is followed verbatim.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.SecurityRulePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SecurityRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'}

    def get(
            self, resource_group_name, network_security_group_name, default_security_rule_name, custom_headers=None, raw=False, **operation_config):
        """Get the specified default network security rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param default_security_rule_name: The name of the default security
         rule.
        :type default_security_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SecurityRule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_09_01.models.SecurityRule or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'}
| 46.271186 | 220 | 0.668498 |
c6faf72afdf177c0a1d77b1bd2e39bc76f7fb5e7 | 7,349 | py | Python | bentoml/_internal/frameworks/torchscript.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | bentoml/_internal/frameworks/torchscript.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | bentoml/_internal/frameworks/torchscript.py | matheusMoreno/BentoML | 4c139142fae486ba1ccf6b24e89505c030e3df3f | [
"Apache-2.0"
] | null | null | null | import typing as t
import logging
from typing import TYPE_CHECKING
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from ..models import PT_EXT
from ..models import SAVE_NAMESPACE
from ..utils.pkg import get_pkg_version
from ...exceptions import BentoMLException
from .common.pytorch import torch
from .common.pytorch import BasePyTorchRunner
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING:
from ..models import ModelStore
# Module identifier recorded in saved model metadata; checked at load time.
MODULE_NAME = "bentoml.torchscript"

logger = logging.getLogger(__name__)
@inject
def load(
    tag: t.Union[Tag, str],
    device_id: t.Optional[str] = "cpu",
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "torch.jit.ScriptModule":
    """
    Load a TorchScript model saved under ``tag`` from the BentoML model store.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        device_id (:code:`str`, `optional`, default to :code:`cpu`):
            Optional device to map the module onto. Refers to `device attributes <https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device>`_.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`torch.jit.ScriptModule`: an instance of :code:`torch.jit.ScriptModule` from BentoML modelstore.

    Examples:

    .. code-block:: python

        import bentoml
        model = bentoml.torchscript.load('lit_classifier:latest', device_id="cuda:0")
    """  # noqa
    bento_model = model_store.get(tag)
    if bento_model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {tag} was saved with module {bento_model.info.module}, failed loading with {MODULE_NAME}."
        )

    weights_path = bento_model.path_of(f"{SAVE_NAMESPACE}{PT_EXT}")

    # Models saved before the format key was recorded default to v1.
    fmt = bento_model.info.context.get("model_format") or "torchscript:v1"
    if fmt != "torchscript:v1":
        raise BentoMLException(f"Unknown model format {fmt}")

    model: "torch.jit.ScriptModule" = torch.jit.load(weights_path, map_location=device_id)  # type: ignore[reportPrivateImportUsage] # noqa: LN001
    return model
@inject
def save(
    name: str,
    model: "torch.jit.ScriptModule",
    *,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Union[None, t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:code:`torch.jit.ScriptModule`):
            Instance of model to be saved
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.

    Examples:

    .. code-block:: python

        import torch
        import bentoml

        class NGramLanguageModeler(nn.Module):

            def __init__(self, vocab_size, embedding_dim, context_size):
                super(NGramLanguageModeler, self).__init__()
                self.embeddings = nn.Embedding(vocab_size, embedding_dim)
                self.linear1 = nn.Linear(context_size * embedding_dim, 128)
                self.linear2 = nn.Linear(128, vocab_size)

            def forward(self, inputs):
                embeds = self.embeddings(inputs).view((1, -1))
                out = F.relu(self.linear1(embeds))
                out = self.linear2(out)
                log_probs = F.log_softmax(out, dim=1)
                return log_probs

        _model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
        model = torch.jit.script(_model)
        tag = bentoml.torchscript.save("ngrams", model)
        # example tag: ngrams:20201012_DE43A2

    Integration with Torch Hub and BentoML:

    .. code-block:: python

        import torch
        import bentoml

        resnet50 = torch.hub.load("pytorch/vision", "resnet50", pretrained=True)
        ...
        # trained a custom resnet50

        tag = bentoml.torchscript.save("resnet50", resnet50)
    """  # noqa
    # Framework context persisted with the model so load-time environments
    # can pin the matching torch version.
    context: t.Dict[str, t.Any] = {
        "framework_name": "torch",
        "pip_dependencies": [f"torch=={get_pkg_version('torch')}"],
    }

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        options=None,
        context=context,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
    ) as _model:
        weight_file = _model.path_of(f"{SAVE_NAMESPACE}{PT_EXT}")
        # Tag the serialization format so future loaders can dispatch on it.
        _model.info.context["model_format"] = "torchscript:v1"

        torch.jit.save(model, weight_file)  # type: ignore[reportUnknownMemberType]

        return _model.tag
class _TorchScriptRunner(BasePyTorchRunner):
    # Runner whose model-loading step delegates to this module's ``load``.
    def _load_model(self):
        return load(self._tag, device_id=self._device_id, model_store=self.model_store)
def load_runner(
    tag: t.Union[str, Tag],
    *,
    predict_fn_name: str = "__call__",
    partial_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
    name: t.Optional[str] = None,
) -> "_TorchScriptRunner":
    """
    Runner represents a unit of serving logic that can be scaled horizontally to
    maximize throughput. `bentoml.torchscript.load_runner` implements a Runner class that
    wrap around a pytorch instance, which optimize it for the BentoML runtime.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        predict_fn_name (:code:`str`, default to :code:`__call__`):
            inference function to be used.
        partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Common kwargs passed to model for this runner

    Returns:
        :obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.torchscript` model

    Examples:

    .. code-block:: python

        import bentoml
        import pandas as pd

        runner = bentoml.torchscript.load_runner("ngrams:latest")
        runner.run(pd.DataFrame("/path/to/csv"))
    """
    # Thin factory: model loading is deferred until the runner is initialized.
    return _TorchScriptRunner(
        tag=tag,
        predict_fn_name=predict_fn_name,
        partial_kwargs=partial_kwargs,
        name=name,
    )
| 35.162679 | 163 | 0.658049 |
c12c73e90bf8ae23c594c1ebe9419aaac1ff68cf | 3,465 | py | Python | functions/analytics_consumer/test/test_main.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | functions/analytics_consumer/test/test_main.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | functions/analytics_consumer/test/test_main.py | epiphone/lambda-terraform-analytics | b569ee6a7bcb56c8c3c41b875edd4859350010ee | [
"MIT"
] | null | null | null | import json
import os
from uuid import uuid4
import boto3
from moto import mock_lambda, mock_sqs
import pytest
# Must be set before `main` is imported anywhere below: the module reads it.
os.environ['WORKER_LAMBDA_ARN'] = 'test-worker-lambda-arn'
@pytest.fixture
def queue():
    """
    Return a mocked SQS Queue object, save its URL to an environment variable.
    """
    mock_sqs().start()
    queue_url = boto3.client('sqs').create_queue(QueueName='test')['QueueUrl']
    # Consumed by `main` to locate its queue.
    os.environ['SQS_URL'] = queue_url
    yield boto3.resource('sqs').Queue(queue_url)
    # Teardown after the yield: dismantle the moto mock.
    mock_sqs().stop()
@pytest.fixture
def handler(lambda_context):
    """
    Return a function to invoke the main lambda handler with a mock context.
    """
    # Imported lazily so environment variables set by other fixtures apply first.
    import main
    return lambda: main.main({}, lambda_context)
def test_use_fake_aws_credentials(queue):
    # Sanity check: the moto mock injects dummy credentials, so no real AWS
    # account can ever be hit from the test session.
    import main
    creds = main.boto3.DEFAULT_SESSION.get_credentials()
    assert creds.access_key == 'xxx'
    assert creds.secret_key == 'xxx'
def test_no_msgs_in_queue(mocker, handler, queue):
    # An empty queue should process nothing and never invoke the worker lambda.
    mock_invoke = mocker.patch('main.invoke')
    assert handler() == {'processed': 0}
    mock_invoke.assert_not_called()
    msgs_left = queue.receive_messages()
    assert msgs_left == []
def test_single_receive_batch(mocker, handler, queue, event):
    # Two queued events fit in one receive batch and are forwarded to the
    # worker lambda in a single async invocation.
    e1, e2 = event(), event()
    queue.send_message(MessageBody=json.dumps({'Message': json.dumps(e1)}))
    queue.send_message(MessageBody=json.dumps({'Message': json.dumps(e2)}))
    mock_invoke = mocker.patch('main.invoke')

    assert handler() == {'processed': 2}
    mock_invoke.assert_called_once_with(
        FunctionName=os.environ['WORKER_LAMBDA_ARN'],
        InvocationType='Event',
        Payload=json.dumps([e1, e2]))
    # Processed messages must be deleted from the queue.
    msgs_left = queue.receive_messages()
    assert msgs_left == []
@mock_lambda
def test_multiple_receive_batches(mocker, handler, queue, event):
    # 87 events split across batches of 10 (SQS receive limit): expect one
    # worker invocation per batch, in order, and an empty queue afterwards.
    n = 87
    batch_size = 10
    batches_n = n // batch_size + 1
    batches = [[event()
                for _ in range(i * 10, min(n, (i + 1) * 10))]
               for i in range(batches_n)]
    for batch in batches:
        queue.send_messages(Entries=[{
            'Id': e['event_id'],
            'MessageBody': json.dumps({
                'Message': json.dumps(e)
            })
        } for e in batch])

    mock_invoke = mocker.patch('main.invoke')
    assert handler() == {'processed': n}
    calls = [
        mocker.call(
            FunctionName=os.environ['WORKER_LAMBDA_ARN'],
            InvocationType='Event',
            Payload=json.dumps(batch)) for batch in batches
    ]
    mock_invoke.assert_has_calls(calls)
    msgs_left = queue.receive_messages()
    assert msgs_left == []
def test_deduplicate(mocker, handler, queue, event):
    # Duplicate deliveries of the same event must be counted and forwarded
    # only once.
    events = [event() for _ in range(7)]

    # Mock a SQS queue with 2 duplicates among event messages:
    class MockSQSMessage:
        def __init__(self, event):
            self.body = json.dumps({'Message': json.dumps(event)})
            self.message_id = event['event_id']
            self.receipt_handle = str(uuid4())

    msgs = [MockSQSMessage(e) for e in events]
    msgs.insert(2, MockSQSMessage(events[1]))
    msgs.insert(6, MockSQSMessage(events[3]))
    # First receive returns the (duplicated) batch, second returns empty.
    mocker.patch('main.sqs').receive_messages.side_effect = [msgs, []]
    mock_invoke = mocker.patch('main.invoke')

    assert handler() == {'processed': len(events)}
    mock_invoke.assert_called_once_with(
        FunctionName=os.environ['WORKER_LAMBDA_ARN'],
        InvocationType='Event',
        Payload=json.dumps(events))
| 29.117647 | 78 | 0.650794 |
d276dc6311a91141103791545e82b9ef0246c8e5 | 9,806 | py | Python | test/functional/rpc_createmultisig.py | litecoin-foundation/litecoin | de61fa1580d0465edb16251a4db5267f6b1cd047 | [
"MIT"
] | 28 | 2020-10-02T23:18:59.000Z | 2022-02-21T02:49:42.000Z | test/functional/rpc_createmultisig.py | litecoin-foundation/litecoin | de61fa1580d0465edb16251a4db5267f6b1cd047 | [
"MIT"
] | 14 | 2020-10-01T11:23:36.000Z | 2021-07-02T04:40:31.000Z | test/functional/rpc_createmultisig.py | litecoin-foundation/litecoin | de61fa1580d0465edb16251a4db5267f6b1cd047 | [
"MIT"
] | 9 | 2020-10-06T22:11:10.000Z | 2022-01-04T12:09:01.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import binascii
import decimal
import itertools
import json
import os
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BitcoinTestFramework):
    """Functional test for createmultisig/addmultisigaddress RPCs."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.supports_cli = False

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def get_keys(self):
        # Generate `nkeys` fresh keypairs and a destination address on node2.
        self.pub = []
        self.priv = []
        node0, node1, node2 = self.nodes
        for _ in range(self.nkeys):
            k = ECKey()
            k.generate()
            self.pub.append(k.get_pubkey().get_bytes().hex())
            self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
        self.final = node2.getnewaddress()

    def run_test(self):
        node0, node1, node2 = self.nodes

        self.check_addmultisigaddress_errors()

        self.log.info('Generating blocks ...')
        node0.generate(149)
        self.sync_all()

        self.moved = 0
        # Exercise every (nkeys, nsigs, address type) combination.
        for self.nkeys in [3, 5]:
            for self.nsigs in [2, 3]:
                for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
                    self.get_keys()
                    self.do_multisig()
        self.checkbalances()

        # Test mixed compressed and uncompressed pubkeys
        self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
        pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
        pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
        pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']

        # decompress pk2
        pk_obj = ECPubKey()
        pk_obj.set(binascii.unhexlify(pk2))
        pk_obj.compressed = False
        pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()

        node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
        wmulti0 = node0.get_wallet_rpc('wmulti0')
        # Check all permutations of keys because order matters apparently
        for keys in itertools.permutations([pk0, pk1, pk2]):
            # Results should be the same as this legacy one
            legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
            assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'legacy')['address'])

            # Generate addresses with the segwit types. These should all make legacy addresses
            assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'bech32')['address'])
            assert_equal(legacy_addr, wmulti0.createmultisig(2, keys, 'p2sh-segwit')['address'])
            assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'bech32')['address'])
            assert_equal(legacy_addr, wmulti0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])

        self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
            vectors = json.load(f)

        for t in vectors:
            key_str = ','.join(t['keys'])
            desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
            assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
            sorted_key_str = ','.join(t['sorted_keys'])
            sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
            assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])

    def check_addmultisigaddress_errors(self):
        if self.options.descriptors:
            return
        self.log.info('Check that addmultisigaddress fails when the private keys are missing')
        addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
        assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
        for a in addresses:
            # Importing all addresses should not change the result
            self.nodes[0].importaddress(a)
        assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))

    def checkbalances(self):
        # Verify no coins were lost: everything spent to multisig came back
        # to node2 (tracked in self.moved), the rest stayed on node0.
        node0, node1, node2 = self.nodes
        node0.generate(100)
        self.sync_all()

        bal0 = node0.getbalance()
        bal1 = node1.getbalance()
        bal2 = node2.getbalance()

        height = node0.getblockchaininfo()["blocks"]
        assert 150 < height < 350
        total = 149 * 50 + (height - 149 - 100) * 25
        assert bal1 == 0
        assert bal2 == self.moved
        assert bal0 + bal1 + bal2 == total

    def do_multisig(self):
        node0, node1, node2 = self.nodes

        if 'wmulti' not in node1.listwallets():
            try:
                node1.loadwallet('wmulti')
            except JSONRPCException as e:
                path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
                if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
                    node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
                else:
                    raise
        wmulti = node1.get_wallet_rpc('wmulti')

        # Construct the expected descriptor
        desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
        if self.output_type == 'legacy':
            desc = 'sh({})'.format(desc)
        elif self.output_type == 'p2sh-segwit':
            desc = 'sh(wsh({}))'.format(desc)
        elif self.output_type == 'bech32':
            desc = 'wsh({})'.format(desc)
        desc = descsum_create(desc)

        msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
        madd = msig["address"]
        mredeem = msig["redeemScript"]
        assert_equal(desc, msig['descriptor'])
        if self.output_type == 'bech32':
            assert madd[0:4] == "rltc"  # actually a bech32 address

        # compare against addmultisigaddress
        msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
        maddw = msigw["address"]
        mredeemw = msigw["redeemScript"]
        assert_equal(desc, drop_origins(msigw['descriptor']))
        # addmultisigiaddress and createmultisig work the same
        assert maddw == madd
        assert mredeemw == mredeem

        txid = node0.sendtoaddress(madd, 40)

        tx = node0.getrawtransaction(txid, True)
        vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses", [])]
        assert len(vout) == 1
        vout = vout[0]
        scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
        value = tx["vout"][vout]["value"]
        prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]

        node0.generate(1)

        outval = value - decimal.Decimal("0.00001000")
        rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])

        prevtx_err = dict(prevtxs[0])
        del prevtx_err["redeemScript"]

        assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # if witnessScript specified, all ok
        prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
        node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # both specified, also ok
        prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
        node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # redeemScript mismatch to witnessScript
        prevtx_err["redeemScript"] = "6a"  # OP_RETURN
        assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # redeemScript does not match scriptPubKey
        del prevtx_err["witnessScript"]
        assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        # witnessScript does not match scriptPubKey
        prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
        del prevtx_err["redeemScript"]
        assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])

        rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
        rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)

        self.moved += outval
        tx = node0.sendrawtransaction(rawtx3["hex"], 0)
        blk = node0.generate(1)[0]
        assert tx in node0.getblock(blk)["tx"]

        txinfo = node0.getrawtransaction(tx, True, blk)
        self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))

        wmulti.unloadwallet()
# Script entry point when run directly by the functional test runner.
if __name__ == '__main__':
    RpcCreateMultiSigTest().main()
| 44.171171 | 174 | 0.646033 |
569aa5ae4cfd2855a489ecab0bd1854c51830f21 | 837 | py | Python | jazzband/tasks.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | jazzband/tasks.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | jazzband/tasks.py | tipabu/jazzband-website | 30102e87348924eb56b610e74609a3475d3a14de | [
"MIT"
] | null | null | null | import redis
from spinach import signals
from spinach.contrib.flask_spinach import Spinach
from spinach.brokers.redis import recommended_socket_opts, RedisBroker
from .account import github
from .members.tasks import tasks as member_tasks
from .projects.tasks import tasks as project_tasks
class JazzbandSpinach(Spinach):
    """Flask-Spinach extension wired to the project's Redis queue and tasks."""

    def init_app(self, app):
        # Point the broker at the Redis instance configured for this app
        # before the base extension initializes.
        app.config["SPINACH_BROKER"] = RedisBroker(
            redis.from_url(app.config["QUEUE_URL"], **recommended_socket_opts)
        )
        super().init_app(app)

        namespace = app.extensions["spinach"].namespace

        # Refresh the GitHub client configuration at the start of every job
        # in this namespace.
        @signals.job_started.connect_via(namespace)
        def job_started(*args, **kwargs):
            github.load_config()

        for tasks in [member_tasks, project_tasks]:
            self.register_tasks(app, tasks)


# Module-level singleton used by the application factory.
spinach = JazzbandSpinach()
| 28.862069 | 78 | 0.714456 |
6e2804f479a82538338c393aebc7294f58f684fb | 8,180 | py | Python | fred/peaktrough.py | cc7768/NYUecondata | 086dfac9f0eb53f67a5164d661e74735f64b0978 | [
"MIT"
] | null | null | null | fred/peaktrough.py | cc7768/NYUecondata | 086dfac9f0eb53f67a5164d661e74735f64b0978 | [
"MIT"
] | 1 | 2015-06-17T15:28:53.000Z | 2015-06-17T17:03:46.000Z | fred/peaktrough.py | cc7768/NYUecondata | 086dfac9f0eb53f67a5164d661e74735f64b0978 | [
"MIT"
] | null | null | null | """
Cooley-Rupert-style business cycle figures for Backus-Ferriere-Zin paper,
"Risk and ambiguity in models of business cycles," Carnegie-Rochester-NYU
conference paper, April 2014.
FRED codes: ["GDPC1", "PCECC96", "GPDIC96", "OPHNFB"]
(gdp, consumption, investment, labor productivity)
Cooley-Rupert link: http://econsnapshot.com/
Paper link: http://pages.stern.nyu.edu/~dbackus/BFZ/ms/BFZ_CRN_latest.pdf
GitHub: https://github.com/cc7768/NYUecondata/tree/master/fred
Authors: Chase Coleman and Spencer Lyon
Date: 06/24/2014
TODO: Add labels to the plots
Increase thickness of current recession
Smaller fonts in legend
Identify FRED code?
Check margins: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.tight_layout
"""
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.data import DataReader
# legend control, subject to change
# http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
# Shrink legend text so plots with many business cycles stay readable.
params = {'legend.fontsize': 10,
          'legend.linewidth': 0.5} # this one doesn't seem to do anything
plt.rcParams.update(params)
def chopseries(data, indices, periods=40):
    """
    Takes a series and chops it into pieces starting with cyclical peaks.

    Formally, it turns it into a data frame starting at each peak index date
    and running for the number of periods specified (default is 40).  Cycles
    that run past the end of ``data`` are padded with NaN.

    Parameters
    ----------
    data : pd.Series
        The Series that should be chopped. Index should be a
        DatetimeIndex
    indices : pd.DatetimeIndex
        A pandas DatetimeIndex where each item represents the beginning
        of a cycle
    periods : int, optional(default=40)
        An integer specifying the maximum number of periods to retain
        in each cycle. In other words, the function will attempt to keep
        `periods` items, starting at each date in indices

    Returns
    -------
    new_data : pd.DataFrame
        A pd.DataFrame with columns named for the year the cycle
        started. The data is a subset of the original series passed into
        the function
    """
    # Number of cycles to extract.  NOTE: two cycles starting in the same
    # year would produce duplicate column names, as in the original code.
    n = len(indices)
    c_names = ["%d cycle" % x.year for x in indices]

    # Start from an all-NaN frame so short cycles are NaN-padded explicitly,
    # instead of relying on a bare ``except`` around the assignment.
    new_data = pd.DataFrame(np.full((periods, n), np.nan), columns=c_names)

    for num, date in enumerate(indices):
        date_loc = data.index.get_loc(date)
        # ``.iloc`` replaces the long-removed ``.ix`` positional indexer.
        # ``ravel`` flattens both Series values and single-column DataFrames.
        values = np.ravel(np.asarray(data.iloc[date_loc:date_loc + periods]))
        column = np.full(periods, np.nan)
        column[:values.size] = values
        new_data[c_names[num]] = column

    return new_data
def peak_begin_dates(start="01/01/1972", end=None):
    """
    Use the fred dataset `USRECQ` to determine the beginning of the
    peaks before all recessions between dates start and end

    Parameters
    ----------
    start : string or datetime.datetime, optional(default='01/01/1972')
        A string or other acceptable pandas date identifier that marks
        the beginning of the window for which we will search for starts
        of peaks
    end : string or datetime.datetime, optional(default=None)
        The ending date of the search window; ``None`` means "through the
        present".  (The old ``datetime.now()`` default was evaluated once
        at import time and the parameter was silently ignored.)

    Returns
    -------
    rec_startind : pd.DatetimeIndex
        A pandas DatetimeIndex representing the starting points of each
        "peak" from start to end

    Raises
    ------
    ValueError
        If no recession quarters appear in the requested window.
    """
    # Get quarterly recession dates from FRED (1 = recession quarter).
    # NOTE(review): ``pandas.io.data`` was removed from pandas long ago;
    # modern code should use ``pandas_datareader`` instead.
    rec_dates = DataReader("USRECQ", "fred", start=start, end=end)
    one_vals = np.where(rec_dates == 1)[0]

    if one_vals.size == 0:
        raise ValueError(
            "No recession quarters found between %s and %s" % (start, end))

    rec_start = [one_vals[0]]

    # Find the beginning of the recession dates (Don't include ones that
    # begin within three years of a previous one -- hence the `+12` quarters)
    for d in one_vals:
        if d > max(rec_start) + 12:
            rec_start.append(d)

    rec_startind = rec_dates.index[rec_start]

    return rec_startind
def manhandle_freddata(fred_series, nperiods=40,
                       changetype="log", start="01/01/1972",
                       saveshow="show", **plot_kwargs):
    """
    Takes a FRED series identifier, downloads the (quarterly) data, and
    builds a DataFrame of the percent change of the series starting at the
    beginning of every business-cycle peak and running for `nperiods`
    quarters, using either log differences or simple percent change.  The
    result is also plotted.

    If you would like to use multiple series, use python's map function:
    map(manhandle_freddata, [list of fred_series])

    Parameters
    ----------
    fred_series : string
        A string representing the fred dataset identifier.
    nperiods : int, optional(default=40)
        The number of periods each cycle should represent. This is
        passed directly to the `chopseries` function
    changetype : string
        A string identifying how the percentage change should be
        computed. Acceptable values are `percent` or `log`
    start : string or datetime.datetime, optional(default='01/01/1972')
        A string or other acceptable pandas date identifier that marks
        the beginning of the window for which we will search for starts
        of peaks. This is passed directly to `pd.io.data.DataReader` to
        obtain the data set and to `peak_begin_dates` to determine
        starting periods for business cycle peaks
    saveshow : string, optional(default="show")
        A string specifying whether the plots should be saved to disk or
        shown.  Default is to show them.  Acceptable values are "save",
        "show", and "both".  (An earlier version of this docstring wrongly
        claimed the default was "save".)
    plot_kwargs : other
        Other keyword arguments that will be passed directly to the
        `pd.DataFrame.plot` method when generating the plot. See pandas
        plotting documentation for an explanation of acceptable values

    Returns
    -------
    pct_change : pd.DataFrame
        The pandas DataFrame representing representing the percent
        change from the beginning of each peak, extended out `nperiods`

    Examples
    --------
    >>> rgdp = manhandle_freddata('GDPC1') # produces real GDP plot

    For more examples see the `examples.ipynb` notebook in this
    directory.
    """
    # Get data
    fred_data = DataReader(fred_series, "fred", start=start)

    # Get dates for start of peak
    peak_dates = peak_begin_dates(start=start)

    # Break the time-series into chunks for each recession
    chopped_data = chopseries(fred_data, peak_dates, periods=nperiods)

    # Compute percent changes.
    # NOTE(review): an unrecognized ``changetype`` leaves ``pct_change``
    # unbound and raises NameError below -- confirm whether an explicit
    # ValueError is preferable.
    if changetype.lower() == "percent":
        pct_change = ((chopped_data / chopped_data.iloc[0] - 1)*100)
    elif changetype.lower() == "log":
        logged = np.log(chopped_data)
        pct_change = (logged - logged.iloc[0]) * 100.0

    # plot data
    fig, (ax) = plt.subplots(1, 1)
    ax.set_ylabel("Percent change from previous peak")
    pct_change.index.name = "Quarters since previous peak" # becomes x_label
    pct_change.plot(ax=ax, **plot_kwargs)
    ax.legend_.set_title("FRED: " + fred_series) # set title on legend

    # add line for x-axis and show the plot.
    ax.axhline(y=0, xmin=0, xmax=nperiods, color='k', linewidth=1.5)

    # if saveshow="save" save plot as pdf file with name = FRED code
    if saveshow=="save" or saveshow=="both":
        fn = fred_series + ".pdf"
        plt.savefig(fn)

    if saveshow=="show" or saveshow=="both":
        plt.show()

    return pct_change
# Demo / smoke test: build the real-GDP figure, once shown interactively and
# once saved to GDPC1.pdf.  NOTE(review): ``fred_names`` is only used by the
# commented-out ``map`` call, and the second call rebinds ``test`` over the
# first result.
if __name__ == '__main__':
    # Get Real GDP, Real Personal Consumption, Nonresidential Investment,
    # and Output per Hour from FRED
    fred_names = ["GDPC1", "PCECC96", "GPDIC96", "OPHNFB"]

    # gdpdiff, pceccdiff, gpdicdiff, ophnfbdiff = map(manhandle_freddata,
    #                                                 fred_names)
    test = manhandle_freddata("GDPC1", saveshow="show")
    test = manhandle_freddata("GDPC1", saveshow="save")
| 36.355556 | 93 | 0.675061 |
2f89febe522cf9679cab5f4782b32af943faf97c | 53 | py | Python | src/DataDiagnose/.ipynb_checkpoints/__init__-checkpoint.py | JoeBuzh/DeepWater | 9d01167517c91fb2024d2abbcbaa53b072c4fdbf | [
"MIT"
] | 1 | 2020-12-16T07:58:51.000Z | 2020-12-16T07:58:51.000Z | src/DataDiagnose/__init__.py | JoeBuzh/DeepWater | 9d01167517c91fb2024d2abbcbaa53b072c4fdbf | [
"MIT"
] | null | null | null | src/DataDiagnose/__init__.py | JoeBuzh/DeepWater | 9d01167517c91fb2024d2abbcbaa53b072c4fdbf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Data Diagnose Module """ | 17.666667 | 28 | 0.528302 |
f4dbcc33951a2adebfe97cccb63ebe53361b2527 | 8,730 | py | Python | responder/models.py | adrianuf22/responder | 84f9225b6ab6fe1f088fa1dd036097e865290b0b | [
"Apache-2.0"
] | null | null | null | responder/models.py | adrianuf22/responder | 84f9225b6ab6fe1f088fa1dd036097e865290b0b | [
"Apache-2.0"
] | null | null | null | responder/models.py | adrianuf22/responder | 84f9225b6ab6fe1f088fa1dd036097e865290b0b | [
"Apache-2.0"
] | null | null | null | import io
import json
import gzip
from http.cookies import SimpleCookie
import chardet
import rfc3986
import graphene
import yaml
from requests.structures import CaseInsensitiveDict
from requests.cookies import RequestsCookieJar
from starlette.datastructures import MutableHeaders
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from urllib.parse import parse_qs
from .status_codes import HTTP_200
from .statics import DEFAULT_ENCODING
class QueryDict(dict):
    """A dict of query-string parameters.

    Every key maps to the *list* of values parsed from the query string;
    plain item access returns the last value in that list, mirroring
    Django's QueryDict behavior.
    """

    def __init__(self, query_string):
        self.update(parse_qs(query_string))

    def __getitem__(self, key):
        """Return the last value for ``key`` ([] when the stored list is
        empty); raise KeyError when the key is absent."""
        values = dict.__getitem__(self, key)
        try:
            return values[-1]
        except IndexError:
            return []

    def get(self, key, default=None):
        """Return the last value for ``key``, or ``default`` when the key is
        missing or its value list is empty."""
        try:
            value = self[key]
        except KeyError:
            return default
        if value == []:
            return default
        return value

    def _get_list(self, key, default=None, force_list=False):
        """Return the stored list of values for ``key``.

        Internal helper; when ``force_list`` is true, a fresh copy of the
        list is returned instead of the stored object.
        """
        if key not in self:
            return [] if default is None else default
        values = dict.__getitem__(self, key)
        if force_list and values is not None:
            values = list(values)
        return values

    def get_list(self, key, default=None):
        """Return a copy of the value list for ``key`` (``default`` when
        the key is absent)."""
        return self._get_list(key, default, force_list=True)

    def items(self):
        """Yield (key, last_value) pairs."""
        for key in dict.keys(self):
            yield key, self[key]

    def items_list(self):
        """Yield (key, full value list) pairs."""
        yield from dict.items(self)
# TODO: add slots
class Request:
    """An incoming HTTP request, wrapping a Starlette request with
    convenience accessors: case-insensitive headers, cookies, query params,
    lazily-read body, encoding detection, and media (content) negotiation."""

    __slots__ = ["_starlette", "formats", "_headers", "_encoding", "api", "_content"]

    def __init__(self, scope, receive, api=None):
        self._starlette = StarletteRequest(scope, receive)
        self.formats = None  # populated externally: format name -> async decoder
        self._encoding = None  # user override; see the ``encoding`` property
        self.api = api
        self._content = None  # lazily-read raw body cache

        # Copy Starlette's headers once into a case-insensitive dict.
        headers = CaseInsensitiveDict()
        for header, value in self._starlette.headers.items():
            headers[header] = value

        self._headers = headers

    @property
    def session(self):
        """The session data, in dict form, from the Request."""
        # Use the configured cookie name for BOTH the membership check and
        # the lookup.  Previously the check was hard-coded to
        # "Responder-Session" while the lookup used ``api.session_cookie``,
        # which disagreed whenever the cookie name was customized; the
        # ``api is not None`` guard also prevents an AttributeError for
        # requests constructed without an API.
        if self.api is not None and self.api.session_cookie in self.cookies:
            data = self.cookies[self.api.session_cookie]
            data = self.api._signer.unsign(data)
            return json.loads(data)
        return {}

    @property
    def headers(self):
        """A case-insensitive dictionary, containing all headers sent in the Request."""
        return self._headers

    @property
    def mimetype(self):
        """The Content-Type header of the Request ('' when absent)."""
        return self.headers.get("Content-Type", "")

    @property
    def method(self):
        """The incoming HTTP method used for the request, lower-cased."""
        return self._starlette.method.lower()

    @property
    def full_url(self):
        """The full URL of the Request, query parameters and all."""
        return str(self._starlette.url)

    @property
    def url(self):
        """The parsed URL of the Request."""
        return rfc3986.urlparse(self.full_url)

    @property
    def cookies(self):
        """The cookies sent in the Request, as a dictionary."""
        cookies = RequestsCookieJar()
        cookie_header = self.headers.get("cookie", "")

        bc = SimpleCookie(cookie_header)
        for k, v in bc.items():
            # Store the cookie's string value, not the Morsel object itself;
            # consumers such as ``session`` (which unsigns the value) expect
            # a plain string.
            cookies[k] = v.value

        return cookies.get_dict()

    @property
    def params(self):
        """A dictionary of the parsed query parameters used for the Request."""
        try:
            return QueryDict(self.url.query)
        except AttributeError:
            return QueryDict({})

    @property
    async def encoding(self):
        """The encoding of the Request's body. Can be set, manually. Must be awaited."""
        # Use the user-set encoding first.
        if self._encoding:
            return self._encoding

        # Then try what's declared by the Request.  The async property is
        # awaited exactly once; the previous code returned the *coroutine
        # object* (``return self.declared_encoding``) instead of its result.
        declared = await self.declared_encoding
        if declared:
            return declared

        # Then, automatically detect the encoding.
        return await self.apparent_encoding

    @encoding.setter
    def encoding(self, value):
        self._encoding = value

    @property
    async def content(self):
        """The Request body, as bytes. Must be awaited."""
        if not self._content:
            self._content = await self._starlette.body()
        return self._content

    @property
    async def text(self):
        """The Request body, as unicode. Must be awaited."""
        return (await self.content).decode(await self.encoding)

    @property
    async def declared_encoding(self):
        """The encoding declared by the Request, or None. Must be awaited."""
        # NOTE(review): "Encoding" is not a standard request header (the
        # charset usually arrives inside Content-Type) -- confirm intended.
        if "Encoding" in self.headers:
            return self.headers["Encoding"]
        return None

    @property
    async def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library. Must be awaited."""
        declared_encoding = await self.declared_encoding
        if declared_encoding:
            return declared_encoding
        return chardet.detect(await self.content)["encoding"]

    @property
    def is_secure(self):
        """True when the request was made over https."""
        return self.url.scheme == "https"

    def accepts(self, content_type):
        """Returns ``True`` if the incoming Request accepts the given ``content_type``."""
        return content_type in self.headers.get("Accept", [])

    async def media(self, format=None):
        """Renders incoming json/yaml/form data as Python objects. Must be awaited.

        :param format: The name of the format being used. Alternatively accepts a custom callable for the format type.
        """
        if format is None:
            # Infer the decoder from the Content-Type.  "form" wins over
            # "yaml", and anything else falls back to JSON -- this preserves
            # the precedence of the original chained conditionals (whose
            # trailing ``or ""`` clauses were no-ops).
            if "form" in self.mimetype:
                format = "form"
            elif "yaml" in self.mimetype:
                format = "yaml"
            else:
                format = "json"

        if format in self.formats:
            return await self.formats[format](self)
        return await format(self)
class Response:
    """An outgoing HTTP response: accumulates status, body (text / bytes /
    media), headers, cookies and session data, and renders itself as an
    ASGI response when called."""

    __slots__ = [
        "req",
        "status_code",
        "text",
        "content",
        "encoding",
        "media",
        "headers",
        "formats",
        "cookies",
        "session",
    ]

    def __init__(self, req, *, formats):
        self.req = req  #: The Request this Response answers.
        self.status_code = HTTP_200 #: The HTTP Status Code to use for the Response.
        self.text = None #: A unicode representation of the response body.
        self.content = None #: A bytes representation of the response body.
        self.encoding = DEFAULT_ENCODING  #: Encoding used when ``text`` is set.
        self.media = (
            None
        ) #: A Python object that will be content-negotiated and sent back to the client. Typically, in JSON formatting.
        self.headers = (
            {}
        ) #: A Python dictionary of ``{key: value}``, representing the headers of the response.
        self.formats = formats  #: Mapping of format name -> async renderer.
        self.cookies = {} #: The cookies set in the Response, as a dictionary
        self.session = (
            req.session.copy()
        ) #: The cookie-based session data, in dict form, to add to the Response.

    @property
    async def body(self):
        """Resolve ``(body_bytes, extra_headers)`` from content / text /
        negotiated media, in that priority order. Must be awaited."""
        # NOTE(review): truthiness checks mean empty bytes/str are treated
        # as "unset" and fall through to content negotiation -- confirm.
        if self.content:
            return (self.content, {})

        if self.text:
            return (self.text.encode(self.encoding), {"Encoding": self.encoding})

        # Pick the first format the client accepts.
        for format in self.formats:
            if self.req.accepts(format):
                return (await self.formats[format](self, encode=True)), {}

        # Default to JSON anyway.
        return (
            await self.formats["json"](self, encode=True),
            {"Content-Type": "application/json"},
        )

    async def __call__(self, receive, send):
        """Render and send the response (old single-callable ASGI style)."""
        body, headers = await self.body
        if self.headers:
            # Explicitly-set headers win over the format's defaults.
            headers.update(self.headers)

        response = StarletteResponse(
            body, status_code=self.status_code, headers=headers
        )
        await response(receive, send)
| 30.103448 | 121 | 0.601031 |
aa3c53fd6d46ee624b7f8501fe8550742fc44be3 | 1,083 | py | Python | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2019-11-25T21:19:03.000Z | 2019-11-25T21:19:03.000Z | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2018-03-14T04:26:54.000Z | 2018-03-14T04:26:54.000Z | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2021-09-10T21:24:43.000Z | 2021-09-10T21:24:43.000Z |
'''
Panel object contains
up to one image in the background,
and any number of catalogs plotted.
'''
import astroquery.skyview
class Panel:
    '''
    A single frame of a finder chart,
    that has up to one image in the background,
    and any number of catalogs plotted.
    '''
    def __init__(self, image, catalogs=None):
        # NOTE(review): stub -- ``image`` and ``catalogs`` are currently
        # ignored; presumably they should be stored as attributes. TODO confirm.
        pass
#???
# define the images that are accessible to skyview
# SkyView survey identifiers, grouped by survey (near-IR, optical, UV).
twomass = ['2MASS-J', '2MASS-H', '2MASS-K']
ukidss = ['UKIDSS-Y', 'UKIDSS-J', 'UKIDSS-H', 'UKIDSS-K']
wise = ['WISE 3.4', 'WISE 4.6', 'WISE 12', 'WISE 22']
dss1 = ['DSS1 Blue', 'DSS1 Red']
dss2 = ['DSS2 Blue', 'DSS2 Red']
GALEX = ['GALEX Far UV', 'GALEX Near UV']
class Image:
    '''
    This represents images that lines up with a given patch of the sky.
    '''
    def __init__(self, hdu, name=None):
        '''
        Initialize an image.

        Parameters
        ----------

        hdu : a PrimaryHDU file
            FITS file
        name : str, optional
            Human-readable label for the image (e.g. the survey name).
        '''
        self.header = hdu.header  # FITS header of the HDU
        self.data = hdu.data  # pixel data array
        # NOTE(review): ``WCS`` is never imported in this module (presumably
        # ``from astropy.wcs import WCS`` is intended), so this line raises
        # NameError as written -- confirm the missing import.
        self.wcs = WCS(hdu.header)
        self.name = name
| 22.102041 | 71 | 0.581717 |
2119f2cc9a8515c6f9ade48b78f5d7fb0cf38370 | 455 | py | Python | src/model/card/SaneCard.py | Marsevil/LovecraftLetter_IA | 201d32c54448d6fb82f02e9ea6df8c7e59e8309c | [
"MIT"
] | null | null | null | src/model/card/SaneCard.py | Marsevil/LovecraftLetter_IA | 201d32c54448d6fb82f02e9ea6df8c7e59e8309c | [
"MIT"
] | null | null | null | src/model/card/SaneCard.py | Marsevil/LovecraftLetter_IA | 201d32c54448d6fb82f02e9ea6df8c7e59e8309c | [
"MIT"
] | 3 | 2021-03-17T20:58:04.000Z | 2021-05-16T15:20:10.000Z | from .Card import Card
from abc import ABC, abstractmethod
class SaneCard (Card):
    """ SaneCard class (abstract) is defined by :
    - SaneCard(_name : string, _description : string, _value : int)
    - effect(sanity : Sanity)
    - hasInsane() """

    @abstractmethod
    def __init__(self, _name, _description, _value):
        # Abstract: concrete sane cards must call through to populate the
        # shared Card fields (name, description, value).
        super().__init__(_name, _description, _value)

    @staticmethod
    def hasInsane():
        # Sane cards never carry an insane variant.
        return False
| 25.277778 | 71 | 0.643956 |
5e6ef30c728cb4c86bbb415891a6a8c2ba172cee | 1,577 | py | Python | course/migrations/0001_initial.py | ArnedyNavi/studymate | 55e6a2c6717dd478a311ea8bf839a26ca3ef2b40 | [
"MIT"
] | 4 | 2021-12-31T17:25:00.000Z | 2022-02-08T17:05:46.000Z | course/migrations/0001_initial.py | ArnedyNavi/studymate | 55e6a2c6717dd478a311ea8bf839a26ca3ef2b40 | [
"MIT"
] | null | null | null | course/migrations/0001_initial.py | ArnedyNavi/studymate | 55e6a2c6717dd478a311ea8bf839a26ca3ef2b40 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2022-01-05 07:53
import course.models
from django.conf import settings
from django.db import migrations, models
import studymate.storage
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the initial ``Course`` and
    # ``UserCourse`` tables for the course app.

    initial = True

    dependencies = [
        # The swappable user model must exist before UserCourse.user below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('profile_image', models.ImageField(blank=True, default='banner/banner.jpg', max_length=255, null=True, storage=studymate.storage.OverwriteStorage(), upload_to=course.models.get_course_banner_filepath)),
                ('ratings', models.FloatField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='UserCourse',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rate', models.IntegerField(default=0)),
                ('progress', models.IntegerField(default=0)),
                ('completed', models.BooleanField(default=False)),
                ('course', models.ManyToManyField(related_name='user', to='course.Course')),
                ('user', models.ManyToManyField(related_name='course', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 39.425 | 219 | 0.618263 |
65efeb57abed4bb9ea049c8cf407b98a93b9ace5 | 1,500 | py | Python | malware_rl/__init__.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 61 | 2020-08-28T19:33:07.000Z | 2022-03-26T06:38:29.000Z | malware_rl/__init__.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 11 | 2020-09-25T18:59:41.000Z | 2022-01-31T11:39:57.000Z | malware_rl/__init__.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 12 | 2020-08-29T01:35:05.000Z | 2022-02-07T02:56:35.000Z | from gym.envs.registration import register
from sklearn.model_selection import train_test_split
from malware_rl.envs.utils import interface
# create a holdout set
sha256 = interface.get_available_sha256()
sha256_train, sha256_holdout = train_test_split(sha256, test_size=40)

MAXTURNS = 50

# Every environment family gets a "train" variant (random sampling over the
# training sha256s) and a "test" variant (deterministic pass over the
# holdout set).  Registration order matches the original hand-written list.
_ENV_ENTRY_POINTS = {
    "malconv": "malware_rl.envs:MalConvEnv",
    "ember": "malware_rl.envs:EmberEnv",
    "sorel": "malware_rl.envs:SorelEnv",
}
_ENV_SPLITS = {
    "train": (True, sha256_train),
    "test": (False, sha256_holdout),
}

for _name, _entry_point in _ENV_ENTRY_POINTS.items():
    for _split, (_random_sample, _sha256list) in _ENV_SPLITS.items():
        register(
            id="{}-{}-v0".format(_name, _split),
            entry_point=_entry_point,
            kwargs={
                "random_sample": _random_sample,
                "maxturns": MAXTURNS,
                "sha256list": _sha256list,
            },
        )
| 21.126761 | 69 | 0.636 |
1afebf4b7b6801e4393604a66512f748526274d3 | 1,703 | py | Python | network/Coral.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | [
"MIT"
] | 1 | 2021-06-16T16:45:01.000Z | 2021-06-16T16:45:01.000Z | network/Coral.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | [
"MIT"
] | null | null | null | network/Coral.py | Fassial/Air-Writing-with-TL | 9b9047c5bd5aef3a869e2d5166be1c0cf0c5ccf0 | [
"MIT"
] | 1 | 2020-04-21T01:31:26.000Z | 2020-04-21T01:31:26.000Z | import torch
import scipy.linalg
import numpy as np
# Default compute device for the loss helpers below: CUDA when available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def CORAL_loss(source, target):
    """CORAL loss between two feature batches (Sun & Saenko, 2016).

    Computes the Frobenius norm of the difference between the sample
    covariance matrices (ddof=1) of ``source`` and ``target``, scaled by
    1 / (4 * d^2).

    Parameters
    ----------
    source : torch.Tensor, shape (n_s, d)
    target : torch.Tensor, shape (n_t, d)

    Returns
    -------
    torch.Tensor
        Scalar loss.
    """
    d = source.size(1)
    n_s, n_t = source.size(0), target.size(0)

    # Source covariance.  The helper ones-vector is created on the input's
    # own device/dtype instead of the module-global DEVICE, so the loss
    # works wherever (and in whatever precision) the inputs live.
    ones_s = torch.ones((1, n_s), device=source.device, dtype=source.dtype)
    tmp_s = ones_s @ source
    cs = (source.t() @ source - (tmp_s.t() @ tmp_s) / n_s) / (n_s - 1)

    # Target covariance.
    ones_t = torch.ones((1, n_t), device=target.device, dtype=target.dtype)
    tmp_t = ones_t @ target
    ct = (target.t() @ target - (tmp_t.t() @ tmp_t) / n_t) / (n_t - 1)

    # Frobenius norm of the covariance gap, normalized by 4*d^2.
    loss = (cs - ct).pow(2).sum().sqrt()
    return loss / (4 * d * d)
def CORAL_np(Xs, Xt):
    '''
    Perform CORAL on the source domain features

    Re-colors the source features so their covariance matches the target's:
    whitening with cov_src^{-1/2}, then re-coloring with cov_tar^{1/2}.
    A ridge term (2*I) keeps both covariance matrices well-conditioned.

    :param Xs: ns * n_feature, source feature
    :param Xt: nt * n_feature, target feature
    :return: New source domain features
    '''
    # Regularized domain covariances.
    cov_src = np.cov(Xs.T) + 2 * np.eye(Xs.shape[1])
    cov_tar = np.cov(Xt.T) + 2 * np.eye(Xt.shape[1])

    # A = cov_src^{-1/2} @ cov_tar^{1/2}.  (A leftover debug ``print`` of
    # the intermediate matrix dtype has been removed.)
    A_coral = np.dot(scipy.linalg.fractional_matrix_power(cov_src, -0.5),
                     scipy.linalg.fractional_matrix_power(cov_tar, 0.5))
    Xs_new = np.dot(Xs, A_coral)
    return Xs_new
def CORAL_torch(source, target):
    """Placeholder torch CORAL transform: returns ``source`` unchanged.

    The original body had an early ``return source`` followed by
    unreachable, non-functional code (it referenced an undefined matrix and
    called ``source.mm()`` with no arguments); that dead code has been
    removed.  ``target`` is accepted for interface parity but is unused.

    TODO: implement the actual covariance alignment (see ``CORAL_np``).
    """
    return source
| 28.383333 | 122 | 0.655314 |
6c58385a82fdf4a63a74cab36dc78fc9e0915829 | 2,235 | py | Python | demos/html5boilerplate.py | aaronchall/HTML5.py | ccff1451214adf2d9147a3e253f49e757da5297f | [
"MIT"
] | 3 | 2015-11-05T14:36:27.000Z | 2020-07-20T19:10:17.000Z | demos/html5boilerplate.py | aaronchall/HTML5.py | ccff1451214adf2d9147a3e253f49e757da5297f | [
"MIT"
] | 1 | 2015-11-05T14:36:16.000Z | 2015-11-06T04:17:13.000Z | demos/html5boilerplate.py | aaronchall/HTML5.py | ccff1451214adf2d9147a3e253f49e757da5297f | [
"MIT"
] | 2 | 2016-01-04T06:25:05.000Z | 2020-07-20T19:10:21.000Z | """emulate the html from html5boilerplate"""
from __future__ import print_function, division, absolute_import
from html5 import Document, Head, Meta, Description, Viewport, Link
from html5 import Script, Body, Comment, Paragraph
from html5 import Description, Viewport# metas
def main():
"""print the boilerplate"""
print(
Document(
Head(
[Meta(http-equiv='x-ua-compatible', content='ie=edge'), # get charset for free
Description('Boilerplate has this empty, but '
'this description should be detailed '
'it is what will show up in Google'),
Viewport('width=device-width, initial-scale=1'),
Link(rel='apple-touch-icon', href='apple-touch-icon.png'),
# maybe we need comments for this sort of thing:
Comment('Place favicon.ico in the root directory'),
Link(rel='stylesheet', href='css/normalize.css'),
Link(rel='stylesheet', href='css/main.css'),
],
title=Title('')),
Body([
#Comment("""[if lt IE 8]>
# <p class="browserupgrade">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience.</p>
# <![endif]""")
Conditional(
'if lt IE 8'
[Paragraph(["""<p class="browserupgrade">You are using an <strong>outdated</strong> browser.
Please <a href="http://browsehappy.com/">upgrade your browser</a> to improve your experience."""],
Class='browserupgrade', )],
),
# are comments necessary? We have Python comments. Why have no-op html?
Comment('Add your site or application content here'),
Script(src='https://ajax.googleapis.com/ajax/libs/jquery/{{JQUERY_VERSION}}/jquery.min.js'),
Script(["""window.jQuery || document.write('<script src="js/vendor/jquery-{{JQUERY_VERSION}}.min.js"><\/script>')'"""]),
Script(src="js/plugins.js"),
Script(src="js/main.js"),
]),
Class='no-js'
)
)
if __name__ == '__main__':
main()
| 42.980769 | 194 | 0.580313 |
2283fcabd7a5a127c71c332aedcbd89a8d37459c | 5,348 | py | Python | pb_planning/pb_tools/movo_constants.py | kylehkhsu/pybullet-planning | a504d4641f2bb1d52b7532c35d3d6996302bb80d | [
"MIT"
] | 143 | 2020-10-22T04:17:04.000Z | 2022-03-26T09:24:21.000Z | pybullet_tools/movo_constants.py | Kami-code/pybullet_planning | a9ac00ac407c57cc6280867e7308b3737728bf6b | [
"MIT"
] | 4 | 2019-03-05T14:57:09.000Z | 2020-10-14T05:24:21.000Z | pybullet_tools/movo_constants.py | Kami-code/pybullet_planning | a9ac00ac407c57cc6280867e7308b3737728bf6b | [
"MIT"
] | 38 | 2020-11-03T07:25:49.000Z | 2022-03-26T09:33:54.000Z | #!/usr/bin/env python
from __future__ import print_function
from itertools import combinations
import numpy as np
from .ikfast.utils import IKFastInfo
from .utils import joints_from_names, has_joint, get_max_limits, get_min_limits, apply_alpha, \
pairwise_link_collision, get_all_links, get_link_name, are_links_adjacent
#MOVO_URDF = "models/movo_description/movo.urdf"
#MOVO_URDF = "models/movo_description/movo_lis.urdf"
#MOVO_URDF = "models/movo_description/movo_robotiq.urdf"
MOVO_URDF = "models/movo_description/movo_robotiq_collision.urdf"
# https://github.mit.edu/Learning-and-Intelligent-Systems/ltamp_pr2/blob/master/control_tools/ik/ik_tools/movo_ik/movo_robotiq.urdf
# https://github.com/Learning-and-Intelligent-Systems/movo_ws/blob/master/src/kinova-movo-bare/movo_common/movo_description/urdf/movo.custom.urdf
# https://github.mit.edu/Learning-and-Intelligent-Systems/ltamp_pr2/tree/master/control_tools/ik/ik_tools/movo_ik
#####################################
LEFT = 'left' # KG3
RIGHT = 'right' # ROBOTIQ
ARMS = [RIGHT, LEFT]

# Joint-name groups.  The '{}' placeholder in the templates below is filled
# with the arm side ('right' or 'left') via names_from_templates.
BASE_JOINTS = ['x', 'y', 'theta']
TORSO_JOINTS = ['linear_joint']
HEAD_JOINTS = ['pan_joint', 'tilt_joint']
ARM_JOINTS = ['{}_shoulder_pan_joint', '{}_shoulder_lift_joint', '{}_arm_half_joint', '{}_elbow_joint',
              '{}_wrist_spherical_1_joint', '{}_wrist_spherical_2_joint', '{}_wrist_3_joint']
KG3_GRIPPER_JOINTS = ['{}_gripper_finger1_joint', '{}_gripper_finger2_joint', '{}_gripper_finger3_joint']
ROBOTIQ_GRIPPER_JOINTS = ['{}_gripper_finger1_joint', '{}_gripper_finger2_joint',
                          '{}_gripper_finger1_inner_knuckle_joint', '{}_gripper_finger1_finger_tip_joint',
                          '{}_gripper_finger2_inner_knuckle_joint', '{}_gripper_finger2_finger_tip_joint']

# Link-name templates for the end-effector and tool frames.
EE_LINK = '{}_ee_link'
TOOL_LINK = '{}_tool_link'

#PASSIVE_JOINTS = ['mid_body_joint']

# TODO: mid_body_joint - might be passive
# https://github.com/Kinovarobotics/kinova-movo/blob/master/movo_moveit_config/config/movo_kg2.srdf
# https://github.mit.edu/Learning-and-Intelligent-Systems/ltamp_pr2/blob/master/control_tools/ik/ik_tools/movo_ik/movo_ik_generator.py

# IKFast solver metadata per arm: solver module, kinematic chain endpoints,
# and the joints left free for the solver.
MOVO_INFOS = {
    arm: IKFastInfo(module_name='movo.movo_{}_arm_ik'.format(arm), base_link='base_link', ee_link=EE_LINK.format(arm),
                    free_joints=['linear_joint', '{}_arm_half_joint'.format(arm)]) for arm in ARMS}

# Display color for the robot model (dark gray, fully opaque).
MOVO_COLOR = apply_alpha(0.25*np.ones(3), 1)
#####################################
def names_from_templates(templates, *args):
    """Fill each naming template with ``args`` and return the results."""
    return [fmt.format(*args) for fmt in templates]
def get_arm_joints(robot, arm):
    """Return the joint ids of the seven arm joints for ``arm``."""
    assert arm in ARMS
    joint_names = names_from_templates(ARM_JOINTS, arm)
    return joints_from_names(robot, joint_names)
def has_kg3_gripper(robot, arm):
    """True iff ``robot`` exposes all three KG3 gripper joints on ``arm``."""
    assert arm in ARMS
    joint_names = names_from_templates(KG3_GRIPPER_JOINTS, arm)
    return all(has_joint(robot, name) for name in joint_names)
def has_robotiq_gripper(robot, arm):
    """True iff ``robot`` exposes all six Robotiq gripper joints on ``arm``."""
    assert arm in ARMS
    joint_names = names_from_templates(ROBOTIQ_GRIPPER_JOINTS, arm)
    return all(has_joint(robot, name) for name in joint_names)
def get_gripper_joints(robot, arm):
    """Return the gripper joint ids for ``arm``; raise ValueError when no
    known gripper model is attached."""
    assert arm in ARMS
    if has_kg3_gripper(robot, arm):
        templates = KG3_GRIPPER_JOINTS
    elif has_robotiq_gripper(robot, arm):
        templates = ROBOTIQ_GRIPPER_JOINTS
    else:
        raise ValueError(arm)
    return joints_from_names(robot, names_from_templates(templates, arm))
def get_open_positions(robot, arm):
    """Joint positions that fully open the gripper on ``arm``."""
    assert arm in ARMS
    gripper_joints = get_gripper_joints(robot, arm)
    if has_kg3_gripper(robot, arm):
        # KG3 fingers are open at their lower joint limits.
        return get_min_limits(robot, gripper_joints)
    if has_robotiq_gripper(robot, arm):
        # Robotiq opens at the zero position of all six joints.
        return [0.] * 6
    raise ValueError(arm)
def get_closed_positions(robot, arm):
    """Joint positions that fully close the gripper on ``arm``."""
    assert arm in ARMS
    gripper_joints = get_gripper_joints(robot, arm)
    if has_kg3_gripper(robot, arm):
        # KG3 fingers are closed at their upper joint limits.
        return get_max_limits(robot, gripper_joints)
    if has_robotiq_gripper(robot, arm):
        # Robotiq closes at 0.32 on all six joints.
        return [0.32] * 6
    raise ValueError(arm)
#####################################
def get_colliding(robot):
    """Return name pairs of non-adjacent links currently in self-collision."""
    links = get_all_links(robot)
    colliding = []
    for link1, link2 in combinations(links, r=2):
        if are_links_adjacent(robot, link1, link2):
            continue
        if pairwise_link_collision(robot, link1, robot, link2):
            colliding.append((get_link_name(robot, link1),
                              get_link_name(robot, link2)))
    return colliding
NEVER_COLLISIONS = [
('linear_actuator_fixed_link', 'right_base_link'), ('linear_actuator_fixed_link', 'right_shoulder_link'),
('linear_actuator_fixed_link', 'left_base_link'), ('linear_actuator_fixed_link', 'left_shoulder_link'),
('linear_actuator_fixed_link', 'front_laser_link'), ('linear_actuator_fixed_link', 'rear_laser_link'),
('linear_actuator_link', 'pan_link'), ('linear_actuator_link', 'right_shoulder_link'),
('linear_actuator_link', 'right_arm_half_1_link'), ('linear_actuator_link', 'left_shoulder_link'),
('linear_actuator_link', 'left_arm_half_1_link'), ('right_wrist_spherical_2_link', 'right_robotiq_coupler_link'),
('right_wrist_3_link', 'right_robotiq_coupler_link'), ('right_wrist_3_link', 'right_gripper_base_link'),
('right_gripper_finger1_finger_link', 'right_gripper_finger1_finger_tip_link'),
('right_gripper_finger2_finger_link', 'right_gripper_finger2_finger_tip_link'),
('left_wrist_spherical_2_link', 'left_gripper_base_link'), ('left_wrist_3_link', 'left_gripper_base_link'),
] | 44.941176 | 145 | 0.74009 |
7ba1a6be91bc7fdcd84978a017c60f8d4f84119f | 2,478 | py | Python | etl_framework/config_mixins/FieldMappingsMixin.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2017-03-01T20:09:06.000Z | 2019-02-08T17:10:16.000Z | etl_framework/config_mixins/FieldMappingsMixin.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 40 | 2015-10-10T15:02:21.000Z | 2020-03-17T22:32:04.000Z | etl_framework/config_mixins/FieldMappingsMixin.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2018-11-14T21:50:58.000Z | 2022-03-07T20:59:27.000Z | """parses configuration and returns useful things"""
#pylint: disable=relative-import
#pylint cant tell AddFiltersMixin is new class
#pylint: disable=super-on-old-class
from etl_framework.Exceptions import ConfigAttrNotSetException
from etl_framework.config_mixins.AddFiltersMixin import AddFiltersMixin
from etl_framework.method_wrappers.check_config_attr import check_config_attr_default_none
class FieldMappingsMixin(AddFiltersMixin):
    """Mixin that reads and writes the 'field_mappings' section of a config.

    Each mapping entry has the shape ``source_field -> [filter, target_field]``.
    """
    FIELD_MAPPINGS = 'field_mappings'
    def add_filters(self, filter_mappings):
        """Resolve filter names through the *filter_mappings* dict (overrides parent)."""
        super(FieldMappingsMixin, self).add_filters(filter_mappings)
        mappings = self.get_field_mappings()
        if not mappings:
            raise ConfigAttrNotSetException
        resolved = {
            source: [filter_mappings.get(spec[0]), spec[1]]
            for source, spec in mappings.items()
        }
        self.set_field_mappings(resolved)
    def add_filters_from_module(self, filters_module):
        """Resolve filter names as attributes of *filters_module* (overrides parent)."""
        super(FieldMappingsMixin, self).add_filters_from_module(filters_module)
        mappings = self.get_field_mappings()
        if not mappings:
            raise ConfigAttrNotSetException
        resolved = {
            source: [getattr(filters_module, spec[0]), spec[1]]
            for source, spec in mappings.items()
        }
        self.set_field_mappings(resolved)
    @check_config_attr_default_none
    def get_field_mappings(self):
        """Return the raw field-mappings dict from the config."""
        return self.config[self.FIELD_MAPPINGS]
    def get_field_mapping_fields(self):
        """Map each source field to its target field (filters dropped)."""
        return {source: spec[1] for source, spec in self.get_field_mappings().items()}
    def get_field_mappings_without_target_fields(self):
        """Map each source field to its filter entry (target fields dropped)."""
        return {source: spec[0] for source, spec in self.get_field_mappings().items()}
    def get_field_mapping_target_fields(self):
        """Return all target fields as a tuple."""
        return tuple(spec[1] for spec in self.get_field_mappings().values())
    def get_field_mapping_source_fields(self):
        """Return all source fields as a tuple."""
        return tuple(self.get_field_mappings().keys())
    @check_config_attr_default_none
    def set_field_mappings(self, field_mappings):
        """Store *field_mappings* back into the config."""
        self.config[self.FIELD_MAPPINGS] = field_mappings
| 36.985075 | 99 | 0.695722 |
26647d34d0e04c0f9bbf212524df1df3048ffdaa | 244 | py | Python | openmetrics/datadog_checks/openmetrics/openmetrics.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T13:00:14.000Z | 2021-03-24T13:00:14.000Z | openmetrics/datadog_checks/openmetrics/openmetrics.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | 1 | 2021-02-23T14:03:42.000Z | 2021-03-25T16:52:05.000Z | openmetrics/datadog_checks/openmetrics/openmetrics.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.checks.openmetrics import OpenMetricsBaseCheck
class OpenMetricsCheck(OpenMetricsBaseCheck):
    # Thin alias: all scraping/parsing behaviour comes from
    # OpenMetricsBaseCheck; this subclass only names the integration.
    pass
| 27.111111 | 71 | 0.79918 |
f4b0d8fbeb96deaa9d5fb5b5ee3bb47d247de75b | 893 | py | Python | kubernetes/test/test_v1_network_policy_port.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_network_policy_port.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_network_policy_port.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_network_policy_port import V1NetworkPolicyPort
class TestV1NetworkPolicyPort(unittest.TestCase):
    """ V1NetworkPolicyPort unit test stubs """
    def setUp(self):
        # Generated stub: no fixtures are needed for the smoke test below.
        pass
    def tearDown(self):
        pass
    def testV1NetworkPolicyPort(self):
        """
        Test V1NetworkPolicyPort
        """
        # Smoke test: only verifies the generated model class can be
        # instantiated with no arguments; no attribute values are checked.
        model = kubernetes.client.models.v1_network_policy_port.V1NetworkPolicyPort()
if __name__ == '__main__':
unittest.main()
| 20.767442 | 105 | 0.720045 |
a80c638ca80e8ad43d8a3c49eac5fbcbb1460930 | 27 | py | Python | src/kgmk/const/__init__.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | src/kgmk/const/__init__.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | src/kgmk/const/__init__.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | from .inf import (
INF,
) | 9 | 18 | 0.592593 |
d065f19a86fe3c508080c53d3c6f89132f993d29 | 4,042 | py | Python | build.py | Lzhiyong/android-sdk-tools | a2c5e32764f4140c13da017abe9f861ba462cc7c | [
"Apache-2.0"
] | 22 | 2018-06-25T15:57:58.000Z | 2020-03-26T15:42:23.000Z | build.py | Lzhiyong/android-sdk-tools | a2c5e32764f4140c13da017abe9f861ba462cc7c | [
"Apache-2.0"
] | 3 | 2018-12-11T04:15:06.000Z | 2020-05-01T00:06:46.000Z | build.py | Lzhiyong/android-sdk-tools | a2c5e32764f4140c13da017abe9f861ba462cc7c | [
"Apache-2.0"
] | 6 | 2018-08-25T19:54:58.000Z | 2020-03-26T15:42:26.000Z | #!/usr/bin/env python
#
# Copyright © 2022 Github Lzhiyong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=not-callable, line-too-long, no-else-return
import time
import argparse
import subprocess
from pathlib import Path
def format_time(seconds):
    """Render a duration in seconds as a compact h/m/s string.

    Args:
        seconds: elapsed time; may be a float (e.g. a time.time() delta).

    Returns:
        "XhMMmSS.SSs", "MmSS.SSs" or "S.SSs" depending on magnitude.
    """
    minute, sec = divmod(seconds, 60)
    # divmod on a float yields floats; coerce before integer formatting.
    hour, minute = divmod(int(minute), 60)
    # Fix: the original specs ("{:02d.2f}", and "{:02d}" applied to floats)
    # were invalid and raised ValueError for any duration >= 60 seconds.
    if hour > 0:
        return "{}h{:02d}m{:05.2f}s".format(hour, int(minute), sec)
    elif minute > 0:
        return "{}m{:05.2f}s".format(int(minute), sec)
    else:
        return "{:.2f}s".format(sec)
def build(cc, cxx, args):
    """Configure the project with CMake/Ninja, then run the Ninja build.

    Args:
        cc: path to the C compiler (clang).
        cxx: path to the C++ compiler (clang++).
        args: parsed command-line namespace (build dir, arch, target, job, protoc).
    """
    cmake_cmd = [
        "cmake",
        "-GNinja",
        "-B {}".format(args.build),
        "-DCMAKE_C_COMPILER={}".format(cc),
        "-DCMAKE_CXX_COMPILER={}".format(cxx),
        "-DTARGET_ABI={}".format(args.arch),
        "-DCMAKE_BUILD_TYPE=Release",
    ]
    # A host protoc is optional; only pass it through when it was given.
    if args.protoc is not None and len(str(args.protoc)) > 0:
        cmake_cmd.append("-DPROTOC_PATH={}".format(args.protoc))
    result = subprocess.run(cmake_cmd)
    start = time.time()
    if result.returncode == 0:
        ninja_cmd = ["ninja", "-C", args.build]
        # "all" is Ninja's default, so only name a target when it differs.
        if args.target != "all":
            ninja_cmd.append(args.target)
        ninja_cmd.append("-j {}".format(args.job))
        result = subprocess.run(ninja_cmd)
    if result.returncode == 0:
        end = time.time()
        print("\033[1;32mbuild success cost time: {}\033[0m".format(format_time(end - start)))
def configure(args):
    """Resolve the NDK clang/clang++ paths for the target arch and start the build.

    Raises:
        ValueError: when the NDK directory or the clang binaries are missing.
    """
    ndk = Path(args.ndk)
    if not ndk.exists() or not ndk.is_dir():
        raise ValueError("cannot find the ndk")
    toolchain = ndk / "toolchains/llvm/prebuilt/linux-x86_64"
    # Clang driver name prefix per target triple; the API level is appended.
    triples = {
        "aarch64": "aarch64-linux-android",
        "arm": "armv7a-linux-androideabi",
        "x86": "i686-linux-android",
    }
    prefix = triples.get(args.arch, "x86_64-linux-android")
    cc = toolchain / "bin" / "{}{}-clang".format(prefix, args.api)
    cxx = toolchain / "bin" / "{}{}-clang++".format(prefix, args.api)
    if not cc.exists() or not cxx.exists():
        print("cc is {}".format(cc))
        print("cxx is {}".format(cxx))
        raise ValueError("error: cannot find the clang compiler")
    # start building
    build(str(cc), str(cxx), args)
def main():
    """Parse the command line and kick off the configure/build steps."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--ndk", required=True, help="set the ndk toolchain path")
    parser.add_argument("--arch", required=True,
                        choices=["aarch64", "arm", "x86", "x86_64"],
                        help="build for the specified architecture")
    parser.add_argument("--api", default=30, help="set android platform level, min api is 30")
    parser.add_argument("--build", default="build", help="the build directory")
    parser.add_argument("--job", default=16, help="run N jobs in parallel, default is 16")
    parser.add_argument("--target", default="all", help="build specified targets such as aapt2 adb fastboot, etc")
    parser.add_argument("--protoc", help="set the host protoc path")
    configure(parser.parse_args())
if __name__ == "__main__":
main()
| 36.089286 | 114 | 0.635824 |
bc75033967ffd05166158ccd907f35abbfd606ef | 1,699 | py | Python | project_automation/commands/go.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | null | null | null | project_automation/commands/go.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | 2 | 2021-01-17T16:04:03.000Z | 2021-08-13T13:00:49.000Z | project_automation/commands/go.py | Guigui14460/project-automation | 98f9b73be2000b0ecb07b1cca758693c29032947 | [
"Apache-2.0"
] | null | null | null | from typing import NoReturn
from .command_program import CommandProgram
from .utils import WindowsInstallationPackage, MacOSInstallationPackage, GNULinuxDistributionInstallationPackage
class GoCommand(CommandProgram):
    """
    Command to verify whether the ``go`` command is recognized by the
    operating system. If it is not, the class can install it automatically
    (when allowed).
    """
    def __init__(self, allow_install: bool, update_package_manager: bool = True) -> None:
        """
        Constructor and initializer.
        Parameters
        ----------
        allow_install : bool
            True if you want to automatically install the required package, False otherwise
        update_package_manager : bool
            allows this program to automatically update and upgrade all packages installed in the system (via the package manager used)
        """
        # Per-OS installation recipes for the Go toolchain.
        windows = WindowsInstallationPackage(
            windows_download_link="https://golang.org/dl/",
            winget_command="winget install Golang.Go",
            scoop_command="scoop install go",
            choco_command="choco install golang",
            update_package_manager=update_package_manager
        )
        macos = MacOSInstallationPackage(
            macos_download_link="https://golang.org/dl/",
            brew_command="brew install go",
            update_package_manager=update_package_manager
        )
        linux = GNULinuxDistributionInstallationPackage(
            linux_download_link="https://golang.org/dl/",
            update_package_manager=update_package_manager
        )
        # "go version" is the probe command used to detect an existing install.
        super().__init__("go version", allow_install,
                         windows, macos, linux)
| 40.452381 | 135 | 0.670394 |
6a1b2030ec561f9077488bf846b45e0650dcb1e9 | 1,019 | py | Python | WebServices/ApiHandler.py | david-c-stein/Python-HouseLEDs | ba01368179f30fe7612b30f1257838b715fd866b | [
"MIT"
] | null | null | null | WebServices/ApiHandler.py | david-c-stein/Python-HouseLEDs | ba01368179f30fe7612b30f1257838b715fd866b | [
"MIT"
] | null | null | null | WebServices/ApiHandler.py | david-c-stein/Python-HouseLEDs | ba01368179f30fe7612b30f1257838b715fd866b | [
"MIT"
] | null | null | null | import Global
import json
import time
import datetime
import os.path
import tornado.web
from tornado.escape import json_encode, json_decode
from tornado.options import options
import uuid
import base6
class ApiHandler(tornado.web.RequestHandler):
    # Tornado request handler bridging HTTP API requests to the worker queues.
    # NOTE(review): several names used below are undefined in this module
    # (`logging`, `queSer`, `web`, `cl`) and `import base6` above looks like a
    # typo for `base64` — this handler cannot run as written; confirm intent.
    def initialize(self, queCam, queHdw, queWeb, config):
        self.config = config
        # NOTE(review): `logging` is never imported in this file.
        self.logger = logging.getLogger(__name__)
        self.logger.info("Initializing " + __file__)
        # message queues
        self.getMsg = queWeb
        # NOTE(review): `queSer` is not a parameter of initialize() —
        # presumably one of the passed queues was intended; confirm.
        self.putMsgSer = queSer.put
        self.putMsgCam = queCam.put
        self.putMsgHwd = queHdw.put
    #=============================================================
    # NOTE(review): `web` is undefined; presumably `tornado.web` was meant.
    @web.asynchronous
    def get(self, *args):
        # NOTE(review): finish() is called before reading arguments and
        # `cl` (client list?) is undefined — confirm intended behaviour.
        self.finish()
        id = self.get_argument("id")
        value = self.get_argument("value")
        data = {"id": id, "value" : value}
        data = json.dumps(data)
        for c in cl:
            c.write_message(data)
    @web.asynchronous
    def post(self):
        pass
| 23.159091 | 66 | 0.594701 |
512d397b00f61a3e517094366111b009af8e1dde | 4,530 | py | Python | vk.py | Fogapod/weechat-vk-script | b9a2fa06c6d415b386455fd4092e80c58e8263d0 | [
"MIT"
] | null | null | null | vk.py | Fogapod/weechat-vk-script | b9a2fa06c6d415b386455fd4092e80c58e8263d0 | [
"MIT"
] | null | null | null | vk.py | Fogapod/weechat-vk-script | b9a2fa06c6d415b386455fd4092e80c58e8263d0 | [
"MIT"
] | null | null | null | # coding:utf8
SCRIPT_NAME = 'vk'
SCRIPT_AUTHOR = 'fogapod'
SCRIPT_VERSION = '0.0.1dev'
SCRIPT_LICENSE = 'MIT'
SCRIPT_DESC = 'vk.com messaging script'
try:
import weechat
except ImportError:
print('\nThis file should be run under WeeChat')
import sys
sys.exit(2)
import re
DEFAULT_SETTINGS = {
'debug_logging': True,
'token': ''
}
INSERT_TOKEN_COMMAND = 'insert-token'
BUFFER = ''
# TODO: use classes instead
# UTIL
def log_info(text, buffer=None, note=0, error=0):
    """Print *text* to a weechat buffer, prefixed with the script name.

    note/error select the weechat join/error prefixes (NOTE_PREFIX /
    ERROR_PREFIX, which are initialised as globals in main()).
    """
    if buffer is None:
        buffer = BUFFER
    message = '{0}: {1}'.format(SCRIPT_NAME, text)
    if get_setting('debug_logging'):
        # not very useful yet
        pass
    if note:
        message = NOTE_PREFIX + message
    elif error:
        message = ERROR_PREFIX + message
    weechat.prnt(buffer, message)
def log_debug(text, *args, **kwargs):
    """Log *text* with a [DEBUG] marker, but only when debug logging is enabled."""
    if not get_setting('debug_logging'):
        return
    log_info('[DEBUG] ' + text, *args, **kwargs)
def set_default_settings():
    """Create any plugin options that are not yet stored in weechat's config."""
    for name, default in DEFAULT_SETTINGS.items():
        if weechat.config_is_set_plugin(name):
            continue
        log_debug('Creating: {0}:{1}'.format(name, default))
        set_setting(name, default)
def get_setting(key):
    """Read a plugin setting, coercing it to the type of its default value."""
    default = DEFAULT_SETTINGS[key]
    if not weechat.config_is_set_plugin(key):
        return default
    raw = weechat.config_get_plugin(key)
    # weechat stores everything as strings; booleans need explicit parsing
    # because bool("False") would be truthy.
    if type(default) is bool:
        return raw == 'True'
    return type(default)(raw)
def set_setting(key, value):
    """Persist *value* (stringified) under the plugin option *key*."""
    log_debug('Saving: {0}:"{1}"'.format(key, value))
    weechat.config_set_plugin(key, str(value))
# BUFFER
def buffer_input_cb(data, buffer, input_data):
    """Handle text typed into the script buffer.

    Supports the ``insert-token`` command, which extracts the OAuth access
    token from a pasted vk.com redirect URL and stores it in the plugin
    configuration.

    Args:
        data: callback data registered with weechat (unused).
        buffer: pointer of the buffer that received the input.
        input_data: the raw line the user typed.

    Returns:
        weechat.WEECHAT_RC_OK, as required by the weechat callback API.
    """
    # TODO: use '/vk command' instead or both methods
    lower_input_data = input_data.lower()
    args = lower_input_data.strip().split()[1:]
    if lower_input_data.startswith(INSERT_TOKEN_COMMAND):
        if not args:
            log_info('This command should be run with argument!', error=1)
        else:
            token = re.search('access_token=(.+?)&expires_in', args[0])
            if token:
                # Fix: the original referenced an undefined TOKEN_SETTING_NAME
                # (NameError at runtime); the setting key used everywhere else
                # (DEFAULT_SETTINGS, get_setting('token')) is 'token'.
                set_setting('token', token.group(1))
                log_debug(token.group(1))
            else:
                log_info(
                    'Could not find token in url! Please, try again', error=1
                )
    return weechat.WEECHAT_RC_OK
def buffer_close_cb(data, buffer):
    """weechat callback fired when the script's buffer is closed by the user."""
    log_info(
        'WARNING: This buffer should not be closed while script running. Use '
        '/python unload {0} if you want to exit'.format(SCRIPT_NAME), error=1
    )  # TODO: prevent closing buffer or open new after x seconds
    # Clear the stale buffer pointer so create_buffer() can reopen it.
    global BUFFER
    BUFFER = ''
    return weechat.WEECHAT_RC_OK
def create_buffer():
    """Open the script's main buffer if it does not already exist."""
    global BUFFER
    if not BUFFER:
        # buffer_input_cb / buffer_close_cb are registered by name, as the
        # weechat API requires.
        BUFFER = weechat.buffer_new(
            SCRIPT_NAME, 'buffer_input_cb',
            'Main working window for ' + SCRIPT_NAME,
            'buffer_close_cb', ''
        )
        log_debug('Buffer created')
# BOT
class LongPollSession:
    # Placeholder for the vk.com long-poll messaging session.
    def __init__(self):
        # mlpd: presumably the long-poll server data — TODO confirm once
        # the session logic is implemented.
        self.mlpd = None
def show_auth_hint():
    """Tell the user how to obtain and register a vk.com OAuth token."""
    log_info(
        'Please, open this url, confirm access rights of the app and copy url'
        ' from address bar. Then use command {0} <copyed_url>'.format(
            INSERT_TOKEN_COMMAND)
    )
    log_info(
        'https://oauth.vk.com/authorize?client_id=6178678&scope=69636&v=5.68&'
        'response_type=token',
        note=1
    )
def main():
    """Entry point: register the script, build message prefixes, open the buffer."""
    if not weechat.register(
            SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
            SCRIPT_LICENSE, SCRIPT_DESC, 'exit_cb', ''):
        return weechat.WEECHAT_RC_ERROR
    # registration required for accessing config
    global NOTE_PREFIX
    global ERROR_PREFIX
    # Prefixes reuse weechat's own join/error colors and prefix strings,
    # followed by a tab (weechat's prefix/message separator).
    NOTE_PREFIX = weechat.color(weechat.config_color(
        weechat.config_get('weechat.color.chat_prefix_join')
    )) + weechat.config_string(
        weechat.config_get('weechat.look.prefix_join')) + '\t'
    ERROR_PREFIX = weechat.color(weechat.config_color(
        weechat.config_get('weechat.color.chat_prefix_error')
    )) + weechat.config_string(
        weechat.config_get('weechat.look.prefix_error')) + '\t'
    create_buffer()
    set_default_settings()
    log_debug('Test note', note=1)
    log_debug('Test error', error=1)
    # Without a stored token the user must authenticate first.
    if not get_setting('token'):
        show_auth_hint()
def exit_cb():
    """Shutdown callback registered in weechat.register(): close our buffer."""
    log_debug('Exiting')
    weechat.buffer_close(BUFFER)
    return weechat.WEECHAT_RC_OK
if __name__ == '__main__':
main()
| 24.354839 | 78 | 0.642605 |
53e2d391487b42e3c7acb14b40dd0e792404063c | 6,303 | py | Python | Thesis@3.9.1/Lib/site-packages/django/db/models/aggregates.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | Thesis@3.9.1/Lib/site-packages/django/db/models/aggregates.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | Thesis@3.9.1/Lib/site-packages/django/db/models/aggregates.py | nverbois/TFE21-232 | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | [
"MIT"
] | null | null | null | """
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Case, Func, Star, When
from django.db.models.fields import IntegerField
from django.db.models.functions.mixins import (
FixDurationInputMixin,
NumericOutputFieldMixin,
)
__all__ = [
"Aggregate",
"Avg",
"Count",
"Max",
"Min",
"StdDev",
"Sum",
"Variance",
]
class Aggregate(Func):
    """Base class for SQL aggregate functions (COUNT, SUM, AVG, ...)."""
    template = "%(function)s(%(distinct)s%(expressions)s)"
    contains_aggregate = True
    name = None
    # Wraps the base template to append a SQL FILTER clause when supported.
    filter_template = "%s FILTER (WHERE %%(filter)s)"
    window_compatible = True
    allow_distinct = False
    def __init__(self, *expressions, distinct=False, filter=None, **extra):
        # Reject DISTINCT on aggregates that do not opt in via allow_distinct.
        if distinct and not self.allow_distinct:
            raise TypeError("%s does not allow distinct." % self.__class__.__name__)
        self.distinct = distinct
        self.filter = filter
        super().__init__(*expressions, **extra)
    def get_source_fields(self):
        # Don't return the filter expression since it's not a source field.
        return [e._output_field_or_none for e in super().get_source_expressions()]
    def get_source_expressions(self):
        # Expose the filter as a trailing source expression so expression
        # traversal (resolving, copying) includes it.
        source_expressions = super().get_source_expressions()
        if self.filter:
            return source_expressions + [self.filter]
        return source_expressions
    def set_source_expressions(self, exprs):
        # Mirror get_source_expressions(): the filter, when present, travels
        # as the last element of exprs.
        self.filter = self.filter and exprs.pop()
        return super().set_source_expressions(exprs)
    def resolve_expression(
        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
    ):
        # Aggregates are not allowed in UPDATE queries, so ignore for_save
        c = super().resolve_expression(query, allow_joins, reuse, summarize)
        c.filter = c.filter and c.filter.resolve_expression(
            query, allow_joins, reuse, summarize
        )
        if not summarize:
            # Call Aggregate.get_source_expressions() to avoid
            # returning self.filter and including that in this loop.
            expressions = super(Aggregate, c).get_source_expressions()
            for index, expr in enumerate(expressions):
                if expr.contains_aggregate:
                    # Nested aggregates (e.g. Sum(Avg(...))) are invalid SQL.
                    before_resolved = self.get_source_expressions()[index]
                    name = (
                        before_resolved.name
                        if hasattr(before_resolved, "name")
                        else repr(before_resolved)
                    )
                    raise FieldError(
                        "Cannot compute %s('%s'): '%s' is an aggregate"
                        % (c.name, name, name)
                    )
        return c
    @property
    def default_alias(self):
        # e.g. Sum('price') -> 'price__sum'; complex expressions must be
        # aliased explicitly by the caller.
        expressions = self.get_source_expressions()
        if len(expressions) == 1 and hasattr(expressions[0], "name"):
            return "%s__%s" % (expressions[0].name, self.name.lower())
        raise TypeError("Complex expressions require an alias")
    def get_group_by_cols(self, alias=None):
        # Aggregates never contribute columns to GROUP BY.
        return []
    def as_sql(self, compiler, connection, **extra_context):
        extra_context["distinct"] = "DISTINCT " if self.distinct else ""
        if self.filter:
            if connection.features.supports_aggregate_filter_clause:
                # Backend supports a native FILTER (WHERE ...) clause.
                filter_sql, filter_params = self.filter.as_sql(compiler, connection)
                template = self.filter_template % extra_context.get(
                    "template", self.template
                )
                sql, params = super().as_sql(
                    compiler,
                    connection,
                    template=template,
                    filter=filter_sql,
                    **extra_context
                )
                return sql, params + filter_params
            else:
                # Fallback: emulate FILTER by aggregating over
                # CASE WHEN <filter> THEN <expr> END.
                copy = self.copy()
                copy.filter = None
                source_expressions = copy.get_source_expressions()
                condition = When(self.filter, then=source_expressions[0])
                copy.set_source_expressions([Case(condition)] + source_expressions[1:])
                return super(Aggregate, copy).as_sql(
                    compiler, connection, **extra_context
                )
        return super().as_sql(compiler, connection, **extra_context)
    def _get_repr_options(self):
        # Include distinct/filter in repr() only when they are set.
        options = super()._get_repr_options()
        if self.distinct:
            options["distinct"] = self.distinct
        if self.filter:
            options["filter"] = self.filter
        return options
class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate):
    """SQL AVG() aggregate."""
    function = "AVG"
    name = "Avg"
    allow_distinct = True
class Count(Aggregate):
    """SQL COUNT(); returns 0 instead of NULL when no rows match."""
    function = "COUNT"
    name = "Count"
    output_field = IntegerField()
    allow_distinct = True
    def __init__(self, expression, filter=None, **extra):
        # Accept the convenience spelling Count("*") for COUNT(*).
        if expression == "*":
            expression = Star()
        # A filtered COUNT needs a concrete field, not '*'.
        if isinstance(expression, Star) and filter is not None:
            raise ValueError("Star cannot be used with filter. Please specify a field.")
        super().__init__(expression, filter=filter, **extra)
    def convert_value(self, value, expression, connection):
        # Normalise a NULL result (no rows) to 0.
        return 0 if value is None else value
class Max(Aggregate):
    """SQL MAX() aggregate."""
    function = "MAX"
    name = "Max"
class Min(Aggregate):
    """SQL MIN() aggregate."""
    function = "MIN"
    name = "Min"
class StdDev(NumericOutputFieldMixin, Aggregate):
    """Standard deviation: STDDEV_SAMP when sample=True, else STDDEV_POP."""
    name = "StdDev"
    def __init__(self, expression, sample=False, **extra):
        self.function = "STDDEV_SAMP" if sample else "STDDEV_POP"
        super().__init__(expression, **extra)
    def _get_repr_options(self):
        # Recover the `sample` flag from the chosen SQL function.
        return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"}
class Sum(FixDurationInputMixin, Aggregate):
    """SQL SUM() aggregate."""
    function = "SUM"
    name = "Sum"
    allow_distinct = True
class Variance(NumericOutputFieldMixin, Aggregate):
    """Variance: VAR_SAMP when sample=True, else VAR_POP."""
    name = "Variance"
    def __init__(self, expression, sample=False, **extra):
        self.function = "VAR_SAMP" if sample else "VAR_POP"
        super().__init__(expression, **extra)
    def _get_repr_options(self):
        # Recover the `sample` flag from the chosen SQL function.
        return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"}
| 34.255435 | 88 | 0.613993 |
b4d3979099395fe966205dcda418128a3713a677 | 428 | py | Python | test/cmd/unix/test_cmd_ip_neigh.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | test/cmd/unix/test_cmd_ip_neigh.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | test/cmd/unix/test_cmd_ip_neigh.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z | # -*- coding: utf-8 -*-
"""
Testing of ip_neigh command.
"""
__author__ = 'Sylwester Golonka'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'sylwester.golonka@nokia.com'
def test_ip_neigh_returns_proper_command_string(buffer_connection):
    """Check that IpNeigh builds 'ip neigh show' from the given options."""
    from moler.cmd.unix.ip_neigh import IpNeigh
    cmd = IpNeigh(connection=buffer_connection.moler_connection, options="show")
    assert "ip neigh show" == cmd.command_string
| 30.571429 | 80 | 0.754673 |
7efa4473b7dc5ad51a00e66193302278a49624c0 | 15,518 | py | Python | layers/modules/multibox_loss_gmm.py | kandula-ai/AL-MDN | 266fd9e67614282f34cbc8d2d9163bde1580f6d7 | [
"BSD-Source-Code"
] | 59 | 2021-10-13T22:59:19.000Z | 2022-03-26T20:44:47.000Z | layers/modules/multibox_loss_gmm.py | jwchoi384/AL-MDN | 54aaae4405c69b33998e5b5306c7c645780d473c | [
"BSD-Source-Code"
] | 10 | 2021-11-02T06:35:24.000Z | 2022-03-26T21:08:06.000Z | layers/modules/multibox_loss_gmm.py | jwchoi384/AL-MDN | 54aaae4405c69b33998e5b5306c7c645780d473c | [
"BSD-Source-Code"
] | 9 | 2021-10-14T09:48:05.000Z | 2022-03-27T06:04:32.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import coco as cfg
from ..box_utils import match, log_sum_exp
import math
def Gaussian(y, mu, var):
    """Elementwise Gaussian density of y under N(mu, var), with a stabiliser.

    The constant eps=0.3 is added to the variance in the denominator only,
    so the result is a damped (not exactly normalised) density.
    """
    eps = 0.3
    z = (y - mu) / var
    density = torch.exp(-(z ** 2) / 2)
    return density / math.sqrt(2 * math.pi) / (var + eps)
def NLL_loss(bbox_gt, bbox_pred, bbox_var):
    """Gaussian likelihood of the GT offsets under the predicted mean/variance.

    The raw variance is squashed to (0, 1) with a sigmoid. NOTE: despite the
    name, this returns the probability density — the caller applies -log
    afterwards (see the loss computation in forward()).
    """
    squashed_var = torch.sigmoid(bbox_var)
    return Gaussian(bbox_gt, bbox_pred, squashed_var)
class MultiBoxLoss_GMM(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
"""
    def __init__(self, num_classes, overlap_thresh, prior_for_matching,
                 bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
                 use_gpu=True, cls_type='Type-1'):
        """Store the matching / hard-negative-mining hyper-parameters.

        Args:
            num_classes: number of detection classes.
            overlap_thresh: IoU threshold passed to match() when pairing
                priors with ground-truth boxes.
            prior_for_matching: stored as use_prior_for_matching.
            bkg_label: label index reserved for background.
            neg_mining: stored as do_neg_mining.
            neg_pos: negative:positive ratio used to cap mined negatives.
            neg_overlap: stored as neg_overlap (overlap bound for negatives).
            encode_target: stored flag for pre-encoded targets.
            use_gpu: move matched targets to CUDA when True.
            cls_type: 'Type-1' or 'Type-2' classification-loss variant
                selected in forward().
        """
        super(MultiBoxLoss_GMM, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        self.negpos_ratio = neg_pos
        self.neg_overlap = neg_overlap
        # Prior-box encoding variances come from the dataset config.
        self.variance = cfg['variance']
        self.cls_type = cls_type
def forward(self, predictions, targets):
priors, loc_mu_1, loc_var_1, loc_pi_1, loc_mu_2, loc_var_2, loc_pi_2, \
loc_mu_3, loc_var_3, loc_pi_3, loc_mu_4, loc_var_4, loc_pi_4, \
conf_mu_1, conf_var_1, conf_pi_1, conf_mu_2, conf_var_2, conf_pi_2, \
conf_mu_3, conf_var_3, conf_pi_3, conf_mu_4, conf_var_4, conf_pi_4 = predictions
num = loc_mu_1.size(0)
priors = priors[:loc_mu_1.size(1), :]
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(self.threshold,
truths,
defaults,
self.variance,
labels,
loc_t,
conf_t,
idx)
if self.use_gpu:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
loc_t = Variable(loc_t, requires_grad=False)
conf_t = Variable(conf_t, requires_grad=False)
pos = conf_t > 0
num_pos = pos.sum(dim=1, keepdim=True)
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_mu_1)
loc_mu_1_ = loc_mu_1[pos_idx].view(-1, 4)
loc_mu_2_ = loc_mu_2[pos_idx].view(-1, 4)
loc_mu_3_ = loc_mu_3[pos_idx].view(-1, 4)
loc_mu_4_ = loc_mu_4[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
# localization loss
loss_l_1 = NLL_loss(loc_t, loc_mu_1_, loc_var_1[pos_idx].view(-1, 4))
loss_l_2 = NLL_loss(loc_t, loc_mu_2_, loc_var_2[pos_idx].view(-1, 4))
loss_l_3 = NLL_loss(loc_t, loc_mu_3_, loc_var_3[pos_idx].view(-1, 4))
loss_l_4 = NLL_loss(loc_t, loc_mu_4_, loc_var_4[pos_idx].view(-1, 4))
loc_pi_1_ = loc_pi_1[pos_idx].view(-1, 4)
loc_pi_2_ = loc_pi_2[pos_idx].view(-1, 4)
loc_pi_3_ = loc_pi_3[pos_idx].view(-1, 4)
loc_pi_4_ = loc_pi_4[pos_idx].view(-1, 4)
pi_all = torch.stack([
loc_pi_1_.reshape(-1),
loc_pi_2_.reshape(-1),
loc_pi_3_.reshape(-1),
loc_pi_4_.reshape(-1)
])
pi_all = pi_all.transpose(0,1)
pi_all = (torch.softmax(pi_all, dim=1)).transpose(0,1).reshape(-1)
(
loc_pi_1_,
loc_pi_2_,
loc_pi_3_,
loc_pi_4_
) = torch.split(pi_all, loc_pi_1_.reshape(-1).size(0), dim=0)
loc_pi_1_ = loc_pi_1_.view(-1, 4)
loc_pi_2_ = loc_pi_2_.view(-1, 4)
loc_pi_3_ = loc_pi_3_.view(-1, 4)
loc_pi_4_ = loc_pi_4_.view(-1, 4)
_loss_l = (
loc_pi_1_*loss_l_1 +
loc_pi_2_*loss_l_2 +
loc_pi_3_*loss_l_3 +
loc_pi_4_*loss_l_4
)
epsi = 10**-9
# balance parameter
balance = 2.0
loss_l = -torch.log(_loss_l + epsi)/balance
loss_l = loss_l.sum()
if self.cls_type == 'Type-1':
# Classification loss (Type-1)
conf_pi_1_ = conf_pi_1.view(-1, 1)
conf_pi_2_ = conf_pi_2.view(-1, 1)
conf_pi_3_ = conf_pi_3.view(-1, 1)
conf_pi_4_ = conf_pi_4.view(-1, 1)
conf_pi_all = torch.stack([
conf_pi_1_.reshape(-1),
conf_pi_2_.reshape(-1),
conf_pi_3_.reshape(-1),
conf_pi_4_.reshape(-1)
])
conf_pi_all = conf_pi_all.transpose(0,1)
conf_pi_all = (torch.softmax(conf_pi_all, dim=1)).transpose(0,1).reshape(-1)
(
conf_pi_1_,
conf_pi_2_,
conf_pi_3_,
conf_pi_4_
) = torch.split(conf_pi_all, conf_pi_1_.reshape(-1).size(0), dim=0)
conf_pi_1_ = conf_pi_1_.view(conf_pi_1.size(0), -1)
conf_pi_2_ = conf_pi_2_.view(conf_pi_2.size(0), -1)
conf_pi_3_ = conf_pi_3_.view(conf_pi_3.size(0), -1)
conf_pi_4_ = conf_pi_4_.view(conf_pi_4.size(0), -1)
conf_var_1 = torch.sigmoid(conf_var_1)
conf_var_2 = torch.sigmoid(conf_var_2)
conf_var_3 = torch.sigmoid(conf_var_3)
conf_var_4 = torch.sigmoid(conf_var_4)
rand_val_1 = torch.randn(conf_var_1.size(0), conf_var_1.size(1), conf_var_1.size(2))
rand_val_2 = torch.randn(conf_var_2.size(0), conf_var_2.size(1), conf_var_2.size(2))
rand_val_3 = torch.randn(conf_var_3.size(0), conf_var_3.size(1), conf_var_3.size(2))
rand_val_4 = torch.randn(conf_var_4.size(0), conf_var_4.size(1), conf_var_4.size(2))
batch_conf_1 = (conf_mu_1+torch.sqrt(conf_var_1)*rand_val_1).view(-1, self.num_classes)
batch_conf_2 = (conf_mu_2+torch.sqrt(conf_var_2)*rand_val_2).view(-1, self.num_classes)
batch_conf_3 = (conf_mu_3+torch.sqrt(conf_var_3)*rand_val_3).view(-1, self.num_classes)
batch_conf_4 = (conf_mu_4+torch.sqrt(conf_var_4)*rand_val_4).view(-1, self.num_classes)
loss_c_1 = log_sum_exp(batch_conf_1) - batch_conf_1.gather(1, conf_t.view(-1, 1))
loss_c_2 = log_sum_exp(batch_conf_2) - batch_conf_2.gather(1, conf_t.view(-1, 1))
loss_c_3 = log_sum_exp(batch_conf_3) - batch_conf_3.gather(1, conf_t.view(-1, 1))
loss_c_4 = log_sum_exp(batch_conf_4) - batch_conf_4.gather(1, conf_t.view(-1, 1))
loss_c = (
loss_c_1 * conf_pi_1_.view(-1, 1) +
loss_c_2 * conf_pi_2_.view(-1, 1) +
loss_c_3 * conf_pi_3_.view(-1, 1) +
loss_c_4 * conf_pi_4_.view(-1, 1)
)
loss_c = loss_c.view(pos.size()[0], pos.size()[1])
loss_c[pos] = 0 # filter out pos boxes for now : true -> zero
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_mu_1)
neg_idx = neg.unsqueeze(2).expand_as(conf_mu_1)
batch_conf_1_ = conf_mu_1+torch.sqrt(conf_var_1)*rand_val_1
batch_conf_2_ = conf_mu_2+torch.sqrt(conf_var_2)*rand_val_2
batch_conf_3_ = conf_mu_3+torch.sqrt(conf_var_3)*rand_val_3
batch_conf_4_ = conf_mu_4+torch.sqrt(conf_var_4)*rand_val_4
conf_pred_1 = batch_conf_1_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_2 = batch_conf_2_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_3 = batch_conf_3_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_4 = batch_conf_4_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos+neg).gt(0)]
loss_c_1 = log_sum_exp(conf_pred_1) - conf_pred_1.gather(1, targets_weighted.view(-1, 1))
loss_c_2 = log_sum_exp(conf_pred_2) - conf_pred_2.gather(1, targets_weighted.view(-1, 1))
loss_c_3 = log_sum_exp(conf_pred_3) - conf_pred_3.gather(1, targets_weighted.view(-1, 1))
loss_c_4 = log_sum_exp(conf_pred_4) - conf_pred_4.gather(1, targets_weighted.view(-1, 1))
_conf_pi_1 = conf_pi_1_[(pos+neg).gt(0)]
_conf_pi_2 = conf_pi_2_[(pos+neg).gt(0)]
_conf_pi_3 = conf_pi_3_[(pos+neg).gt(0)]
_conf_pi_4 = conf_pi_4_[(pos+neg).gt(0)]
loss_c = (
loss_c_1 * _conf_pi_1.view(-1, 1) +
loss_c_2 * _conf_pi_2.view(-1, 1) +
loss_c_3 * _conf_pi_3.view(-1, 1) +
loss_c_4 * _conf_pi_4.view(-1, 1)
)
loss_c = loss_c.sum()
else:
# Classification loss (Type-2)
# more details are in our supplementary material
conf_pi_1_ = conf_pi_1.view(-1, 1)
conf_pi_2_ = conf_pi_2.view(-1, 1)
conf_pi_3_ = conf_pi_3.view(-1, 1)
conf_pi_4_ = conf_pi_4.view(-1, 1)
conf_pi_all = torch.stack([
conf_pi_1_.reshape(-1),
conf_pi_2_.reshape(-1),
conf_pi_3_.reshape(-1),
conf_pi_4_.reshape(-1)
])
conf_pi_all = conf_pi_all.transpose(0,1)
conf_pi_all = (torch.softmax(conf_pi_all, dim=1)).transpose(0,1).reshape(-1)
(
conf_pi_1_,
conf_pi_2_,
conf_pi_3_,
conf_pi_4_
) = torch.split(conf_pi_all, conf_pi_1_.reshape(-1).size(0), dim=0)
conf_pi_1_ = conf_pi_1_.view(conf_pi_1.size(0), -1)
conf_pi_2_ = conf_pi_2_.view(conf_pi_2.size(0), -1)
conf_pi_3_ = conf_pi_3_.view(conf_pi_3.size(0), -1)
conf_pi_4_ = conf_pi_4_.view(conf_pi_4.size(0), -1)
conf_var_1 = torch.sigmoid(conf_var_1)
conf_var_2 = torch.sigmoid(conf_var_2)
conf_var_3 = torch.sigmoid(conf_var_3)
conf_var_4 = torch.sigmoid(conf_var_4)
rand_val_1 = torch.randn(conf_var_1.size(0), conf_var_1.size(1), conf_var_1.size(2))
rand_val_2 = torch.randn(conf_var_2.size(0), conf_var_2.size(1), conf_var_2.size(2))
rand_val_3 = torch.randn(conf_var_3.size(0), conf_var_3.size(1), conf_var_3.size(2))
rand_val_4 = torch.randn(conf_var_4.size(0), conf_var_4.size(1), conf_var_4.size(2))
batch_conf_1 = (conf_mu_1+torch.sqrt(conf_var_1)*rand_val_1).view(-1, self.num_classes)
batch_conf_2 = (conf_mu_2+torch.sqrt(conf_var_2)*rand_val_2).view(-1, self.num_classes)
batch_conf_3 = (conf_mu_3+torch.sqrt(conf_var_3)*rand_val_3).view(-1, self.num_classes)
batch_conf_4 = (conf_mu_4+torch.sqrt(conf_var_4)*rand_val_4).view(-1, self.num_classes)
soft_max = nn.Softmax(dim=1)
epsi = 10**-9
weighted_softmax_out = (
soft_max(batch_conf_1)*conf_pi_1_.view(-1, 1) +
soft_max(batch_conf_2)*conf_pi_2_.view(-1, 1) +
soft_max(batch_conf_3)*conf_pi_3_.view(-1, 1) +
soft_max(batch_conf_4)*conf_pi_4_.view(-1, 1)
)
softmax_out_log = -torch.log(weighted_softmax_out+epsi)
loss_c = softmax_out_log.gather(1, conf_t.view(-1,1))
loss_c = loss_c.view(pos.size()[0], pos.size()[1])
loss_c[pos] = 0 # filter out pos boxes for now : true -> zero
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_mu_1)
neg_idx = neg.unsqueeze(2).expand_as(conf_mu_1)
batch_conf_1_ = conf_mu_1+torch.sqrt(conf_var_1)*rand_val_1
batch_conf_2_ = conf_mu_2+torch.sqrt(conf_var_2)*rand_val_2
batch_conf_3_ = conf_mu_3+torch.sqrt(conf_var_3)*rand_val_3
batch_conf_4_ = conf_mu_4+torch.sqrt(conf_var_4)*rand_val_4
conf_pred_1 = batch_conf_1_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_2 = batch_conf_2_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_3 = batch_conf_3_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
conf_pred_4 = batch_conf_4_[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos+neg).gt(0)]
_conf_pi_1 = conf_pi_1_[(pos+neg).gt(0)]
_conf_pi_2 = conf_pi_2_[(pos+neg).gt(0)]
_conf_pi_3 = conf_pi_3_[(pos+neg).gt(0)]
_conf_pi_4 = conf_pi_4_[(pos+neg).gt(0)]
weighted_softmax_out = (
soft_max(conf_pred_1)*_conf_pi_1.view(-1, 1) +
soft_max(conf_pred_2)*_conf_pi_2.view(-1, 1) +
soft_max(conf_pred_3)*_conf_pi_3.view(-1, 1) +
soft_max(conf_pred_4)*_conf_pi_4.view(-1, 1)
)
softmax_out_log = -torch.log(weighted_softmax_out+epsi)
loss_c = softmax_out_log.gather(1, targets_weighted.view(-1,1))
loss_c = loss_c.sum()
N = num_pos.data.sum()
loss_l /= N
loss_c /= N
return loss_l, loss_c
| 44.97971 | 101 | 0.593827 |
0ed3c0fd6622583d13c2e3997818d75ff6581a04 | 1,470 | py | Python | model-optimizer/extensions/front/mxnet/reshape_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 3 | 2020-02-09T23:25:37.000Z | 2021-01-19T09:44:12.000Z | model-optimizer/extensions/front/mxnet/reshape_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/front/mxnet/reshape_ext.py | zhoub/dldt | e42c01cf6e1d3aefa55e2c5df91f1054daddc575 | [
"Apache-2.0"
] | 2 | 2020-04-18T16:24:39.000Z | 2021-01-19T09:42:19.000Z | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.front.extractor import FrontExtractorOp
from mo.ops.reshape import Reshape
class ReshapeFrontExtractor(FrontExtractorOp):
    """Front extractor turning an MXNet Reshape symbol into an MO Reshape node."""
    op = 'Reshape'
    enabled = True

    @staticmethod
    def extract(node):
        """Read the 'shape' attribute and populate the MO Reshape operation.

        Returns False (extraction failure) when the shape contains the
        special MXNet codes -2/-3/-4, which MO does not support.
        """
        layer_attrs = get_mxnet_layer_attrs(node.symbol_dict)
        shape = layer_attrs.tuple("shape", int, None)
        for value in shape:
            if value in (-2, -3, -4):
                log.error('The attribute "shape" of the operation "{}" contains value "{}" which is not supported.'.
                          format(node.soft_get('name'), value))
                return False
        # update the attributes of the node
        Reshape.update_node_stat(node, {'dim': np.array(shape)})
        return ReshapeFrontExtractor.enabled
| 32.666667 | 116 | 0.681633 |
5d97d90efec6c45d927ea7c62b33951dfcc3ddb3 | 990 | py | Python | cohesity_management_sdk/models/remediation_state_update_infected_file_params_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | 1 | 2021-01-07T20:36:22.000Z | 2021-01-07T20:36:22.000Z | cohesity_management_sdk/models/remediation_state_update_infected_file_params_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/remediation_state_update_infected_file_params_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class RemediationStateUpdateInfectedFileParamsEnum(object):
    """Implementation of the 'RemediationState_UpdateInfectedFileParams' enum.

    Remediation state of an infected file. Leaving the remediation state
    unset resets the infected file.

    'kQuarantine' blocks client access to the file; an administrator must
    manually delete, rescan or unquarantine it.

    'kUnquarantine' restores client access after an administrator manually
    moves the file out of quarantine; unquarantined files are not scanned
    for viruses again until manually reset.

    Attributes:
        KQUARANTINE: quarantined-state constant.
        KUNQUARANTINE: unquarantined-state constant.
    """

    KQUARANTINE = 'kQuarantine'
    KUNQUARANTINE = 'kUnquarantine'
| 33 | 78 | 0.741414 |
1115b33a2d13fe1f6bc6677555a2e09a87beb75f | 166 | py | Python | tests/web_platform/css_flexbox_1/test_flexbox_flex_N_N_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | null | null | null | tests/web_platform/css_flexbox_1/test_flexbox_flex_N_N_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | null | null | null | tests/web_platform/css_flexbox_1/test_flexbox_flex_N_N_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
] | 1 | 2020-01-16T01:56:41.000Z | 2020-01-16T01:56:41.000Z | from tests.utils import W3CTestCase
class TestFlexbox_FlexNNNpercent(W3CTestCase):
    """W3C CSS Flexbox suite: 'flex: N N N%' shorthand tests.

    Test methods are generated at class-creation time by find_tests() from
    the reference files matching the given prefix and injected directly
    into the class namespace via vars().update.
    """
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-N-N-Npercent'))
| 27.666667 | 80 | 0.807229 |
139b674d73bace07663e960f930ac1716e14d599 | 2,047 | py | Python | a4/model_embeddings.py | Folk19/CS224N | cf57a707256453b670371f0ae5000587b496ee5b | [
"MIT"
] | null | null | null | a4/model_embeddings.py | Folk19/CS224N | cf57a707256453b670371f0ae5000587b496ee5b | [
"MIT"
] | null | null | null | a4/model_embeddings.py | Folk19/CS224N | cf57a707256453b670371f0ae5000587b496ee5b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2018-19: Homework 4
model_embeddings.py: Embeddings for the NMT model
Pencheng Yin <pcyin@cs.cmu.edu>
Sahil Chopra <schopra8@stanford.edu>
Anand Dhoot <anandd@stanford.edu>
"""
import torch.nn as nn
class ModelEmbeddings(nn.Module):
    """
    Class that converts input words to their embeddings.
    """

    def __init__(self, embed_size, vocab):
        """
        Init the Embedding layers.

        @param embed_size (int): Embedding size (dimensionality)
        @param vocab (Vocab): Vocabulary object containing src and tgt languages
                              See vocab.py for documentation.
        """
        super(ModelEmbeddings, self).__init__()
        self.embed_size = embed_size

        # One lookup table per language; the pad token's row is held at
        # zero by padding_idx so padding never contributes gradients.
        src_pad_token_idx = vocab.src['<pad>']
        tgt_pad_token_idx = vocab.tgt['<pad>']
        self.source = nn.Embedding(len(vocab.src), embed_size,
                                   padding_idx=src_pad_token_idx)
        self.target = nn.Embedding(len(vocab.tgt), embed_size,
                                   padding_idx=tgt_pad_token_idx)
| 34.116667 | 98 | 0.602345 |
92f22263fb775cd2fe9e2f67a71b6e5643670ab0 | 12,903 | py | Python | src/models/inception_resnet_v2.py | KittenCN/pyFaceNet | 0804d06a3533a83ff865a3c4343cfca2a5cbe063 | [
"MIT"
] | 1 | 2020-04-27T22:52:14.000Z | 2020-04-27T22:52:14.000Z | src/models/inception_resnet_v2.py | KittenCN/pyFaceNet | 0804d06a3533a83ff865a3c4343cfca2a5cbe063 | [
"MIT"
] | null | null | null | src/models/inception_resnet_v2.py | KittenCN/pyFaceNet | 0804d06a3533a83ff865a3c4343cfca2a5cbe063 | [
"MIT"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Inception-Renset-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block (Inception-ResNet-A)."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 48, 3, scope='Conv2d_0b_3x3')
            branch_2 = slim.conv2d(branch_2, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat(3, [branch_0, branch_1, branch_2])
        # 1x1 projection back to the input depth for the residual sum;
        # deliberately no batch norm or activation here.
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * up
        if activation_fn:
            net = activation_fn(net)
        return net
# Inception-Renset-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block (Inception-ResNet-B)."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            # Factorized 7x7 convolution: 1x7 followed by 7x1.
            branch_1 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 160, [1, 7],
                                   scope='Conv2d_0b_1x7')
            branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                   scope='Conv2d_0c_7x1')
        mixed = tf.concat(3, [branch_0, branch_1])
        # 1x1 projection back to the input depth; no norm / activation.
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * up
        if activation_fn:
            net = activation_fn(net)
        return net
# Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block (Inception-ResNet-C)."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            # Factorized 3x3 convolution: 1x3 followed by 3x1.
            branch_1 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 224, [1, 3],
                                   scope='Conv2d_0b_1x3')
            branch_1 = slim.conv2d(branch_1, 256, [3, 1],
                                   scope='Conv2d_0c_3x1')
        mixed = tf.concat(3, [branch_0, branch_1])
        # 1x1 projection back to the input depth; no norm / activation.
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * up
        if activation_fn:
            net = activation_fn(net)
        return net
def inference(images, keep_probability, phase_train=True, weight_decay=0.0, reuse=None):
    """Build Inception-ResNet-v2 embeddings for a batch of images.

    Wraps inception_resnet_v2 with the conv-layer defaults used by this
    project: truncated-normal init, L2 weight regularization and batch
    normalization on every slim.conv2d.
    """
    # Batch-norm configuration applied through arg_scope to every conv.
    bn_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
    }
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=bn_params):
        return inception_resnet_v2(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability, reuse=reuse)
def inception_resnet_v2(inputs, is_training=True,
                        dropout_keep_prob=0.8,
                        reuse=None,
                        scope='InceptionResnetV2'):
    """Creates the Inception Resnet V2 model up to the pre-logits layer.

    Note: unlike the reference implementation, this variant stops at the
    flattened pre-logits bottleneck (no num_classes / logits layer).

    Args:
      inputs: a 4-D tensor of size [batch_size, height, width, 3].
      is_training: whether is training or not.
      dropout_keep_prob: float, the fraction to keep before final layer.
      reuse: whether or not the network and its variables should be reused. To be
        able to reuse 'scope' must be given.
      scope: Optional variable_scope.

    Returns:
      net: the flattened pre-logits activations (the embedding).
      end_points: the set of end_points from the inception model.
    """
    end_points = {}

    with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):

                # --- Stem: shrink 299x299 input down to 35x35x192 ---
                # 149 x 149 x 32
                net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
                                  scope='Conv2d_1a_3x3')
                end_points['Conv2d_1a_3x3'] = net
                # 147 x 147 x 32
                net = slim.conv2d(net, 32, 3, padding='VALID',
                                  scope='Conv2d_2a_3x3')
                end_points['Conv2d_2a_3x3'] = net
                # 147 x 147 x 64
                net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
                end_points['Conv2d_2b_3x3'] = net
                # 73 x 73 x 64
                net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_3a_3x3')
                end_points['MaxPool_3a_3x3'] = net
                # 73 x 73 x 80
                net = slim.conv2d(net, 80, 1, padding='VALID',
                                  scope='Conv2d_3b_1x1')
                end_points['Conv2d_3b_1x1'] = net
                # 71 x 71 x 192
                net = slim.conv2d(net, 192, 3, padding='VALID',
                                  scope='Conv2d_4a_3x3')
                end_points['Conv2d_4a_3x3'] = net
                # 35 x 35 x 192
                net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_5a_3x3')
                end_points['MaxPool_5a_3x3'] = net

                # --- Mixed_5b: four-branch inception module ---
                # 35 x 35 x 320
                with tf.variable_scope('Mixed_5b'):
                    with tf.variable_scope('Branch_0'):
                        tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
                    with tf.variable_scope('Branch_1'):
                        tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
                        tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
                                                    scope='Conv2d_0b_5x5')
                    with tf.variable_scope('Branch_2'):
                        tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
                        tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
                                                    scope='Conv2d_0b_3x3')
                        tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
                                                    scope='Conv2d_0c_3x3')
                    with tf.variable_scope('Branch_3'):
                        tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
                                                     scope='AvgPool_0a_3x3')
                        tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
                                                   scope='Conv2d_0b_1x1')
                    net = tf.concat(3, [tower_conv, tower_conv1_1,
                                        tower_conv2_2, tower_pool_1])

                end_points['Mixed_5b'] = net
                # 10 x Inception-ResNet-A blocks.
                net = slim.repeat(net, 10, block35, scale=0.17)

                # --- Mixed_6a: reduction from 35x35 to 17x17 ---
                # 17 x 17 x 1024
                with tf.variable_scope('Mixed_6a'):
                    with tf.variable_scope('Branch_0'):
                        tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
                                                 scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_1'):
                        tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
                        tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
                                                    scope='Conv2d_0b_3x3')
                        tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
                                                    stride=2, padding='VALID',
                                                    scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_2'):
                        tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                                     scope='MaxPool_1a_3x3')
                    net = tf.concat(3, [tower_conv, tower_conv1_2, tower_pool])

                end_points['Mixed_6a'] = net
                # 20 x Inception-ResNet-B blocks.
                net = slim.repeat(net, 20, block17, scale=0.10)

                # --- Mixed_7a: reduction from 17x17 to 8x8 ---
                with tf.variable_scope('Mixed_7a'):
                    with tf.variable_scope('Branch_0'):
                        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
                        tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                                   padding='VALID', scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_1'):
                        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
                        tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
                                                    padding='VALID', scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_2'):
                        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
                        tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                                    scope='Conv2d_0b_3x3')
                        tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
                                                    padding='VALID', scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_3'):
                        tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                                     scope='MaxPool_1a_3x3')
                    net = tf.concat(3, [tower_conv_1, tower_conv1_1,
                                        tower_conv2_2, tower_pool])

                end_points['Mixed_7a'] = net

                # 9 x Inception-ResNet-C blocks, then one final C block
                # without activation before the 1x1 expansion.
                net = slim.repeat(net, 9, block8, scale=0.20)
                net = block8(net, activation_fn=None)

                net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
                end_points['Conv2d_7b_1x1'] = net

                with tf.variable_scope('Logits'):
                    end_points['PrePool'] = net
                    # Global average pool over the remaining spatial dims.
                    #pylint: disable=no-member
                    net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
                                          scope='AvgPool_1a_8x8')
                    net = slim.flatten(net)

                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                                       scope='Dropout')

                    end_points['PreLogitsFlatten'] = net

    return net, end_points
| 51.819277 | 91 | 0.530807 |
265482082b289cbcb80d4113b90a92af6c9de809 | 19,003 | py | Python | src/render/image.py | cfmcdonald-78/Hexcrawler | 79ca4ab9327abf08de1743612c23eb89aa53a2b9 | [
"MIT"
] | null | null | null | src/render/image.py | cfmcdonald-78/Hexcrawler | 79ca4ab9327abf08de1743612c23eb89aa53a2b9 | [
"MIT"
] | null | null | null | src/render/image.py | cfmcdonald-78/Hexcrawler | 79ca4ab9327abf08de1743612c23eb89aa53a2b9 | [
"MIT"
] | 1 | 2021-12-01T01:38:12.000Z | 2021-12-01T01:38:12.000Z | '''
Created on Jun 29, 2012
@author: Chris
'''
import pygame, os, random
import gamemap.terrain as terrain, gamemap.site_type as site_type, mob.unit as unit, mob.hero as hero, gamemap.hexgrid as hexgrid, mob.item as item
from util.tools import make_2D_list
import core.event_manager as event_manager
import text
MAX_FRAME = 2
TICKS_PER_FRAME = 10
NUM_HASHES = 256
UNIT_WIDTH = 64
UNIT_HEIGHT = 64
MAP_UNIT_WIDTH = 48
MAP_UNIT_HEIGHT = 48
ITEM_ICON_WIDTH = 48
ITEM_ICON_HEIGHT = 48
HEX_WIDTH = 54
HEX_HEIGHT = 64
UI_ICON_WIDTH = 32
UI_ICON_HEIGHT = 32
SM_BUTTON_WIDTH = 24
SM_BUTTON_HEIGHT = 24
CONTROL_BUTTON_WIDTH = 32
CONTROL_BUTTON_HEIGHT = 32
PLAYER_SUB_COLOR = (255, 0, 255)
NORMAL_WATER_COLOR1 = (45, 84, 153)
NORMAL_WATER_COLOR2 = (94, 105, 241)
FLOODED_WATER_COLOR1 = ( 114, 105, 96)
FLOODED_WATER_COLOR2 = (203, 187, 171)
HIDDEN_TRANSPARENCY = 128
class ImageManager(object):
    def __init__(self, font_file):
        """Load and cache every sprite, icon and 9-patch the renderer uses.

        font_file: path to the font handed to text.TextDrawer.
        NOTE(review): assumes pygame.display has already been initialized,
        since prep_image/convert need a display surface -- confirm at the
        call site.
        """
        # --- Unit art: full-size portraits and smaller map sprites ---
        self.unit_images = {}
        self.unit_map_images = {}
        for unit_type in unit.unit_types:
            # Hero art is loaded per-sex below, not per unit type.
            if "Hero" in unit_type.name:
                continue
            self.unit_images[unit_type.name] = self.prep_image("unit", unit_type.name + ".png")
            self.unit_map_images[unit_type.name] = self.prep_image("unit", unit_type.name + ".png", (MAP_UNIT_WIDTH, MAP_UNIT_HEIGHT))
        for sex in hero.sexes:
            icon_name = sex + "Hero"
            self.unit_images[icon_name] = self.prep_image("unit", icon_name + ".png")
            self.unit_map_images[icon_name] = self.prep_image("unit", icon_name+ ".png", (MAP_UNIT_WIDTH, MAP_UNIT_HEIGHT))
        self.boat = self.prep_image("unit", "boat.png", (MAP_UNIT_WIDTH, MAP_UNIT_HEIGHT))

        # --- Site art; sackable/reactivatable sites also get a sacked variant ---
        self.site_images = {}
        self.sacked_images = {}
        for image_site_type in site_type.site_types.values():
            self.site_images[image_site_type.name] = self.prep_image("map", image_site_type.name + ".png")
            if image_site_type.loot_effects != None and (image_site_type.loot_effects.new_status == site_type.SACKED or
                                                       image_site_type.loot_effects.new_status == site_type.ACTIVE):
                self.sacked_images[image_site_type.name] = self.prep_image("map", "sacked " + image_site_type.name + ".png")

#        self.upgrade_images = {}
#        for upgrade_name in site_upgrade.upgrades_by_name:
#            self.upgrade_images[upgrade_name] = self.prep_image("map", upgrade_name + ".png")

        # Random values consumed via hash_index (see inc_hash_index).
        self.hash_values = [random.randint(0, 2 ** 32) for i in range(NUM_HASHES)]

        # --- Terrain: variable number of variants per terrain type,
        #     discovered by probing "<name>0.png", "<name>1.png", ... ---
        self.hex_images = {}
        self.mini_hex_images = {}
        for terrain_type in terrain.hex_types:
            hex_images = []
            i = 0
            next_img_file = terrain_type.name + str(i) + ".png"
            while (self.image_exists("map", next_img_file)):
                hex_images.append(self.prep_image("map",next_img_file))
                i += 1
                next_img_file = terrain_type.name + str(i) + ".png"
            self.hex_images[terrain_type.name] = hex_images
            self.mini_hex_images[terrain_type.name] = self.prep_image("map", terrain_type.name + "_mini.png")
        self.road = self.prep_image("map", "road.png")

        # River sprites keyed by rotation index / (out,in) angle pairs;
        # the (3,1) case is a vertical mirror of (2,1).
        self.river = {}
        for angle in range(0, 4):
            self.river[angle] = self.prep_image("map", "river" + str(angle) + ".png")
        self.river[(2,1)] = self.prep_image("map", "river21.png")
        self.river[(3,1)] = pygame.transform.flip(self.river[(2,1)], False, True) # self.prep_image("map", "river21.png")
        self.river[(2,2)] = self.prep_image("map", "river22.png")
        self.river_frag = self.prep_image("map", "river frag.png")
        self.zone_border = self.prep_image("map", "zone_border.png")
        self.storm = self.prep_animation("map", "storm")
        self.fire = self.prep_animation("map", "fire")

        # --- UI overlays and combat event icons ---
        self.selected_hex = self.prep_image("ui", "selected_hex.png")
        self.legal_move = self.prep_image("ui", "legal_move.png")
        self.attack_move = self.prep_image("ui", "attack_move.png")
        self.selected_unit = self.prep_image("ui", "selected_unit.png")
        self.selected_item = self.prep_image("ui", "selected_item.png")
        self.dead_image = self.prep_image("ui", "dead.png")
        self.unit_slot = self.prep_image("ui", "unit_slot.png")
        self.combat_images = {}
        self.wounded_image = self.prep_image("ui", "wounded.png")
        self.restrained_image = self.prep_image("ui", "restrained.png")
        self.burning_image = self.prep_image("ui", "burning.png")
        self.combat_images[event_manager.UNIT_HIT] = self.prep_image("ui", "hit.png")
        self.combat_images[event_manager.UNIT_BLOCK] = self.prep_image("ui", "block.png")
        self.combat_images[event_manager.RANGED_ATTACK] = self.prep_image("ui", "ranged.png")
        self.combat_images[event_manager.HEAL_ATTEMPT] = self.prep_image("ui", "magic.png")
        #self.combat_images[combat.HEALED] = self.prep_image("heal.png")
        self.combat_images[event_manager.UNIT_HEAL] = self.prep_image("ui", "heal.png")

        # --- Item / inventory art ---
        self.equip_slots = {}
        self.item_icons = {}
        for item_type in item.item_types:
            self.equip_slots[item_type] = self.prep_image("item", item_type + "_slot.png")
        for item_subtype in item.item_subtypes:
            self.item_icons[item_subtype] = self.prep_image("item", item_subtype + ".png")
        self.backpack_slot = self.prep_image("item", "backpack_slot.png")

        # --- Misc UI icons and buttons ---
        self.banner = self.prep_image("ui", "banner.png")
        self.fogged_hex = self.prep_image("ui", "fogged_hex.png")
        self.invisible_hex = self.prep_image("ui", "invisible_hex.png")
        self.mini_invisible_hex = self.prep_image("ui", "invisible_hex_mini.png")
        self.l_arrow = self.prep_image("ui", "l_arrow.png")
        self.r_arrow = self.prep_image("ui", "r_arrow.png")
        self.transfer = self.prep_image("ui", "transfer.png")
        self.disband = self.prep_image("ui", "disband.png")
        self.prev_site = self.prep_image("ui", "prev_site.png")
        self.next_site = self.prep_image("ui", "next_site.png")
        self.minimap = self.prep_image("ui", "minimap.png")
        self.center_view = self.prep_image("ui", "center_view.png")
        self.end_turn = self.prep_image("ui", "end_turn.png")
        self.tools = self.prep_image("ui", "tools.png")
        self.gold = self.prep_image("ui", "gold.png")
        self.gold_small = self.prep_image("ui", "gold_24.png")
        self.chains = self.prep_image("ui", "chains.png")
        self.reputation = self.prep_image("ui", "reputation.png")
        self.fame = self.prep_image("ui", "fame.png")
        self.income = self.prep_image("ui", "income.png")
        self.revolt = self.prep_image("ui", "revolt.png")
        self.embassy = self.prep_image("ui", "embassy.png")
        self.exhaustion = self.prep_image("ui", "exhaustion.png")
        self.supply = self.prep_image("ui", "supply.png")
        self.blood = self.prep_image("ui", "blood.png")
        self.inspire = self.prep_image("ui", "inspire.png")
        self.foot_move = self.prep_image("ui", "foot_move.png")
        self.naval_move = self.prep_image("ui", "naval_move.png")
        self.strength = self.prep_image("ui", "strength.png")
        self.armor = self.prep_image("ui", "armor.png")
        self.looting = self.prep_image("ui", "looting.png")
        self.health = self.prep_image("ui", "health.png")
        self.window_9patch = self.prep_9patch("ui", "window_9patch.png")
        self.region_9patch = self.prep_9patch("ui", "region_9patch.png")
        self.button_9patch = self.prep_9patch("ui", "button_9patch.png")
        self.button_down_9patch = self.prep_9patch("ui", "button_down_9patch.png")
        self.fight_line = self.prep_image("ui", "fight_line.png")
        self.shoot_line = self.prep_image("ui", "shoot_line.png")
        #self.icon = self.prep_image("icon.png")

        self.text = text.TextDrawer(font_file)
        self.PAD = 2
        # Scratch surfaces used by blit_alpha, keyed by (width, height).
        self.temp_surfaces = {}
        self.temp_surfaces[(MAP_UNIT_WIDTH, MAP_UNIT_HEIGHT)] = pygame.Surface((MAP_UNIT_WIDTH, MAP_UNIT_HEIGHT)).convert()
#        self.initialize()
def blit_alpha(self, target, source, location, opacity):
x, y = location
temp = self.temp_surfaces[(source.get_width(), source.get_height())]
temp.blit(target, (-x, -y))
temp.blit(source, (0, 0))
temp.set_alpha(opacity)
target.blit(temp, location)
def initialize(self):
event_manager.add_listener(self.handle_game_events, event_manager.TICK)
self.frame_num = 0
self.frame_inc = 1
self.tick_count = 0
# self.hash_index = 0
def inc_hash_index(self):
self.hash_index = (self.hash_index + 1) % NUM_HASHES
def handle_game_events(self, event):
if event.type == event_manager.TICK:
if self.tick_count < TICKS_PER_FRAME:
self.tick_count += 1
return
self.tick_count = 0
if self.frame_num == MAX_FRAME:
self.frame_inc = -1
elif self.frame_num == 0:
self.frame_inc = 1
self.frame_num += self.frame_inc
def draw_window(self, surface, rect):
self.draw_9patch(surface, self.window_9patch, rect)
def draw_button(self, surface, rect, down):
if down:
self.draw_9patch(surface, self.button_down_9patch, rect)
else:
self.draw_9patch(surface, self.button_9patch, rect)
def draw_textbox(self, surface, rect, text_value, has_focus):
# self.draw_9patch(surface, self.textbox_9patch, rect)
if has_focus:
surface.fill((230, 230, 230), rect = rect)
else:
surface.fill((170, 170, 170), rect = rect)
self.text.draw_text(text_value, text.lg_font, rect.x + 4, rect.y + 4, surface, centered=False)
def draw_9patch(self, surface, (patch_size, nine_patch), rect):
x_scale, y_scale = False, False
if rect.width > patch_size * 3:
x_scale = True
if rect.height > patch_size * 3 :
y_scale = True
# raise ValueError("can't shrink 9 patch image")
x_pos = [rect.x, rect.x + patch_size,
rect.x + rect.width - patch_size, rect.x + rect.width]
y_pos = [rect.y, rect.y + patch_size,
rect.y + rect.height - patch_size, rect.y + rect.height]
for y in range(3):
if rect.height <= patch_size * 2 and y == 1:
continue
for x in range(3):
scaled = pygame.transform.scale(nine_patch[x][y],
((x_pos[x + 1] - x_pos[x]) if x_scale else patch_size,
(y_pos[y + 1] - y_pos[y]) if y_scale else patch_size))
surface.blit(scaled, (x_pos[x], y_pos[y]))
def draw_site(self, site, surface, mask_player, position):
if site.sacked():
site_image = self.sacked_images[site.site_type.name]
surface.blit(site_image, position)
else:
site_image = self.site_images[site.site_type.name]
site_pixels = pygame.PixelArray(site_image)
# swap in owner color for flags and the like on the site, to indicate ownership
owner_color = site.owner.get_color(site, mask_player)
site_pixels.replace(PLAYER_SUB_COLOR, owner_color)
del site_pixels # unlock image so pygame can draw it
surface.blit(site_image, position)
# swap color back
site_pixels = pygame.PixelArray(site_image)
site_pixels.replace(owner_color, PLAYER_SUB_COLOR)
del site_pixels
# if site.get_fixed_prisoner() != None:
# x, y = position
# surface.blit(self.chains, (x + 8, y + 8))
if site.get_embassy() != None:
x, y = position
embassy_pixels = pygame.PixelArray(self.embassy)
owner_color = site.get_embassy().get_color(site, mask_player)
embassy_pixels.replace(PLAYER_SUB_COLOR, owner_color)
del embassy_pixels # unlock image so pygame can draw it
surface.blit(self.embassy, (x + 8, y + 8))
# swap color back
embassy_pixels = pygame.PixelArray(self.embassy)
embassy_pixels.replace(owner_color, PLAYER_SUB_COLOR)
# draw upgrade images
# for upgrade_name in site.get_upgrades():
# surface.blit(self.upgrade_images[upgrade_name], position)
def pos_from_dir(self, x, y, direction):
if direction == hexgrid.WEST:
dx, dy = 6, 32
if direction == hexgrid.EAST:
dx, dy = 58, 32
if direction == hexgrid.NORTHEAST:
dx, dy = 46, 8
if direction == hexgrid.NORTHWEST:
dx, dy = 18, 8
if direction == hexgrid.SOUTHEAST:
dx, dy = 46, 56
if direction == hexgrid.SOUTHWEST:
dx, dy = 18, 56
return (x + dx , y + dy)
def draw_zone_border(self, surface, pixel_x, pixel_y, direction):
angle = direction * -60
if angle % 90 != 0:
pixel_x -= 12
pixel_y -= 13
image = pygame.transform.rotate(self.zone_border, angle)
surface.blit(image, (pixel_x, pixel_y))
def draw_road(self, surface, road, pixel_x, pixel_y):
for direction in road.connections:
angle = -60 * (direction - hexgrid.WEST)
image = pygame.transform.rotate(self.road, angle)
if angle % 90 != 0:
surface.blit(image, (pixel_x - 12, pixel_y - 12))
else:
surface.blit(image, (pixel_x, pixel_y))
def draw_river_image(self, surface, image, angle, pixel_x, pixel_y, flooded):
if angle % 90 != 0:
pixel_x -= 12
pixel_y -= 12
image = pygame.transform.rotate(image, angle)
if flooded:
river_pixels = pygame.PixelArray(image)
# swap in owner color for flags and the like on the site, to indicate ownership
# owner_color = site.owner.get_color(site.level, mask_player)
river_pixels.replace(NORMAL_WATER_COLOR1, FLOODED_WATER_COLOR1)
river_pixels.replace(NORMAL_WATER_COLOR2, FLOODED_WATER_COLOR2)
del river_pixels # unlock image so pygame can draw it
surface.blit(image, (pixel_x, pixel_y))
    def draw_river(self, surface, river, pixel_x, pixel_y):
        """Draw the river sprite for *river* at (pixel_x, pixel_y).

        Selects a pre-rendered sprite based on how many flows enter the hex
        (0, 1 or 2) and their relative angles; when no matching sprite
        exists, the river is patched together from one fragment per flow
        direction instead.
        """
        in_flows, out_flow = river.in_flows, river.out_flow
        image = None
        if len(in_flows) == 0:
            # Source tile: only an out-flow, straight sprite.
            image = self.river[0]
            angle = -60 * out_flow
        elif len(in_flows) == 1:
            # Sprite indexed by the absolute angle between in and out flow.
            abs_angle = hexgrid.get_abs_angle(in_flows[0], out_flow)
            image = self.river[abs_angle]
            # NOTE(review): appears to handle direction wrap-around when
            # choosing the rotation base -- confirm against hexgrid.get_abs_angle.
            if abs(in_flows[0] - out_flow) > abs_angle:
                angle = -60 * max(in_flows[0], out_flow)
            else:
                angle = -60 * min(in_flows[0], out_flow)
        elif len(in_flows) == 2:
            # Two in-flows: sprite keyed by the pair of clock angles.
            angle_ins = hexgrid.get_clock_angle(in_flows[0], in_flows[1])# hexgrid.get_abs_angle(in_flows[0], in_flows[1])
            angle_out = hexgrid.get_clock_angle(out_flow, in_flows[0]) #hexgrid.get_abs_angle(in_flows[0], out_flow)
            image = self.river.get((angle_out, angle_ins), None)
            angle = -60 * (out_flow - hexgrid.WEST)
        if image == None: # patch together something that looks okay
            # Fallback: draw one generic fragment per flow (ins then out).
            for in_flow in in_flows:
                angle = -60 * (in_flow - hexgrid.WEST)
                self.draw_river_image(surface, self.river_frag, angle, pixel_x, pixel_y, river.is_flooded)
            angle = -60 * (out_flow - hexgrid.WEST)
            self.draw_river_image(surface, self.river_frag, angle, pixel_x, pixel_y, river.is_flooded)
            return
        self.draw_river_image(surface, image, angle, pixel_x, pixel_y, river.is_flooded)
def prep_animation(self, sub_dir, base_file_name):
animation = []
for i in range(MAX_FRAME + 1):
animation.append(self.prep_image(sub_dir, base_file_name + str(i) + ".png"))
return animation
    def prep_9patch(self, sub_dir, image_file_name):
        """Slice a square source image into a 3x3 grid of equal patches.

        Returns (patch_size, nine_patch) where nine_patch[x][y] is an
        independent copy of the patch at grid cell (x, y).

        NOTE(review): this file uses Python 2 syntax elsewhere, so ``/ 3``
        below is integer division there; under Python 3 a width that is not
        a multiple of 3 would yield a float patch_size.
        """
        nine_patch = make_2D_list(3, 3, None)
        base_image = self.prep_image(sub_dir, image_file_name)
        patch_size = base_image.get_width() / 3
        # Only square source images are supported.
        assert(base_image.get_width() == base_image.get_height())
        for y in range(3):
            for x in range(3):
                nine_patch[x][y] = base_image.subsurface((x * patch_size, y * patch_size,
                                                          patch_size, patch_size)).copy()
        return patch_size, nine_patch
def image_exists(self, sub_dir, image_file_name):
return os.path.isfile( os.path.join('data', 'img', sub_dir, image_file_name))
    def prep_image(self, sub_dir, image_file_name, rescale = None):
        """Load data/img/<sub_dir>/<image_file_name> as a pygame surface
        with per-pixel alpha, optionally smooth-scaled to *rescale* (a
        (width, height) tuple).  Exits the program if loading fails.

        NOTE: Python 2 syntax (``except ..., msg`` / ``print`` statement).
        """
        try:
            # TODO: replace direct file access with packaged resources (setup.py)
            image = pygame.image.load(os.path.join('data', 'img', sub_dir, image_file_name))
            if rescale != None:
                image = pygame.transform.smoothscale(image, rescale)
        except pygame.error, message:
            print 'Cannot load image:', image_file_name
            raise SystemExit, message
        return image.convert_alpha()
def draw_animation(self, animation, pixel_x, pixel_y, surface):
surface.blit(animation[self.frame_num], (pixel_x, pixel_y))
def unit_image(self, unit, on_map = False):
if on_map:
return self.unit_map_images[unit.get_icon_name()]
else:
return self.unit_images[unit.get_icon_name()]
def hex_image(self, terrain, hex_index, mini=False):
if mini:
return self.mini_hex_images[terrain.name]
else:
# self.inc_hash_index()
num_images = len(self.hex_images[terrain.name])
# print "x: " + str(hex_x) + " y: " + str(hex_y) + "index: " + str(self.hash_index)
#print((self.hash_values[self.hash_index]) % num_images)
return self.hex_images[terrain.name][(self.hash_values[hex_index % NUM_HASHES]) % num_images]
| 44.92435 | 147 | 0.596748 |
a9309b62dac0b3e5bf1b3b1778a5fde169960cd8 | 3,698 | py | Python | feature_selection/word_inspect.py | sjayakum/csci544 | c452ed518fda0909836107668428791be90b82b4 | [
"Apache-2.0"
] | 1 | 2019-02-02T17:50:51.000Z | 2019-02-02T17:50:51.000Z | feature_selection/word_inspect.py | sjayakum/csci544 | c452ed518fda0909836107668428791be90b82b4 | [
"Apache-2.0"
] | null | null | null | feature_selection/word_inspect.py | sjayakum/csci544 | c452ed518fda0909836107668428791be90b82b4 | [
"Apache-2.0"
] | null | null | null | import re
import string
# NOTE(review): declared but never referenced in this chunk -- presumably
# populated elsewhere or dead; confirm before removing.
feature_list = {}
def tokenize(sentence):
    """Turn a list of raw word strings into a cleaned token list.

    Words are joined, lower-cased, punctuation-delimited and split; stop
    words (checked on the raw token, before stripping, as in the original
    pipeline) and tokens of length <= 1 after cleaning are dropped.
    """
    text = ' '.join(sentence).lower()
    for ch in ('.', ',', '&', '/'):
        text = text.replace(ch, ' ')
    text = text.replace('-', '')
    edge_chars = '\'\"-,.:;!?()*<>+@ '

    def clean(word):
        # Strip surrounding punctuation, remove digits, then strip again
        # because digit removal can expose new punctuation at the edges.
        word = word.rstrip(edge_chars).lstrip(edge_chars).strip('\n').strip('!')
        word = re.sub(r'\d', '', word)
        return word.rstrip(edge_chars).lstrip(edge_chars).strip('\n').strip('!')

    tokens = []
    for raw_word in text.split(' '):
        if raw_word in stop_words_dict:
            continue
        cleaned = clean(raw_word)
        if len(cleaned) > 1:
            tokens.append(cleaned)
    return tokens
# English stop words stored as dict keys (all values 0) so membership
# tests in tokenize() are O(1) hash lookups; the values are never used.
stop_words_dict = {'during': 0, 'has': 0, "it's": 0, 'very': 0, 'itself': 0, "why's": 0, "we'll": 0, 'hers': 0,
                   "isn't": 0, 'off': 0, 'we': 0, 'it': 0, 'the': 0, 'doing': 0, 'over': 0, 'its': 0, 'with': 0,
                   'so': 0, 'but': 0, 'they': 0, 'am': 0, 'until': 0, 'because': 0, "shouldn't": 0, "you're": 0,
                   'is': 0, "they're": 0, "you'd": 0, "mustn't": 0, 'would': 0, 'while': 0, 'should': 0, 'as': 0,
                   "i'd": 0, "we've": 0, 'when': 0, "wouldn't": 0, 'why': 0, "i'll": 0, 'theirs': 0, "aren't": 0,
                   'our': 0, 'from': 0, "we'd": 0, 'each': 0, 'only': 0, 'yourself': 0, 'been': 0, 'again': 0, 'of': 0,
                   'whom': 0, 'themselves': 0, 'or': 0, 'that': 0, 'me': 0, "how's": 0, 'those': 0, 'having': 0,
                   'was': 0, 'and': 0, 'few': 0, 'no': 0, 'any': 0, 'being': 0, 'an': 0, "let's": 0, "they'd": 0,
                   'own': 0, 'his': 0, 'herself': 0, 'before': 0, 'did': 0, 'too': 0, 'here': 0, 'were': 0, "that's": 0,
                   "what's": 0, "she'll": 0, 'i': 0, 'all': 0, 'have': 0, "weren't": 0, "you've": 0, "i'm": 0,
                   "he'd": 0, 'some': 0, 'into': 0, 'down': 0, 'this': 0, "she'd": 0, "i've": 0, 'do': 0, "can't": 0,
                   'for': 0, 'below': 0, 'through': 0, "don't": 0, 'more': 0, 'once': 0, "didn't": 0, 'same': 0,
                   "she's": 0, "they've": 0, "he'll": 0, 'not': 0, 'had': 0, 'such': 0, 'cannot': 0, 'about': 0,
                   'myself': 0, 'if': 0, "won't": 0, 'a': 0, 'how': 0, 'she': 0, 'you': 0, "we're": 0, "there's": 0,
                   'be': 0, 'yours': 0, "here's": 0, 'above': 0, 'at': 0, 'out': 0, 'does': 0, 'my': 0, 'to': 0,
                   'ought': 0, "hadn't": 0, "doesn't": 0, "couldn't": 0, 'he': 0, 'your': 0, 'ours': 0, 'up': 0,
                   'after': 0, "where's": 0, 'could': 0, 'under': 0, 'nor': 0, 'against': 0, 'further': 0, "they'll": 0,
                   'what': 0, 'then': 0, "you'll": 0, 'ourselves': 0, 'which': 0, 'between': 0, "shan't": 0, 'these': 0,
                   'in': 0, 'their': 0, "who's": 0, "he's": 0, 'yourselves': 0, 'himself': 0, 'both': 0, "wasn't": 0,
                   'him': 0, 'on': 0, 'them': 0, "when's": 0, 'there': 0, 'where': 0, 'than': 0, 'are': 0, 'her': 0,
                   "hasn't": 0, 'by': 0, 'other': 0, 'who': 0, "haven't": 0, 'most': 0}
# Build the vocabulary from the training corpus and write it to disk.
key_to_index = {}    # document key -> sequential index
documents_full = []  # raw word lists, one per document
with open('train-text.txt', 'r') as f:
    for index, each_line in enumerate(f):
        temp = each_line.split(' ')
        # temp[0] is the key; temp[1:] is the document, space separated
        key_to_index[temp[0]] = index
        documents_full.append(list(temp[1:]))

# Collect the set of distinct cleaned tokens across all documents.
vocab_train = set()
for each_sentence in documents_full:
    vocab_train.update(tokenize(each_sentence))

# One token per line.  Using "with" guarantees the handle is closed and
# the data flushed -- the original never closed this output file.
with open('vocabulary', 'w') as f:
    for each_vocab in vocab_train:
        f.write(each_vocab + '\n')
| 46.225 | 120 | 0.445646 |
c04f5c0d27e4c5360c0fe9422b7d389d212a9c3c | 4,646 | py | Python | zipline/examples/olmar.py | npezolano/zipline | 71effa5e98bd0425ac1863e1861c9b51fbc77242 | [
"Apache-2.0"
] | 1 | 2016-03-16T12:54:07.000Z | 2016-03-16T12:54:07.000Z | zipline/examples/olmar.py | Miles0918/zipline | e7a5e097c419bed7816d3cd6c370b5171db37b33 | [
"Apache-2.0"
] | null | null | null | zipline/examples/olmar.py | Miles0918/zipline | e7a5e097c419bed7816d3cd6c370b5171db37b33 | [
"Apache-2.0"
] | null | null | null | import sys
import logbook
import numpy as np
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
from zipline.finance import commission
# Route log records by severity: drop DEBUG, send INFO+ to stdout and
# ERROR+ to stderr (bubble=True lets records continue down the chain).
zipline_logging = logbook.NestedSetup([
    logbook.NullHandler(level=logbook.DEBUG, bubble=True),
    logbook.StreamHandler(sys.stdout, level=logbook.INFO),
    logbook.StreamHandler(sys.stderr, level=logbook.ERROR),
])
zipline_logging.push_application()
# Universe of tickers the algorithm trades.
STOCKS = ['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM']
# On-Line Portfolio Moving Average Reversion
# More info can be found in the corresponding paper:
# http://icml.cc/2012/papers/168.pdf
def initialize(algo, eps=1, window_length=5):
    """Set up OLMAR state on the TradingAlgorithm instance.

    :param eps: expected-return threshold of the mean-reversion update;
        the portfolio is pushed until the predicted return reaches eps.
    :param window_length: moving-average lookback in days.
    """
    algo.stocks = STOCKS
    algo.m = len(algo.stocks)
    algo.price = {}
    # Start from the uniform portfolio over the m assets.
    algo.b_t = np.ones(algo.m) / algo.m
    algo.last_desired_port = np.ones(algo.m) / algo.m
    algo.eps = eps
    algo.init = True
    algo.days = 0
    algo.window_length = window_length
    # Register the moving-average transform with the same lookback that
    # handle_data queries.  The original hard-coded 5 here, silently
    # diverging whenever a non-default window_length was passed in.
    algo.add_transform('mavg', window_length)
    algo.set_commission(commission.PerShare(cost=0))
def handle_data(algo, data):
    """Daily driver for OLMAR (On-Line Moving Average Reversion).

    Waits window_length days for the moving average to warm up, puts on
    the initial (uniform) portfolio once, then updates the weight vector
    b_t toward assets whose moving average sits above the current price
    (mean reversion), projecting the result back onto the simplex.
    """
    algo.days += 1
    if algo.days < algo.window_length:
        # Not enough history for the moving average yet.
        return
    if algo.init:
        # First tradable day: establish the starting portfolio.
        rebalance_portfolio(algo, data, algo.b_t)
        algo.init = False
        return
    m = algo.m
    x_tilde = np.zeros(m)
    b = np.zeros(m)
    # find relative moving average price for each security
    for i, stock in enumerate(algo.stocks):
        price = data[stock].price
        # Relative mean deviation
        x_tilde[i] = data[stock].mavg(algo.window_length) / price
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()
    # market relative deviation
    mark_rel_dev = x_tilde - x_bar
    # Expected return with current portfolio
    exp_return = np.dot(algo.b_t, x_tilde)
    weight = algo.eps - exp_return
    variability = (np.linalg.norm(mark_rel_dev)) ** 2
    # test for divide-by-zero case
    if variability == 0.0:
        step_size = 0
    else:
        # Step toward the eps return target, never moving backwards.
        step_size = max(0, weight / variability)
    b = algo.b_t + step_size * mark_rel_dev
    # Project back onto the simplex so weights are a valid portfolio.
    b_norm = simplex_projection(b)
    np.testing.assert_almost_equal(b_norm.sum(), 1)
    rebalance_portfolio(algo, data, b_norm)
    # update portfolio
    algo.b_t = b_norm
def rebalance_portfolio(algo, data, desired_port):
    """Issue orders moving current holdings to the weights in desired_port.

    desired_port is a weight vector (summing to 1) aligned with
    algo.stocks.  Share targets are based on current prices; on the very
    first call (algo.init) the starting cash is used as portfolio value
    instead of the marked-to-market positions.

    Fix: the original pre-allocated ``desired_amount`` with
    ``np.zeros_like`` and then immediately overwrote it -- dead code,
    removed here.
    """
    current_amount = np.zeros_like(desired_port)
    prices = np.zeros_like(desired_port)
    if algo.init:
        positions_value = algo.portfolio.starting_cash
    else:
        positions_value = algo.portfolio.positions_value + \
            algo.portfolio.cash
    for i, stock in enumerate(algo.stocks):
        current_amount[i] = algo.portfolio.positions[stock].amount
        prices[i] = data[stock].price
    # Target share counts; note np.round uses banker's rounding on .5 ties.
    desired_amount = np.round(desired_port * positions_value / prices)
    algo.last_desired_port = desired_port
    diff_amount = desired_amount - current_amount
    for i, stock in enumerate(algo.stocks):
        algo.order(stock, diff_amount[i])
def simplex_projection(v, b=1):
    """Project vector *v* onto the simplex {w : w >= 0, sum(w) = b}.

    Implements the efficient l1-ball projection of Duchi et al.,
    "Efficient projections onto the l1-ball for learning in high
    dimensions", ICML 2008 (http://icml.cc/2012/papers/168.pdf era).

    :Example:

    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> print(proj)
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> print(proj.sum())
    1.0

    Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
    Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).
    """
    v = np.asarray(v)
    p = len(v)
    # Zero out negative entries, then sort the rest in descending order.
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    cumulative = np.cumsum(u)
    # rho: last index where the sorted value still exceeds the running
    # threshold (sv - b) / k.
    ranks = np.arange(1, p + 1)
    rho = np.nonzero(u > (cumulative - b) / ranks)[0][-1]
    theta = np.max([0, (cumulative[rho] - b) / (rho + 1)])
    # Shift by theta and clip at zero.
    w = v - theta
    w[w < 0] = 0
    return w
if __name__ == '__main__':
    import pylab as pl
    # Four years of daily data for the OLMAR universe, pulled from Yahoo
    # Finance (requires network access).
    start = datetime(2004, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc)
    data = load_from_yahoo(stocks=STOCKS, indexes={}, start=start, end=end)
    data = data.dropna()
    olmar = TradingAlgorithm(handle_data=handle_data, initialize=initialize)
    results = olmar.run(data)
    # Plot the resulting equity curve.
    results.portfolio_value.plot()
    pl.show()
| 28.857143 | 76 | 0.654757 |
b90645e4eab385c39fe96ea1122ae32c57152a95 | 1,624 | py | Python | flask-app/sandbox/tokens.py | andreaskring/flask-keycloak-angular | c48070322c277ca42cd9deffc54636a555cbf9b8 | [
"Apache-2.0"
] | null | null | null | flask-app/sandbox/tokens.py | andreaskring/flask-keycloak-angular | c48070322c277ca42cd9deffc54636a555cbf9b8 | [
"Apache-2.0"
] | null | null | null | flask-app/sandbox/tokens.py | andreaskring/flask-keycloak-angular | c48070322c277ca42cd9deffc54636a555cbf9b8 | [
"Apache-2.0"
] | null | null | null | import json
import jwt
pub_key = b'-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoDv7GAhqyhSj4q9VnDfCUWTMxOhSHP+AtMaYrO7ByI3dua/DQZapib6qu+CnnR1K1uPHiPeHZ9O1Kkn8RV2m7D72C/tWCm1KDdRX14pbBYR38AR0T5VbYcxblyIPH58okKElWTAWFmGjidY6cXSlO3aEVZMjGB3p2at5o/QfNmCUfxpHivyOnW+b8yLs/h/vXV7EKg4JOLgZ9GV2EiQAZiwJ4pr1a3ttSCjkOVI+F7Gy/yeGeE0VpCDSAcbYuIWSGxB8fFqc58e4xKyLepCdDLFDVHAuIpSXwAwJkqBrwgS4uwNEvQT+INFzZhLEbBsTFKe4wlX3EnyZRMN0D/18rwIDAQAB\n-----END PUBLIC KEY-----'
token = 'eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICIzOWd4czh4MmRaYXhPVzdJMFFKRDBENUkzU0kxamw4U0NYcUNHcktXOG80In0.eyJleHAiOjE1OTYzODQyNTcsImlhdCI6MTU5NjM4MDY1NywianRpIjoiMWFlMmI0ZmItOTIyNi00OTc3LWIwYzMtZjRhMzhiMWNiMDNjIiwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo4MDgwL2F1dGgvcmVhbG1zL2ZsYXNrLWRlbW8iLCJzdWIiOiJjYWE5NTBiNi0wNjYyLTRhNzUtYmM5ZS02OTE5MzViMjE2YTciLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiJmbGFzayIsInNlc3Npb25fc3RhdGUiOiJjYzczZGI4ZS00MDg3LTQ5MmItODJlMS1jOGFhN2E4YWE0ZTciLCJhY3IiOiIxIiwic2NvcGUiOiJlbWFpbCBwcm9maWxlIiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJuYW1lIjoiQnJ1Y2UgTGVlIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYnJ1Y2UiLCJnaXZlbl9uYW1lIjoiQnJ1Y2UiLCJmYW1pbHlfbmFtZSI6IkxlZSIsImVtYWlsIjoiYnJ1Y2VAa3VuZy5mdSJ9.N2ujTH1oc1-7xYhLicXEDhB-CguIFIkJhv0PBa_xfKFsFhe68PIlzJNaH0WDE_ddv4nO5aTKcJ083kivDXNx4hCUJKlT7lB3KEwPtQGK3mmde3VYsQjsmxq5JifXIkCmMq1r4iybRRsHWgaay4cOM-vSxtdP46npxLXYsDW1qTFcCw1hybIjgJLJSKR4D_ocnK61oIdNQGovEoFN2_5ph2fnqW3cEP4WaHjQNA5p5DTwMJr5AkSWpl1Bgx5MQgsI1-ItuRg6ld35R2mEB5U7WroguVRqwEDfFncGBkzv1NN-cq4cu48V1COl_XDFy3Y1fRNQIPRwSjF-cVKuaL-hnw'
# Verify the token's RS256 signature against the realm public key above
# and decode its claims.  NOTE(review): no audience option is passed;
# presumably fine for this sandbox script -- PyJWT still validates "exp"
# by default, so an expired token would raise here (confirm).
decoded = jwt.decode(token, pub_key, algorithms=['RS256'])
print(json.dumps(decoded, indent=2)) | 162.4 | 1,041 | 0.943966 |
6adb234e8432e574e78981751d1f855656308a42 | 1,825 | py | Python | airflow/contrib/sensors/gcp_transfer_sensor.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/contrib/sensors/gcp_transfer_sensor.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/contrib/sensors/gcp_transfer_sensor.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated.
Please use `airflow.providers.google.cloud.sensors.cloud_storage_transfer_service`.
"""
import warnings
from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import (
CloudDataTransferServiceJobStatusSensor,
)
# Emit a deprecation warning at import time; stacklevel=2 attributes the
# warning to the module importing this shim.
warnings.warn(
    "This module is deprecated. "
    "Please use `airflow.providers.google.cloud.sensors.cloud_storage_transfer_service`.",
    DeprecationWarning,
    stacklevel=2,
)
class GCPTransferServiceWaitForJobStatusSensor(CloudDataTransferServiceJobStatusSensor):
    """This class is deprecated.
    Please use `airflow.providers.google.cloud.sensors.transfer.CloudDataTransferServiceJobStatusSensor`.
    """
    # Backwards-compatibility shim: behaves exactly like the provider
    # sensor, only adding a deprecation warning on construction.
    def __init__(self, *args, **kwargs):
        # stacklevel=3 attributes the warning to the DAG code that
        # instantiated the deprecated class, not to this wrapper.
        warnings.warn(
            """This class is deprecated.
            Please use
            `airflow.providers.google.cloud.sensors.transfer.CloudDataTransferServiceJobStatusSensor`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)
| 35.096154 | 106 | 0.745205 |
2e8a57e55f8f2e4f109f308cf0d4d90170c20796 | 510 | py | Python | tests/r/test_armada.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_armada.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_armada.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.armada import armada
def test_armada():
  """Test module armada.py by downloading
  armada.csv and testing shape of
  extracted data has 10 rows and 11 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = armada(test_path)
  try:
    assert x_train.shape == (10, 11)
  except Exception:
    # Clean up the download directory on failure, then re-raise the
    # original exception.  The original ended with ``raise()``, which
    # raises a TypeError (a tuple is not an exception) and masks the
    # real assertion failure; the bare ``except:`` is also narrowed.
    shutil.rmtree(test_path)
    raise
| 21.25 | 44 | 0.75098 |
36b2b824214f517de9fb406c70a13f9b61368d1d | 522 | py | Python | co2.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | 2 | 2020-08-06T07:32:57.000Z | 2022-02-11T02:37:21.000Z | co2.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | null | null | null | co2.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | 1 | 2021-09-11T02:34:39.000Z | 2021-09-11T02:34:39.000Z | from microbit import *
from enum import *
class CO2(object):
    """Carbon-dioxide sensor driver (PlanetX RJ11 module).

    Args:
        RJ_pin (pin): the RJ11 connector the sensor is plugged into
            (J1 or J2).
    """

    def __init__(self, RJ_pin):
        # Map the RJ11 connector to the micro:bit analog pin behind it.
        if RJ_pin == J1:
            self.__pin = pin1
        elif RJ_pin == J2:
            self.__pin = pin2

    def get_co2(self):
        """Return the CO2 reading: the raw analog sample inverted against
        the 10-bit ADC full scale (1024)."""
        raw = self.__pin.read_analog()
        return 1024 - raw
if __name__ == '__main__':
    # The constructor requires the connector the sensor is attached to;
    # the original called CO2() with no argument, which raises TypeError.
    co2 = CO2(J1)
    while True:
        print(co2.get_co2())
| 14.5 | 46 | 0.505747 |
cf703588bd5e4260f1b093f292ace66464b9b42f | 469 | py | Python | packages/python/plotly/plotly/validators/mesh3d/_color.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/mesh3d/_color.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/mesh3d/_color.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    # Codegen-style validator for the ``mesh3d.color`` property; it only
    # pins property-specific defaults onto the shared base validator.
    def __init__(self, plotly_name="color", parent_name="mesh3d", **kwargs):
        # edit_type "calc" forces a full figure recalculation on change;
        # colorscale_path points at the trace's companion colorscale.
        # Callers may override either via kwargs.
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            colorscale_path=kwargs.pop("colorscale_path", "mesh3d.colorscale"),
            **kwargs,
        )
| 36.076923 | 79 | 0.66951 |
c210f0b57e5ef5120e05d8f5b1dfb708cf5633cb | 7,652 | py | Python | python/S3_DEFAULT_ENCRYPTION_KMS/S3_DEFAULT_ENCRYPTION_KMS_test.py | praghu1/aws-config-rules | 0819d158e9d6fe3a9bb22d3aeea50ca67c1df202 | [
"CC0-1.0"
] | null | null | null | python/S3_DEFAULT_ENCRYPTION_KMS/S3_DEFAULT_ENCRYPTION_KMS_test.py | praghu1/aws-config-rules | 0819d158e9d6fe3a9bb22d3aeea50ca67c1df202 | [
"CC0-1.0"
] | null | null | null | python/S3_DEFAULT_ENCRYPTION_KMS/S3_DEFAULT_ENCRYPTION_KMS_test.py | praghu1/aws-config-rules | 0819d158e9d6fe3a9bb22d3aeea50ca67c1df202 | [
"CC0-1.0"
] | null | null | null | # Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import unittest
from rdklib import Evaluation, ComplianceType, InvalidParametersError
import rdklibtest
# Resource type this Config rule evaluates (not referenced by the tests
# below, but kept for parity with other rule test files).
RESOURCE_TYPE = 'AWS::S3::Bucket'
# Import the rule module dynamically and instantiate the rule class once,
# shared by every test case.
MODULE = __import__('S3_DEFAULT_ENCRYPTION_KMS')
RULE = MODULE.S3_DEFAULT_ENCRYPTION_KMS()
class ComplianceTest(unittest.TestCase):
    """Unit tests for the S3_DEFAULT_ENCRYPTION_KMS Config rule.

    Scenario 1 covers rule-parameter validation; scenarios 2-3 cover
    compliant buckets; scenarios 4-5 cover non-compliant buckets.
    """
    def test_scenario1_evaluateparameters_emptyruleparameter_returnsuccess(self):
        """An empty KmsKeyArns string is accepted and normalised to {}."""
        rule_invalid_parameter = {
            "KmsKeyArns": ""
        }
        response = RULE.evaluate_parameters(rule_invalid_parameter)
        self.assertEqual(response, {})
    def test_scenario1_evaluateparameters_invalidruleparameter_returnserror(self):
        """A malformed ARN in KmsKeyArns raises InvalidParametersError."""
        rule_invalid_parameter = {
            "KmsKeyArns": "dummy-arn,arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
        }
        with self.assertRaises(InvalidParametersError) as context:
            RULE.evaluate_parameters(rule_invalid_parameter)
        self.assertIn('Invalid AWS KMS Key Arn format for "dummy-arn". AWS KMS Key Arn starts with "arn:aws:kms:"', str(context.exception))
    def test_scenario1_evaluateparameters_validruleparameter_returnsuccess(self):
        """Valid comma-separated ARNs are trimmed and split into a list."""
        rule_valid_parameter = {
            "KmsKeyArns": " arn:aws:kms:us-west-2:123456789000:key/a3175963-d26f-4601-80d5-1959c9347f78, arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
        }
        resp_expected = {
            "KmsKeyArns": [
                "arn:aws:kms:us-west-2:123456789000:key/a3175963-d26f-4601-80d5-1959c9347f78",
                "arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
            ]
        }
        response = RULE.evaluate_parameters(rule_valid_parameter)
        self.assertEqual(response, resp_expected)
    def test_scenario1_evaluateparameters_noruleparameter_returnsuccess(self):
        """Absent parameters evaluate to an empty dict."""
        response = RULE.evaluate_parameters({})
        self.assertEqual(response, {})
    def test_scenario2_bucketencryptedwithkmskey_validruleparameter_returncompliant(self):
        """Bucket default-encrypted with a whitelisted KMS key -> COMPLIANT."""
        valid_rule_parameter = {
            "KmsKeyArns": [
                "arn:aws:kms:us-west-2:123456789000:key/a3175963-d26f-4601-80d5-1959c9347f78",
                "arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
            ]
        }
        config_item = {
            "configuration": {
                "name": "dummy-s3-bucket-name"
            },
            "supplementaryConfiguration": {
                "ServerSideEncryptionConfiguration": {
                    "rules": [
                        {
                            "applyServerSideEncryptionByDefault": {
                                "sseAlgorithm": "aws:kms",
                                "kmsMasterKeyID": "arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
                            }
                        }
                    ]
                }
            }
        }
        response = RULE.evaluate_change({}, {}, config_item, valid_rule_parameter)
        resp_expected = [
            Evaluation(ComplianceType.COMPLIANT)
        ]
        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
    def test_scenario3_bucketencryptedwithkmskey_noruleparameter_returncompliant(self):
        """Any KMS key is acceptable when no KmsKeyArns restriction is set."""
        config_item = {
            "configuration": {
                "name": "dummy-s3-bucket-name"
            },
            "supplementaryConfiguration": {
                "ServerSideEncryptionConfiguration": {
                    "rules": [
                        {
                            "applyServerSideEncryptionByDefault": {
                                "sseAlgorithm": "aws:kms",
                                "kmsMasterKeyID": "arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
                            }
                        }
                    ]
                }
            }
        }
        response = RULE.evaluate_change({}, {}, config_item, {})
        resp_expected = [
            Evaluation(ComplianceType.COMPLIANT)
        ]
        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
    def test_scenario4_bucketencryptedwithinvalidkmskey_validruleparameter_returnnoncompliant(self):
        """A KMS key outside the whitelist -> NON_COMPLIANT with annotation."""
        valid_rule_parameter = {
            "KmsKeyArns": [
                "arn:aws:kms:us-west-2:123456789000:key/a3175963-d26f-4601-80d5-1959c9347f78",
                "arn:aws:kms:us-west-2:123456789000:key/32131231-53434-5342-80d5-112137654365"
            ]
        }
        config_item = {
            "configuration": {
                "name": "dummy-s3-bucket-name"
            },
            "supplementaryConfiguration": {
                "ServerSideEncryptionConfiguration": {
                    "rules": [
                        {
                            "applyServerSideEncryptionByDefault": {
                                "sseAlgorithm": "aws:kms",
                                "kmsMasterKeyID": "arn:aws:kms:us-west-2:123456789000:key/dummy-key"
                            }
                        }
                    ]
                }
            }
        }
        response = RULE.evaluate_change({}, {}, config_item, valid_rule_parameter)
        resp_expected = [
            Evaluation(ComplianceType.NON_COMPLIANT, annotation="AWS KMS key 'arn:aws:kms:us-west-2:123456789000:key/dummy-key' used to encrypt the Amazon S3 bucket is not in rule_paramter 'KmsKeyArns'")
        ]
        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
    def test_scenario5_bucketencryptedwithaes256_noruleparameter_returnnoncompliant(self):
        """SSE-S3 (AES256) default encryption is not KMS -> NON_COMPLIANT."""
        config_item = {
            "configuration": {
                "name": "dummy-s3-bucket-name"
            },
            "supplementaryConfiguration": {
                "ServerSideEncryptionConfiguration": {
                    "rules": [
                        {
                            "applyServerSideEncryptionByDefault": {
                                "sseAlgorithm": "AES256",
                                "kmsMasterKeyID": None
                            }
                        }
                    ]
                }
            }
        }
        response = RULE.evaluate_change({}, {}, config_item, {})
        resp_expected = [
            Evaluation(ComplianceType.NON_COMPLIANT, annotation="Amazon S3 bucket is not encrypted with AWS KMS key")
        ]
        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
    def test_scenario5_bucketnotencrypted_noruleparameter_returnnoncompliant(self):
        """No default-encryption configuration at all -> NON_COMPLIANT."""
        config_item = {
            "configuration": {
                "name": "dummy-s3-bucket-name"
            },
            "supplementaryConfiguration": {
            }
        }
        response = RULE.evaluate_change({}, {}, config_item, {})
        resp_expected = [
            Evaluation(ComplianceType.NON_COMPLIANT, annotation="Amazon S3 bucket is not encrypted with AWS KMS key")
        ]
        rdklibtest.assert_successful_evaluation(self, response, resp_expected)
| 42.043956 | 203 | 0.584553 |
40b2b256f71a9007531a797685752c86f15017a8 | 20,593 | py | Python | phenocube/lib/python3.8/site-packages/setuptools/config.py | phenocube/phenocube-py | cb262aef1c0925efd2e955170bacd2989da03769 | [
"MIT"
] | null | null | null | phenocube/lib/python3.8/site-packages/setuptools/config.py | phenocube/phenocube-py | cb262aef1c0925efd2e955170bacd2989da03769 | [
"MIT"
] | null | null | null | phenocube/lib/python3.8/site-packages/setuptools/config.py | phenocube/phenocube-py | cb262aef1c0925efd2e955170bacd2989da03769 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import io
import os
import sys
import warnings
import functools
from collections import defaultdict
from functools import partial
from functools import wraps
from importlib import import_module
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.extern.packaging.version import LegacyVersion, parse
from setuptools.extern.packaging.specifiers import SpecifierSet
from setuptools.extern.six import string_types, PY3
__metaclass__ = type
def read_configuration(filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.

    :param str|unicode filepath: Path to configuration file
        to get options from.

    :param bool find_others: Whether to search for other configuration files
        which could be on in various places.

    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.

    :rtype: dict
    """
    from setuptools.dist import Distribution, _Distribution

    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError("Configuration file %s does not exist." % filepath)

    saved_cwd = os.getcwd()
    # Relative references inside the file (e.g. ``file:`` directives) are
    # resolved against the directory containing the configuration file.
    os.chdir(os.path.dirname(filepath))

    try:
        dist = Distribution()
        filenames = dist.find_config_files() if find_others else []
        if filepath not in filenames:
            filenames.append(filepath)
        _Distribution.parse_config_files(dist, filenames=filenames)
        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        os.chdir(saved_cwd)

    return configuration_to_dict(handlers)
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = "get_{key}".format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
def configuration_to_dict(handlers):
    """Collapse the options gathered by *handlers* into a nested dict
    keyed first by section prefix, then by option name.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()

    :rtype: dict
    """
    result = defaultdict(dict)
    for handler in handlers:
        # The section key is only touched when the handler actually set
        # options, so handlers with nothing to report add no entry.
        for option in handler.set_options:
            result[handler.section_prefix][option] = _get_option(
                handler.target_obj, option)
    return result
def parse_configuration(distribution, command_options, ignore_option_errors=False):
    """Performs additional parsing of configuration options
    for a distribution.

    Returns the pair of used option handlers: (metadata, options).

    :param Distribution distribution:
    :param dict command_options:
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.

    :rtype: tuple
    """
    # NOTE(review): options are parsed before metadata -- the metadata
    # handler is handed distribution.package_dir afterwards, which appears
    # intentional; do not reorder.
    options_handler = ConfigOptionsHandler(
        distribution, command_options, ignore_option_errors)
    options_handler.parse()

    metadata_handler = ConfigMetadataHandler(
        distribution.metadata,
        command_options,
        ignore_option_errors,
        distribution.package_dir,
    )
    metadata_handler.parse()

    return metadata_handler, options_handler
class ConfigHandler:
    """Handles metadata supplied in configuration files."""

    # Class-level contract: concrete handlers fill these in.
    section_prefix = None
    """Prefix for config sections handled by this handler.
    Must be provided by class heirs.

    """

    aliases = {}
    """Options aliases.
    For compatibility with various packages. E.g.: d2to1 and pbr.
    Note: `-` in keys is replaced with `_` by config parser.

    """
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, "").strip(".")
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
    @property
    def parsers(self):
        """Metadata item name to parser function mapping.

        Abstract: concrete handlers must override this with a dict mapping
        option names to callables that coerce the raw string value.
        """
        raise NotImplementedError(
            "%s must provide .parsers property" % self.__class__.__name__
        )
    def __setitem__(self, option_name, value):
        """Assign *value* to *option_name* on the target object.

        Aliases are resolved first; unknown options raise KeyError, and
        options that already carry a truthy value are left untouched.  A
        registered parser (see ``parsers``) is applied to the raw value;
        parser failures are swallowed when ``ignore_option_errors`` is set.
        The value is stored via a ``set_<name>`` method when the target
        provides one, otherwise via setattr, and the option name is
        recorded in ``set_options``.
        """
        unknown = tuple()
        target_obj = self.target_obj

        # Translate alias into real name.
        option_name = self.aliases.get(option_name, option_name)

        # Sentinel default distinguishes "missing attribute" from None.
        current_value = getattr(target_obj, option_name, unknown)

        if current_value is unknown:
            raise KeyError(option_name)

        if current_value:
            # Already inhabited. Skipping.
            return

        skip_option = False
        parser = self.parsers.get(option_name)
        if parser:
            try:
                value = parser(value)

            except Exception:
                skip_option = True
                if not self.ignore_option_errors:
                    raise

        if skip_option:
            return

        setter = getattr(target_obj, "set_%s" % option_name, None)
        if setter is None:
            setattr(target_obj, option_name, value)
        else:
            setter(value)

        self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=","):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if "\n" in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = "="
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
"Unable to parse option value to dict: %s" % value
)
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ("1", "true", "yes")
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = "file:"
if value.startswith(exclude_directive):
raise ValueError(
"Only strings are accepted for the {0} field, "
"files are not accepted".format(key)
)
return value
return parser
    @classmethod
    def _parse_file(cls, value):
        """Represents value as a string, allowing inclusion of text from the
        nearest files using the `file:` directive.

        The directive is sandboxed and won't reach anything outside the
        directory containing setup.py.

        Examples:
            file: README.rst, CHANGELOG.md, src/file.txt

        :param str value: raw option value, possibly a ``file:`` directive.
        :rtype: str
        """
        include_directive = "file:"
        # Non-string values (e.g. already-parsed lists) pass through untouched.
        if not isinstance(value, string_types):
            return value
        if not value.startswith(include_directive):
            return value
        # Everything after "file:" is a comma-separated list of paths.
        spec = value[len(include_directive) :]
        filepaths = (os.path.abspath(path.strip()) for path in spec.split(","))
        # _assert_local() raises for out-of-sandbox paths; the "or True"
        # keeps the None-returning check usable inside the filter clause.
        return "\n".join(
            cls._read_file(path)
            for path in filepaths
            if (cls._assert_local(path) or True) and os.path.isfile(path)
        )
@staticmethod
def _assert_local(filepath):
if not filepath.startswith(os.getcwd()):
raise DistutilsOptionError("`file:` directive can not access %s" % filepath)
@staticmethod
def _read_file(filepath):
with io.open(filepath, encoding="utf-8") as f:
return f.read()
    @classmethod
    def _parse_attr(cls, value, package_dir=None):
        """Represents value as a module attribute.

        Examples:
            attr: package.attr
            attr: package.module.attr

        :param str value: raw option value, possibly an ``attr:`` directive.
        :param dict package_dir: optional mapping of package names to source
            directories, used to locate the module to import.
        :rtype: str
        """
        attr_directive = "attr:"
        if not value.startswith(attr_directive):
            return value
        # Split "attr: pkg.mod.attr" into the module path and attribute name.
        attrs_path = value.replace(attr_directive, "").strip().split(".")
        attr_name = attrs_path.pop()
        module_name = ".".join(attrs_path)
        module_name = module_name or "__init__"
        parent_path = os.getcwd()
        if package_dir:
            # NOTE(review): assumes attrs_path is non-empty here; a bare
            # "attr: name" combined with package_dir would raise IndexError
            # on the next line - confirm.
            if attrs_path[0] in package_dir:
                # A custom path was specified for the module we want to import
                custom_path = package_dir[attrs_path[0]]
                parts = custom_path.rsplit("/", 1)
                if len(parts) > 1:
                    parent_path = os.path.join(os.getcwd(), parts[0])
                    module_name = parts[1]
                else:
                    module_name = custom_path
            elif "" in package_dir:
                # A custom parent directory was specified for all root modules
                parent_path = os.path.join(os.getcwd(), package_dir[""])
        # Temporarily put the parent directory first on sys.path so the
        # target module can be imported from the project tree.
        sys.path.insert(0, parent_path)
        try:
            module = import_module(module_name)
            value = getattr(module, attr_name)
        finally:
            # NOTE(review): drops the first sys.path entry, which assumes the
            # imported module did not itself prepend to sys.path - confirm.
            sys.path = sys.path[1:]
        return value
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
    def parse_section(self, section_options):
        """Parses a configuration file section, applying each option to the
        target object via ``self[name] = value``.

        :param dict section_options: mapping of option name to a
            ``(source, value)`` pair as produced by the config parser.
        """
        for (name, (_, value)) in section_options.items():
            try:
                self[name] = value
            except KeyError:
                pass  # Keep silent for a new option may appear anytime.
    def parse(self):
        """Parses configuration file items from one
        or more related sections.

        Dispatches each collected section to a ``parse_section*`` method
        looked up by name, so subclasses add support for ``[prefix.foo]``
        simply by defining ``parse_section_foo()``.
        """
        for section_name, section_options in self.sections.items():
            method_postfix = ""
            if section_name:  # [section.option] variant
                method_postfix = "_%s" % section_name
            section_parser_method = getattr(
                self,
                # Dots in section names are translated into dunderscores,
                # e.g. "packages.find" -> parse_section_packages__find.
                ("parse_section%s" % method_postfix).replace(".", "__"),
                None,
            )
            if section_parser_method is None:
                raise DistutilsOptionError(
                    "Unsupported distribution option section: [%s.%s]"
                    % (self.section_prefix, section_name)
                )
            section_parser_method(section_options)
    def _deprecated_config_handler(self, func, msg, warning_class):
        """Wrap a parser so each use emits a deprecation warning first.

        :param func: parser function to be wrapped.
        :param msg: deprecation message shown to the user.
        :param warning_class: class of warning exception to be raised.
        :rtype: callable
        """
        @wraps(func)
        def config_handler(*args, **kwargs):
            warnings.warn(msg, warning_class)
            return func(*args, **kwargs)
        return config_handler
class ConfigMetadataHandler(ConfigHandler):
    """Handler for the ``[metadata]`` section of setup.cfg."""

    section_prefix = "metadata"
    # Alternative option spellings accepted for pbr / d2to1 compatibility.
    aliases = {
        "home_page": "url",
        "summary": "description",
        "classifier": "classifiers",
        "platform": "platforms",
    }
    strict_mode = False
    """We need to keep it loose, to be partially compatible with
    `pbr` and `d2to1` packages which also uses `metadata` section.
    """
    def __init__(
        self, target_obj, options, ignore_option_errors=False, package_dir=None
    ):
        """:param dict package_dir: forwarded to ``attr:`` version lookups."""
        super(ConfigMetadataHandler, self).__init__(
            target_obj, options, ignore_option_errors
        )
        self.package_dir = package_dir
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_file = self._parse_file
        parse_dict = self._parse_dict
        exclude_files_parser = self._exclude_files_parser
        return {
            "platforms": parse_list,
            "keywords": parse_list,
            "provides": parse_list,
            "requires": self._deprecated_config_handler(
                parse_list,
                "The requires parameter is deprecated, please use "
                "install_requires for runtime dependencies.",
                DeprecationWarning,
            ),
            "obsoletes": parse_list,
            # classifiers may come from a file: directive, then get split.
            "classifiers": self._get_parser_compound(parse_file, parse_list),
            "license": exclude_files_parser("license"),
            "license_files": parse_list,
            "description": parse_file,
            "long_description": parse_file,
            "version": self._parse_version,
            "project_urls": parse_dict,
        }
    def _parse_version(self, value):
        """Parses `version` option value.

        Supports plain versions, ``file:`` includes and ``attr:`` lookups
        (where the attribute may also be a callable or an iterable of
        version components).

        :param value: raw option value.
        :rtype: str
        :raises DistutilsOptionError: if a file-loaded version is not
            PEP 440 compliant.
        """
        version = self._parse_file(value)
        if version != value:
            # Value came from a file: validate the loaded string strictly.
            version = version.strip()
            # Be strict about versions loaded from file because it's easy to
            # accidentally include newlines and other unintended content
            if isinstance(parse(version), LegacyVersion):
                tmpl = (
                    "Version loaded from {value} does not "
                    "comply with PEP 440: {version}"
                )
                raise DistutilsOptionError(tmpl.format(**locals()))
            return version
        # Otherwise try the attr: directive; the resolved attribute may be
        # a callable or an iterable of version components.
        version = self._parse_attr(value, self.package_dir)
        if callable(version):
            version = version()
        if not isinstance(version, string_types):
            if hasattr(version, "__iter__"):
                version = ".".join(map(str, version))
            else:
                version = "%s" % version
        return version
class ConfigOptionsHandler(ConfigHandler):
    """Handler for the ``[options]`` section and its sub-sections."""

    section_prefix = "options"
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_list_semicolon = partial(self._parse_list, separator=";")
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        return {
            "zip_safe": parse_bool,
            "use_2to3": parse_bool,
            "include_package_data": parse_bool,
            "package_dir": parse_dict,
            "use_2to3_fixers": parse_list,
            "use_2to3_exclude_fixers": parse_list,
            "convert_2to3_doctests": parse_list,
            "scripts": parse_list,
            "eager_resources": parse_list,
            "dependency_links": parse_list,
            "namespace_packages": parse_list,
            # Requirement specifiers may contain commas, so the requirement
            # lists are split on semicolons instead.
            "install_requires": parse_list_semicolon,
            "setup_requires": parse_list_semicolon,
            "tests_require": parse_list_semicolon,
            "packages": self._parse_packages,
            "entry_points": self._parse_file,
            "py_modules": parse_list,
            "python_requires": SpecifierSet,
        }
    def _parse_packages(self, value):
        """Parses `packages` option value.

        A plain list is parsed as-is; the special values ``find:`` and
        ``find_namespace:`` trigger automatic package discovery configured
        by the ``[options.packages.find]`` section.

        :param value: raw option value.
        :rtype: list
        """
        find_directives = ["find:", "find_namespace:"]
        trimmed_value = value.strip()
        if trimmed_value not in find_directives:
            return self._parse_list(value)
        findns = trimmed_value == find_directives[1]
        if findns and not PY3:
            raise DistutilsOptionError(
                "find_namespace: directive is unsupported on Python < 3.3"
            )
        # Read function arguments from a dedicated section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get("packages.find", {})
        )
        if findns:
            from setuptools import find_namespace_packages as find_packages
        else:
            from setuptools import find_packages
        return find_packages(**find_kwargs)
    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.

        To be used in conjunction with _parse_packages().

        :param dict section_options: raw section contents.
        :rtype: dict of keyword arguments for find_packages()
        """
        section_data = self._parse_section_to_dict(section_options, self._parse_list)
        valid_keys = ["where", "include", "exclude"]
        find_kwargs = dict(
            [(k, v) for k, v in section_data.items() if k in valid_keys and v]
        )
        where = find_kwargs.get("where")
        if where is not None:
            find_kwargs["where"] = where[0]  # cast list to single val
        return find_kwargs
    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.

        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self["entry_points"] = parsed
    def _parse_package_data(self, section_options):
        # Shared helper for [options.package_data] and
        # [options.exclude_package_data]: a "*" key means "any package" and
        # is normalized to the empty-string key expected by setuptools.
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        root = parsed.get("*")
        if root:
            parsed[""] = root
            del parsed["*"]
        return parsed
    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.

        :param dict section_options:
        """
        self["package_data"] = self._parse_package_data(section_options)
    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.

        :param dict section_options:
        """
        self["exclude_package_data"] = self._parse_package_data(section_options)
    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.

        :param dict section_options:
        """
        parse_list = partial(self._parse_list, separator=";")
        self["extras_require"] = self._parse_section_to_dict(
            section_options, parse_list
        )
    def parse_section_data_files(self, section_options):
        """Parses `data_files` configuration file section.

        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self["data_files"] = [(k, v) for k, v in parsed.items()]
| 30.690015 | 88 | 0.613704 |
12ada9924d6389349c66f337f9aedbffbe34d499 | 4,547 | py | Python | uctp_ufabc/buildTableGraphics.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | null | null | null | uctp_ufabc/buildTableGraphics.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 6 | 2018-10-30T00:37:20.000Z | 2019-07-23T00:23:18.000Z | uctp_ufabc/buildTableGraphics.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 1 | 2019-06-06T00:54:13.000Z | 2019-06-06T00:54:13.000Z | # -*- coding: utf-8 -*-
"""
describe()
count: conta a quantidade de linhas envolvidas
mean: calcula a média dos elementos da coluna
std: calcula o desvio padrão dos elementos da coluna
min: menor elemento da coluna (0% dos elementos são menores do que ele)
25%: primeiro quartil da coluna (25% dos elementos são menores do que ele)
50%: segundo quartil da coluna, equivalente à mediana (50% dos elementos são menores do que ele)
75%: terceiro quartil da coluna (75% dos elementos são menores do que ele)
max: maior elemento da coluna (100% dos elementos são menores do que ele)
------------------------------------------
-when you want an inline plot
%matplotlib inline
-when you want graphs in a separate window and
%matplotlib qt
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib qt
sns.set(style="darkgrid")
def mma(j=5):
    """Plot min/max/avg fitness curves for run *j*.

    Reads ``totalMinMaxAvg_<j>.csv``, splits the rows into the infeasible
    ("Inf") and feasible ("Fea") populations, melts each into long format,
    then writes a ``describe()`` summary CSV and a fitness-vs-iteration
    line plot per population.

    :param j: run index used in the input/output file names
        (previously hard-coded to 5).
    """
    x_title = "Iter"
    hue_title = "MMA"
    mma_info = pd.read_csv("totalMinMaxAvg_" + str(j) + ".csv", sep=";")
    y_title = ["Inf", "Fea"]
    # One frame per population, melted so min/max/avg become a "MMA" column.
    populations = [mma_info[mma_info["Pop"] == pop] for pop in y_title]
    populations = [
        pd.melt(frame, id_vars=["Pop", "Iter"], var_name="MMA", value_name="Fit")
        for frame in populations
    ]
    # Plotting
    for i in range(len(y_title)):
        other_info = populations[i].describe()
        other_info.to_csv("otherInfoMMA_" + y_title[i] + "_" + str(j) + ".csv", sep=";")
        fig = sns.relplot(x=x_title, y="Fit", hue=hue_title, data=populations[i], kind="line", aspect=2)
        fig.savefig(x_title + "_" + hue_title + "_" + y_title[i] + "_" + str(j) + "_line.png", dpi=120)
#----------------------------------------------------------------------------
def instances(num=10, x_title="twoPointsCross", hue_title="reposCross"):
    """Aggregate per-instance result CSVs and plot them per metric.

    Reads ``occurrSum<i>.csv`` for i in 1..num, joins each with the shared
    ``manyInstances.csv`` configuration table, accumulates an element-wise
    sum (averaged afterwards) and a row-wise concatenation, writes both
    plus their ``describe()`` summaries, then draws a box plot and a line
    plot for every metric column of the last file read.

    :param num: number of instance files to aggregate (was hard-coded 10).
    :param x_title: column on the x axis; also the output file name prefix.
    :param hue_title: column used for plot hue; pass "" to disable hue.
    """
    configs = pd.read_csv("manyInstances.csv", sep=";")
    for i in range(1, num + 1):
        table = pd.read_csv("occurrSum" + str(i) + ".csv", sep=";")
        y_title = list(table)[1:]
        table1 = pd.DataFrame(table[list(table)[0]])
        table2 = pd.DataFrame(table, columns=y_title)
        table = pd.concat([table1, configs, table2], axis=1, join='inner')
        if i == 1:
            finalSumAvg = table
            finalAppend = table
        else:
            # NOTE(review): element-wise "+" assumes all joined columns are
            # numeric; string config columns would be concatenated - confirm.
            finalSumAvg = finalSumAvg + table
            # DataFrame.append() was removed in pandas 2.0; pd.concat is the
            # supported replacement with identical semantics here.
            finalAppend = pd.concat([finalAppend, table])
    finalSumAvg = finalSumAvg / num
    finalSumAvg.to_csv(x_title + "_finalSumAvg.csv", sep=";")
    finalAppend.to_csv(x_title + "_finalAppend.csv", sep=";")
    # Other infos
    otherInfoT = finalSumAvg.describe()
    otherInfoA = finalAppend.describe()
    otherInfoT.to_csv(x_title + "_otherInfoSumAvg.csv", sep=";")
    otherInfoA.to_csv(x_title + "_otherInfoAppend.csv", sep=";")
    # Plotting: re-read the appended table so dtypes match a fresh CSV load.
    final = pd.read_csv(x_title + "_finalAppend.csv", sep=";")
    for i in y_title:
        if hue_title != '':
            fig = sns.catplot(x=x_title, y=i, hue=hue_title, data=final, kind="box", aspect=2)
            fig.set_xticklabels(rotation=80)
            fig.savefig(x_title + "_" + hue_title + "_" + i + "_box.png", dpi=120)
            fig = sns.relplot(x=x_title, y=i, hue=hue_title, data=final, marker="o", kind="line", aspect=2)
            fig.set_xticklabels(rotation=80)
            fig.savefig(x_title + "_" + hue_title + "_" + i + "_line.png", dpi=120)
        else:
            fig = sns.catplot(x=x_title, y=i, data=final, kind="box", aspect=2)
            fig.set_xticklabels(rotation=80)
            plt.tight_layout()
            fig.savefig(x_title + "_" + i + "_box.png", dpi=120)
            fig = sns.relplot(x=x_title, y=i, data=final, marker="o", kind="line", aspect=2)
            fig.set_xticklabels(rotation=80)
            plt.tight_layout()
            fig.savefig(x_title + "_" + i + "_line.png", dpi=120)
| 45.47 | 249 | 0.607433 |
c837c8ca129aacc7eb1a478cb88c7776d7a253d4 | 10,746 | py | Python | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/quantization/quant_strategy.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/quantization/quant_strategy.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null | tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/quantization/quant_strategy.py | bluetiger9/Vitis-AI | a7728733bbcfc292ff3afa46b9c8b03e94b740b3 | [
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Dict, List
from nndct_shared.base import NNDCT_OP
def create_quant_strategy(bits_weight: int, bits_act: int, is_lstm: bool, mix_bit: bool):
  """Factory selecting the quantization strategy for the current build.

  LSTM graphs always get the LSTM strategy.  Otherwise ``mix_bit`` picks
  the TQT (trained quantization threshold) strategy with 8-bit biases,
  falling back to the plain default strategy.
  """
  if is_lstm:
    return LstmQstrategy(bits_weight, bits_act, bits_act)
  if mix_bit:
    return TQTStrategy(bits_weight, 8, bits_act)
  return DefaultQstrategy(bits_weight, bits_act, bits_act)
class QuantStrategyBase(ABC):
  """Base class holding bitwidth settings and the shared config builder."""

  def __init__(self, bits_weight, bits_bias, bits_activation, mix_bit=False):
    # Bitwidths for weights, biases and activations; mix_bit enables the
    # mixed-precision behaviour implemented by subclasses.
    self._bits_weight = bits_weight
    self._bits_bias = bits_bias
    self._bits_act = bits_activation
    self._mix_bit = mix_bit
  @abstractmethod
  def create_quant_config(self, *args,
                          **kwargs) -> Dict[str, Dict[str, List[int]]]:
    """create input/output/param quantization configuration

    Returns
      dict: quant config mapping 'param'/'output'/'input' tensor names to
      [bitwidth, fix position] pairs (the fix position is filled in later).
    """
    pass
  def _get_default_quant_config(self,
                                quant_info_mgr,
                                lstm=False):
    """Build a quant config for the graph held by *quant_info_mgr*.

    1. unified activation bits
    2. mixed bits for lstm
    """
    config = {'param': {}, 'output': {}, 'input': {}}
    for node in quant_info_mgr.Nndctgraph.nodes:
      if quant_info_mgr.is_node_quantizable(node, lstm):
        # parameters: weights/gamma use the weight bitwidth, everything
        # else (e.g. bias/beta) uses the activation bitwidth.
        for k in quant_info_mgr.quant_node_params(node).keys():
          p = quant_info_mgr.quant_node_params(node)[k]
          # for mix precision quantization
          bw = self._bits_act
          if (node.has_bound_params() and
              (hasattr(node.op.ParamName, 'WEIGHTS') and k == node.op.ParamName.WEIGHTS or
               hasattr(node.op.ParamName, 'GAMMA') and k == node.op.ParamName.GAMMA)):
            bw = self._bits_weight
          config['param'][p.name] = [bw, None]
        # output blobs: one entry per quant group end node.
        end = quant_info_mgr.quant_output(node.name).name
        if end not in config['output']:
          config['output'][end] = [self._bits_act, None]
        # input blobs (only needed for mix precision quantization)
        if self._bits_weight != self._bits_act:
          if node.op.type in [NNDCT_OP.DENSE, NNDCT_OP.CONV2D]:
            config['input'][node.name] = [self._bits_weight, None]
      elif (lstm and (node in quant_info_mgr.Nndctgraph.inputs)):
        # this path is only for quantizing a whole graph without quant stub OP
        # for lstm, check the following node type
        if (node.in_quant_part or (any(
            (quant_info_mgr.is_node_quantizable(c, lstm) and
             c.op.type is not NNDCT_OP.QUANT_STUB)
            for c in quant_info_mgr.Nndctgraph.children(node.name)))):
          end = quant_info_mgr.quant_output(node.name).name
          if end not in config['output']:
            config['output'][end] = [self._bits_act, None]
    # check the input fix of all quantized ops: every quantized op's
    # producer output must also carry a quantization entry.
    if not lstm:
      for node in quant_info_mgr.Nndctgraph.nodes:
        if quant_info_mgr.is_node_quantizable(node, lstm):
          if node.op.type not in [NNDCT_OP.INPUT, NNDCT_OP.QUANT_STUB, NNDCT_OP.CONCAT]:
            for p_n in quant_info_mgr.Nndctgraph.parents(node):
              if not quant_info_mgr.op_unquantizable(p_n.op.type):
                end = quant_info_mgr.quant_output(p_n.name).name
                end_node = quant_info_mgr.Nndctgraph.node(end)
                # Skip producers whose outputs are not concrete tensors
                # (shape is unknown/None).
                out_is_tensor = True
                for tensor in end_node.out_tensors:
                  if tensor.shape == None:
                    out_is_tensor = False
                if end not in config['output'] and out_is_tensor:
                  config['output'][end] = [self._bits_act, None]
    return config
  @property
  def num_bits_w(self):
    # Configured weight bitwidth.
    return self._bits_weight
  @property
  def num_bits_a(self):
    # Configured activation bitwidth.
    return self._bits_act
  @property
  def mix_bit(self):
    # Whether mixed-bit quantization is enabled.
    return self._mix_bit
class DefaultQstrategy(QuantStrategyBase):
  """Plain single-bitwidth strategy for non-LSTM, non-mixed builds."""

  def __init__(self, bits_weight, bits_bias, bits_activation, mix_bit=False):
    """Forward all bitwidths (and the optional mix flag) to the base class."""
    super().__init__(bits_weight, bits_bias, bits_activation, mix_bit)

  def create_quant_config(self, quant_info_mgr):
    """Delegate to the shared default-config builder (non-LSTM mode)."""
    return self._get_default_quant_config(quant_info_mgr)
class LstmQstrategy(QuantStrategyBase):
  """Strategy for LSTM graphs; mixed-bit mode is always disabled."""

  def __init__(self, bits_weight, bits_bias, bits_activation):
    """Forward the bitwidths to the base class with ``mix_bit=False``."""
    super().__init__(bits_weight, bits_bias, bits_activation, False)

  def create_quant_config(self, quant_info_mgr):
    """Delegate to the shared config builder with LSTM handling enabled."""
    return self._get_default_quant_config(quant_info_mgr, lstm=True)
class TQTStrategy(QuantStrategyBase):
  """Mixed-bit strategy based on trained quantization thresholds (TQT)."""

  # Bitwidth bounds used by the per-op-type initial configs below.
  _max_bit = 8
  _min_bit = 4
  def __init__(self, bits_weight, bits_bias, bits_activation):
    super().__init__(bits_weight, bits_bias, bits_activation, True)
    # Per-op-type initial [input_bits, output_bits] pairs; op types not
    # listed fall back to [_max_bit, _max_bit] in _get_init_config_from_type.
    self._init_bit_config = {
        NNDCT_OP.CONV2D: [self._bits_act, self._bits_act],
        NNDCT_OP.ADD: [self._max_bit, self._max_bit],
        NNDCT_OP.MAX_POOL: [self._max_bit, self._max_bit],
        NNDCT_OP.AVG_POOL: [self._max_bit, self._max_bit],
        NNDCT_OP.ADAPTIVEAVGPOOL2D: [self._max_bit, self._max_bit],
        NNDCT_OP.DENSE: [self._bits_act, self._bits_act],
        NNDCT_OP.BATCH_NORM: [self._max_bit, self._min_bit],
        NNDCT_OP.QUANT_STUB: [None, self._max_bit]
    }
    # Op types whose inputs get their own fix position.
    self._input_fix_op_types = [
        NNDCT_OP.CONV2D, NNDCT_OP.CONVTRANSPOSE2D, NNDCT_OP.DENSE,
    ]
    # Activation ops whose output replaces their producer's quant output.
    self._activation_op_types = [NNDCT_OP.RELU, NNDCT_OP.RELU6, NNDCT_OP.TANH, NNDCT_OP.LEAKY_RELU]
  def _get_init_config_from_type(self, op_type):
    # Return a copy so callers can mutate the [input_bits, output_bits]
    # pair without corrupting the shared table.
    default = [self._max_bit, self._max_bit]
    return copy.copy(self._init_bit_config.get(op_type, default))
  def create_quant_config(self, quant_info_mgr):
    """Build the {param, output, input} -> [bitwidth, None] config."""
    # [input_bw, output_bw]
    config = {
        "param": defaultdict(list),
        "output": defaultdict(list),
        "input": defaultdict(list)
    }
    # handle params bits
    for node in quant_info_mgr.Nndctgraph.nodes:
      if quant_info_mgr.is_node_quantizable(node, False):
        # parameters: weights use the weight bitwidth, others the bias one.
        for k in quant_info_mgr.quant_node_params(node).keys():
          p = quant_info_mgr.quant_node_params(node)[k]
          # for mix precision quantization
          if k == node.op.ParamName.WEIGHTS:
            config['param'][p.name] = [self._bits_weight, None]
          else:
            config['param'][p.name] = [self._bits_bias, None]
    # handle output bits
    node_bits_map = {}
    for node in quant_info_mgr.Nndctgraph.nodes:
      if quant_info_mgr.is_node_quantizable(node, False):
        node_bits_map[node.name] = self._get_init_config_from_type(node.op.type)
        if node in (tensor.node for tensor in quant_info_mgr.Nndctgraph.end_tensors):
          # Graph outputs always use the maximum bitwidth.
          node_bits_map[node.name][1] = self._max_bit
        elif node.op.type in self._input_fix_op_types:
          # Output bitwidth is the widest input bitwidth required by the
          # nearest quantizable consumers.
          output_bit_list = []
          for c_node in quant_info_mgr.Nndctgraph.children(node):
            self._find_next_quant_nodes_bits(quant_info_mgr, c_node,
                                             output_bit_list)
          node_bits_map[node.name][1] = max(output_bit_list) if output_bit_list else self._max_bit
        for pn in quant_info_mgr.Nndctgraph.parents(node):
          if pn.name in node_bits_map:
            p_out_bits = node_bits_map[pn.name][1]
            if p_out_bits == node_bits_map[node.name][0]:
              # Input fix is redundant when the producer already emits at
              # this bitwidth.
              node_bits_map[node.name][0] = None
      else:
        # Non-quantizable nodes inherit the bit pair of a known parent.
        for pn in quant_info_mgr.Nndctgraph.parents(node):
          if pn.name in node_bits_map:
            node_bits_map[node.name] = node_bits_map[pn.name]
            break
        if node.name not in node_bits_map:
          node_bits_map[node.name] = self._get_init_config_from_type(node.op.type)
    # handle input bits
    for node in quant_info_mgr.Nndctgraph.nodes:
      if quant_info_mgr.is_node_quantizable(node, False):
        *_, end = quant_info_mgr.quant_groups[node.name]
        if node.op.type in self._input_fix_op_types and node_bits_map[node.name][0] is not None:
          config["input"][node.name] = [node_bits_map[node.name][0], None]
        if end not in config["output"] and node_bits_map[node.name][1] is not None:
          # Prefer quantizing after a trailing activation in the group.
          quant_output = None
          for out_node in quant_info_mgr.quant_groups[node.name]:
            if quant_info_mgr.Nndctgraph.node(out_node).op_type in self._activation_op_types:
              quant_output = out_node
              break
          if quant_output is not None:
            config["output"][quant_output] = [node_bits_map[node.name][1], None]
          else:
            config["output"][node.name] = [node_bits_map[node.name][1], None]
    return config
  def _find_next_quant_nodes_bits(self,
                                  quant_info_mgr,
                                  node,
                                  output_bits_candidates=None):
    # Depth-first search for the nearest quantizable descendants,
    # collecting their required input bitwidths into the given list.
    if quant_info_mgr.is_node_quantizable(node, False):
      output_bits = self._get_init_config_from_type(node.op.type)[0]
      output_bits_candidates.append(output_bits)
      return
    for c_node in quant_info_mgr.Nndctgraph.children(node):
      self._find_next_quant_nodes_bits(quant_info_mgr, c_node,
                                       output_bits_candidates)
| 39.076364 | 99 | 0.648148 |
e791becb999f18e5be8b91e591d2e8a370fad35a | 7,615 | py | Python | snooty/util_test.py | rayangler/snooty-parser | 3812adab1338ef78ff6f9aecae5e17d2ec5c5181 | [
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 6 | 2021-02-09T18:25:25.000Z | 2022-02-22T02:27:52.000Z | snooty/util_test.py | rayangler/snooty-parser | 3812adab1338ef78ff6f9aecae5e17d2ec5c5181 | [
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 37 | 2020-12-15T16:26:49.000Z | 2022-02-22T03:34:20.000Z | snooty/util_test.py | rayangler/snooty-parser | 3812adab1338ef78ff6f9aecae5e17d2ec5c5181 | [
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 6 | 2021-01-23T21:06:14.000Z | 2021-08-14T10:50:52.000Z | import contextlib
import inspect
import os
import sys
import tempfile
import textwrap
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path, PurePath
from typing import Any, Dict, Iterator, List, Optional, Tuple
from xml.sax.saxutils import escape
from . import n, rstparser
from .diagnostics import Diagnostic
from .page import Page
from .parser import JSONVisitor, Project
from .parser import parse_rst as parse_rst_multi
from .types import BuildIdentifierSet, FileId, SerializableType
__all__ = ("eprint", "ast_to_testing_string", "assert_etree_equals")
def eprint(*args: str) -> None:
    """Emit *args* on standard error instead of standard out."""
    print(*args, file=sys.stderr)
class FinalAssertionError(AssertionError):
    """AssertionError subclass marking failures already reported to stderr."""
def toctree_to_testing_string(ast: Any) -> str:
    """Serialize a toctree node (a plain dict) into a compact XML string.

    Truthy entries other than children/options/title become attributes;
    the value (or the serialized children) becomes the element contents,
    optionally prefixed by a rendered <title> element.
    """
    skipped_keys = ("children", "options", "title")
    pairs = []
    for key, val in ast.items():
        if key not in skipped_keys and val:
            pairs.append((key, val))
    for key, val in ast.get("options", {}).items():
        pairs.append((key, val))
    attrs = " ".join(
        '{}="{}"'.format(key, escape(str(val), {'"': "&quot;"})) for key, val in pairs
    )
    value = ast.get("value", "")
    if value:
        contents = escape(value)
    else:
        contents = "".join(
            toctree_to_testing_string(child) for child in ast.get("children", [])
        )
    title = ast.get("title")
    if title:
        rendered_title = "".join(ast_to_testing_string(part) for part in title)
        contents = "<title>{}</title>{}".format(rendered_title, contents)
    return "<toctree{}>{}</toctree>".format(" " + attrs if attrs else "", contents)
def check_toctree_testing_string(ast: Any, testing_string: str) -> None:
    """Assert that *ast* serializes to the toctree XML in *testing_string*."""
    expected = ET.fromstring(testing_string)
    actual = ET.fromstring(toctree_to_testing_string(ast))
    assert_etree_equals(actual, expected)
def ast_to_testing_string(ast: Any) -> str:
    """Serialize an AST node (dict or n.Node) into a compact XML string.

    Truthy entries other than the structural keys become attributes; the
    value (or the serialized children) becomes the element contents, with
    any term and argument parts rendered in front of it.
    """
    if isinstance(ast, n.Node):
        ast = ast.serialize()
    skipped_keys = ("argument", "term", "value", "children", "type", "position", "options")
    pairs = []
    for key, val in ast.items():
        if key not in skipped_keys and val:
            pairs.append((key, val))
    for key, val in ast.get("options", {}).items():
        pairs.append((key, val))
    attrs = " ".join(
        '{}="{}"'.format(key, escape(str(val), {'"': "&quot;"})) for key, val in pairs
    )
    value = ast.get("value", "")
    if value:
        contents = escape(value)
    else:
        contents = "".join(
            ast_to_testing_string(child) for child in ast.get("children", [])
        )
    if "argument" in ast:
        rendered = "".join(ast_to_testing_string(part) for part in ast["argument"])
        contents = rendered + contents
    if "term" in ast:
        rendered = "".join(ast_to_testing_string(part) for part in ast["term"])
        contents = "<term>" + rendered + "</term>" + contents
    return "<{}{}>{}</{}>".format(
        ast["type"], " " + attrs if attrs else "", contents, ast["type"]
    )
def assert_etree_equals(e1: ET.Element, goal: ET.Element) -> None:
    """Assert that two XML Elements are the same. If there is a difference in a
    child, log the difference to stderr."""
    assert e1.tag == goal.tag
    # NOTE(review): text is only compared when BOTH elements have truthy
    # text, so a mismatch like "abc" vs None is not caught here - confirm.
    if e1.text and goal.text:
        assert (e1.text.strip() if e1.text else "") == (
            goal.text.strip() if goal.text else ""
        )
    # Comparing the tail is interesting because we want to consider
    # "<whitespace>" and None to be equivalent. Coerce None to an empty
    # string, and strip both sides.
    assert (e1.tail or "").strip() == (goal.tail or "").strip()
    assert e1.attrib == goal.attrib
    assert len(e1) == len(goal)
    for c1, goalc in zip(e1, goal):
        try:
            assert_etree_equals(c1, goalc)
        except AssertionError as err:
            # If the assertion has already been logged, don't do it again.
            if isinstance(err, FinalAssertionError):
                raise err
            # Report this tree diff to stderr.
            wrapper = textwrap.TextWrapper(
                width=100, initial_indent=" ", subsequent_indent=" "
            )
            eprint(
                "{}\n{}\nshould be\n{}".format(
                    err,
                    "\n".join(wrapper.wrap(ET.tostring(c1, encoding="unicode"))),
                    "\n".join(wrapper.wrap(ET.tostring(goalc, encoding="unicode"))),
                )
            )
            # Inform higher stack frames not to log this exception
            raise FinalAssertionError(err)
def check_ast_testing_string(ast: Any, testing_string: str) -> None:
    """Assert that *ast* serializes to the XML described by *testing_string*."""
    expected = ET.fromstring(testing_string)
    actual = ET.fromstring(ast_to_testing_string(ast))
    assert_etree_equals(actual, expected)
class BackendTestResults:
    """Build-callback sink that records pages, metadata and diagnostics."""

    def __init__(self) -> None:
        """Start with empty page, metadata and diagnostic stores."""
        self.pages: Dict[FileId, Page] = {}
        self.metadata: Dict[str, SerializableType] = {}
        self.diagnostics: Dict[FileId, List[Diagnostic]] = defaultdict(list)

    def on_progress(self, progress: int, total: int, message: str) -> None:
        """Progress reports are ignored in tests."""

    def on_diagnostics(self, path: FileId, diagnostics: List[Diagnostic]) -> None:
        """Accumulate diagnostics per file."""
        self.diagnostics[path].extend(diagnostics)

    def on_update(
        self,
        prefix: List[str],
        build_identifiers: BuildIdentifierSet,
        page_id: FileId,
        page: Page,
    ) -> None:
        """Record the latest version of a page."""
        self.pages[page_id] = page

    def on_update_metadata(
        self,
        prefix: List[str],
        build_identifiers: BuildIdentifierSet,
        field: Dict[str, SerializableType],
    ) -> None:
        """Merge new metadata fields into the accumulated metadata."""
        self.metadata.update(field)

    def on_delete(self, page_id: FileId, build_identifiers: BuildIdentifierSet) -> None:
        """Page deletions are ignored in tests."""

    def flush(self) -> None:
        """Nothing is buffered, so flushing is a no-op."""
@contextlib.contextmanager
def make_test(
    files: Dict[PurePath, str], name: str = ""
) -> Iterator[BackendTestResults]:
    """Create a temporary test project with the given files.

    A snooty.toml is generated automatically when the caller does not
    supply one.  The project is built immediately and the populated
    BackendTestResults object is yielded.
    """
    need_to_make_snooty_toml = Path("snooty.toml") not in files
    if need_to_make_snooty_toml:
        # Create a reasonable name for this project: go with the caller name
        # NOTE(review): stack()[2] assumes a fixed call depth through the
        # contextmanager machinery - confirm before refactoring call sites.
        name = inspect.stack()[2].function
    with tempfile.TemporaryDirectory(prefix=f"{name}-") as tempdir:
        root = Path(tempdir)
        if need_to_make_snooty_toml:
            root.joinpath("snooty.toml").write_text(f'name = "{name}"\n')
        for filename, file_text in files.items():
            assert not filename.is_absolute()
            file_path = root.joinpath(filename)
            if file_path.parent != root:
                # Create intermediate directories for nested file paths.
                os.makedirs(file_path.parent, exist_ok=True)
            file_path.write_text(file_text)
        backend = BackendTestResults()
        project = Project(root, backend, {})
        project.build()
        yield backend
def parse_rst(
    parser: rstparser.Parser[JSONVisitor], path: Path, text: Optional[str] = None
) -> Tuple[Page, List[Diagnostic]]:
    """Parse a single rST input, returning only the first (page, diagnostics) result."""
    results = parse_rst_multi(parser, path, text)
    first = results[0]
    return first
| 31.995798 | 102 | 0.611819 |
d0166364ce76839b9cc7423af578ea130d881df6 | 5,797 | py | Python | autorad/feature_extraction/extractor.py | pwoznicki/Radiomics | f30d8b5b2b54f52de45bc18a433be7855ca346c9 | [
"Apache-2.0"
] | null | null | null | autorad/feature_extraction/extractor.py | pwoznicki/Radiomics | f30d8b5b2b54f52de45bc18a433be7855ca346c9 | [
"Apache-2.0"
] | null | null | null | autorad/feature_extraction/extractor.py | pwoznicki/Radiomics | f30d8b5b2b54f52de45bc18a433be7855ca346c9 | [
"Apache-2.0"
] | null | null | null | import logging
from pathlib import Path
import pandas as pd
import radiomics
from joblib import Parallel, delayed
from radiomics.featureextractor import RadiomicsFeatureExtractor
from tqdm import tqdm
from autorad.config import config
from autorad.config.type_definitions import PathLike
from autorad.data.dataset import ImageDataset
from autorad.utils.utils import set_n_jobs, time_it
log = logging.getLogger(__name__)
# Silence the pyRadiomics logger
logging.getLogger("radiomics").setLevel(logging.WARNING)
class FeatureExtractor:
    """Extract radiomics features for every image/mask pair in an ImageDataset."""

    def __init__(
        self,
        dataset: ImageDataset,
        feature_set: str = "pyradiomics",
        extraction_params: PathLike = "Baessler_CT.yaml",
        n_jobs: int | None = None,
    ):
        """
        Args:
            dataset: ImageDataset containing image paths, mask paths, and IDs
            feature_set: library to use features from (for now only pyradiomics)
            extraction_params: path to the JSON file containing the extraction
                parameters, or a string containing the name of the file in the
                default extraction parameter directory
                (autorad.config.pyradiomics_params)
            n_jobs: number of parallel jobs to run
        Returns:
            None
        """
        self.dataset = dataset
        self.feature_set = feature_set
        self.extraction_params = self._get_extraction_param_path(
            extraction_params
        )
        log.info(f"Using extraction params from {self.extraction_params}")
        self.n_jobs = set_n_jobs(n_jobs)
        self._initialize_extractor()

    def _get_extraction_param_path(self, extraction_params: PathLike) -> Path:
        """Resolve ``extraction_params`` to an existing file.

        Accepts either a path to an existing file, or the bare name of a file
        inside the default parameter directory (config.PARAM_DIR).

        Raises:
            ValueError: if neither location contains the file.
        """
        default_extraction_param_dir = Path(config.PARAM_DIR)
        if Path(extraction_params).is_file():
            result = Path(extraction_params)
        elif (default_extraction_param_dir / str(extraction_params)).is_file():
            result = default_extraction_param_dir / extraction_params
        else:
            raise ValueError(
                f"Extraction parameter file {extraction_params} not found."
            )
        return result

    def run(self) -> pd.DataFrame:
        """
        Run feature extraction.
        Returns a DataFrame with extracted features merged with data from the
        ImageDataset.df.
        """
        log.info("Extracting features")
        if self.n_jobs is None:
            feature_df = self.get_features()
        else:
            feature_df = self.get_features_parallel()
        # Add all data from ImageDataset.df; merge on the shared row index.
        try:
            result = self.dataset.df.merge(
                feature_df, left_index=True, right_index=True
            )
        except ValueError as e:
            # Chain the original exception so the underlying merge problem
            # stays visible in the traceback.
            raise ValueError("Error concatenating features and metadata.") from e
        return result

    def save_config(self):
        """
        Save the extraction parameters to a JSON file. Should I use MLFlow here?
        """
        # TODO: not implemented yet.
        pass

    def _initialize_extractor(self):
        """Instantiate the backing extractor for the configured feature set.

        Raises:
            ValueError: if ``feature_set`` is not a supported library.
        """
        if self.feature_set == "pyradiomics":
            self.extractor = RadiomicsFeatureExtractor(
                str(self.extraction_params)
            )
        else:
            raise ValueError("Feature set not supported")
        log.info(f"Initialized extractor {self.feature_set}")
        return self

    def get_features_for_single_case(
        self, image_path: PathLike, mask_path: PathLike
    ) -> dict | None:
        """Extract features for one image/mask pair.

        Returns:
            dict with extracted features, or None when the image or mask is
            missing or extraction failed (best-effort: the case is skipped
            with a log message instead of aborting the whole run).
        """
        if not Path(image_path).is_file():
            log.warning(
                f"Image not found. Skipping case... (path={image_path}"
            )
            return None
        if not Path(mask_path).is_file():
            log.warning(f"Mask not found. Skipping case... (path={mask_path}")
            return None
        try:
            feature_dict = self.extractor.execute(
                str(image_path),
                str(mask_path),
            )
        except ValueError:
            error_msg = f"Error extracting features for image, \
            mask pair {image_path}, {mask_path}"
            log.error(error_msg)
            return None
        return dict(feature_dict)

    @time_it
    def get_features(self) -> pd.DataFrame:
        """
        Run extraction for all cases sequentially.
        """
        image_paths = self.dataset.image_paths
        mask_paths = self.dataset.mask_paths
        lst_of_feature_dicts = [
            self.get_features_for_single_case(image_path, mask_path)
            for image_path, mask_path in tqdm(zip(image_paths, mask_paths))
        ]
        feature_df = pd.DataFrame(lst_of_feature_dicts)
        return feature_df

    @time_it
    def get_features_parallel(self) -> pd.DataFrame:
        """Run extraction for all cases in parallel using joblib.

        Raises:
            RuntimeError: if the parallel run fails for any reason.
        """
        image_paths = self.dataset.image_paths
        mask_paths = self.dataset.mask_paths
        try:
            with Parallel(n_jobs=self.n_jobs) as parallel:
                list_of_feature_dicts = parallel(
                    delayed(self.get_features_for_single_case)(
                        image_path, mask_path
                    )
                    for image_path, mask_path in zip(image_paths, mask_paths)
                )
        except Exception as e:
            # Chain the cause; the bare RuntimeError previously hid it.
            raise RuntimeError("Multiprocessing failed! :/") from e
        feature_df = pd.DataFrame(list_of_feature_dicts)
        return feature_df

    def get_pyradiomics_feature_names(self) -> list[str]:
        """Return all '<class>_<feature>' names pyradiomics can compute."""
        class_obj = radiomics.featureextractor.getFeatureClasses()
        feature_classes = list(class_obj.keys())
        feature_names = [
            f"{klass}_{name}"
            for klass in feature_classes
            for name in class_obj[klass].getFeatureNames()
        ]
        return feature_names
| 34.505952 | 80 | 0.618596 |
a470707db996102d9a69c4b80458b9e8b57e1775 | 3,113 | py | Python | senlin-7.0.0/senlin/engine/notifications/heat_endpoint.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | senlin-7.0.0/senlin/engine/notifications/heat_endpoint.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | senlin-7.0.0/senlin/engine/notifications/heat_endpoint.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from senlin.common import context
from senlin.engine.notifications import base
from senlin import objects
from senlin.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
class HeatNotificationEndpoint(base.Endpoints):
STACK_FAILURE_EVENTS = {
'orchestration.stack.delete.end': 'DELETE',
}
def __init__(self, project_id, engine_id, recover_action):
self.filter_rule = messaging.NotificationFilter(
publisher_id='^orchestration.*',
event_type='^orchestration\.stack\..*',
context={'project_id': '^%s$' % project_id})
self.project_id = project_id
self.engine_id = engine_id
self.rpc = rpc_client.EngineClient()
self.recover_action = recover_action
self.exchange = cfg.CONF.health_manager.heat_control_exchange
self.target = messaging.Target(topic='notifications',
exchange=self.exchange)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
if event_type not in self.STACK_FAILURE_EVENTS:
return
tags = payload['tags']
if tags is None or tags == []:
return
cluster_id = None
node_id = None
for tag in tags:
if cluster_id is None:
start = tag.find('cluster_id')
if start == 0 and tag[11:]:
cluster_id = tag[11:]
if node_id is None:
start = tag.find('cluster_node_id')
if start == 0:
node_id = tag[16:]
if cluster_id is None or node_id is None:
return
ctx = context.get_service_context(project=self.project_id,
user=payload['user_identity'])
enabled = self._check_registry_status(ctx, self.engine_id, cluster_id)
if enabled is False:
return
params = {
'event': self.STACK_FAILURE_EVENTS[event_type],
'state': payload.get('state', 'Unknown'),
'stack_id': payload.get('stack_identity', 'Unknown'),
'timestamp': metadata['timestamp'],
'publisher': publisher_id,
'operation': self.recover_action['operation'],
}
LOG.info("Requesting stack recovery: %s", node_id)
req = objects.NodeRecoverRequest(identity=node_id, params=params)
self.rpc.call(ctx, 'node_recover', req)
| 37.059524 | 78 | 0.630903 |
802ee155549e72efc4fa369e67c577c77def86b6 | 456 | py | Python | smartcontract/venv/lib/python3.6/site-packages/klein/interfaces.py | simplitech/neoinvoice | bc9a0217858938b49f99fef13b3439f4a537a5f5 | [
"MIT"
] | null | null | null | smartcontract/venv/lib/python3.6/site-packages/klein/interfaces.py | simplitech/neoinvoice | bc9a0217858938b49f99fef13b3439f4a537a5f5 | [
"MIT"
] | null | null | null | smartcontract/venv/lib/python3.6/site-packages/klein/interfaces.py | simplitech/neoinvoice | bc9a0217858938b49f99fef13b3439f4a537a5f5 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division
from zope.interface import Attribute, Interface
class IKleinRequest(Interface):
branch_segments = Attribute("Segments consumed by a branch route.")
mapper = Attribute("L{werkzeug.routing.MapAdapter}")
def url_for(
self, endpoint, values=None, method=None,
force_external=False, append_unknown=True,
):
"""
L{werkzeug.routing.MapAdapter.build}
"""
| 26.823529 | 71 | 0.692982 |
f901e42dcdee796fb75afe93ccbefd70c14c4064 | 36,117 | py | Python | lib/galaxy/webapps/galaxy/controllers/forms.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/controllers/forms.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | lib/galaxy/webapps/galaxy/controllers/forms.py | vimalkumarvelayudhan/galaxy | ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | [
"CC-BY-3.0"
] | 3 | 2015-02-22T13:34:16.000Z | 2020-10-01T01:28:04.000Z | from galaxy.web.base.controller import *
from galaxy.model.orm import *
from galaxy.datatypes import sniff
from galaxy import model, util
import logging, os, sys
from galaxy.web.form_builder import *
from galaxy.tools.parameters.basic import parameter_types
from xml.etree.ElementTree import XML, Element
from galaxy.util.odict import odict
import copy
from galaxy.web.framework.helpers import time_ago, iff, grids
log = logging.getLogger( __name__ )
VALID_FIELDNAME_RE = re.compile( "^[a-zA-Z0-9\_]+$" )
class FormsGrid( grids.Grid ):
# Custom column types
class NameColumn( grids.TextColumn ):
def get_value(self, trans, grid, form):
return form.latest_form.name
class DescriptionColumn( grids.TextColumn ):
def get_value(self, trans, grid, form):
return form.latest_form.desc
class TypeColumn( grids.TextColumn ):
def get_value(self, trans, grid, form):
return form.latest_form.type
# Grid definition
title = "Forms"
template = "admin/forms/grid.mako"
model_class = model.FormDefinitionCurrent
default_sort_key = "-create_time"
num_rows_per_page = 50
preserve_state = True
use_paging = True
default_filter = dict( deleted="False" )
columns = [
NameColumn( "Name",
key="name",
model_class=model.FormDefinition,
link=( lambda item: iff( item.deleted, None, dict( operation="view_latest_form_definition",
id=item.id ) ) ),
attach_popup=True,
filterable="advanced" ),
DescriptionColumn( "Description",
key='desc',
model_class=model.FormDefinition,
filterable="advanced" ),
TypeColumn( "Type" ),
grids.DeletedColumn( "Deleted",
key="deleted",
visible=False,
filterable="advanced" )
]
columns.append( grids.MulticolFilterColumn( "Search",
cols_to_filter=[ columns[0], columns[1] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = [
grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
]
global_actions = [
grids.GridAction( "Create new form", dict( controller='forms', action='create_form_definition' ) )
]
def build_initial_query( self, trans, **kwargs ):
return trans.sa_session.query( self.model_class ).join (model.FormDefinition, self.model_class.latest_form_id == model.FormDefinition.id)
class Forms( BaseUIController ):
    """Admin controller for creating, editing and deleting form definitions."""
    # Template describing an empty TextField-type field definition.
    # NOTE(review): this is a mutable class-level dict; edit_form_definition()
    # mutates it in place and appends the same object to the field list, so
    # it is shared across requests and across appended fields -- confirm the
    # aliasing is intended.
    empty_field = { 'name': '',
                    'label': '',
                    'helptext': '',
                    'visible': True,
                    'required': False,
                    'type': model.TextField.__name__,
                    'selectlist': [],
                    'layout': 'none',
                    'default': '' }
    forms_grid = FormsGrid()
    @web.expose
    @web.require_admin
    def browse_form_definitions( self, trans, **kwd ):
        """Render the forms grid, dispatching any requested grid operation first."""
        if 'operation' in kwd:
            operation = kwd['operation'].lower()
            # Every operation acts on a specific form, so an id is mandatory.
            if not kwd.get( 'id', None ):
                return trans.response.send_redirect( web.url_for( controller='forms',
                                                                  action='browse_form_definitions',
                                                                  status='error',
                                                                  message="Invalid form ID") )
            if operation == "view_latest_form_definition":
                return self.view_latest_form_definition( trans, **kwd )
            elif operation == "delete":
                return self.delete_form_definition( trans, **kwd )
            elif operation == "undelete":
                return self.undelete_form_definition( trans, **kwd )
            elif operation == "edit":
                return self.edit_form_definition( trans, **kwd )
        # No (recognized) operation: just render the grid.
        return self.forms_grid( trans, **kwd )
@web.expose
@web.require_admin
def view_latest_form_definition( self, trans, **kwd ):
'''Displays the layout of the latest version of the form definition'''
form_definition_current_id = kwd.get( 'id', None )
try:
form_definition_current = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ) \
.get( trans.security.decode_id( form_definition_current_id ) )
except:
return trans.response.send_redirect( web.url_for( controller='forms',
action='browse_form_definitions',
message='Invalid form',
status='error' ) )
return trans.fill_template( '/admin/forms/view_form_definition.mako',
form_definition=form_definition_current.latest_form )
    @web.expose
    @web.require_admin
    def create_form_definition( self, trans, **kwd ):
        """Show the 'create new form' page and handle its submission.

        On successful creation the user is redirected to the edit page of the
        new form; on validation failure they are redirected back here with
        the error message.
        """
        params = util.Params( kwd )
        message = util.restore_text( params.get( 'message', '' ) )
        status = params.get( 'status', 'done' )
        # Flag consulted after saving: presumably set True when the fields
        # were imported from an uploaded csv file -- the setter is not
        # visible here, TODO confirm (likely in __import_fields).
        self.__imported_from_file = False
        if params.get( 'create_form_button', False ):
            form_definition, message = self.save_form_definition( trans, form_definition_current_id=None, **kwd )
            if not form_definition:
                # Validation failed: redisplay the creation page, preserving
                # the user's name/description input.
                return trans.response.send_redirect( web.url_for( controller='forms',
                                                                  action='create_form_definition',
                                                                  message=message,
                                                                  status='error',
                                                                  name=util.restore_text( params.get( 'name', '' ) ),
                                                                  description=util.restore_text( params.get( 'description', '' ) ) ))
            if self.__imported_from_file:
                # Imported fields: open the editor without adding a field.
                return trans.response.send_redirect( web.url_for( controller='forms',
                                                                  action='edit_form_definition',
                                                                  id=trans.security.encode_id( form_definition.current.id )) )
            else:
                # Manually created form: open the editor with one field added.
                return trans.response.send_redirect( web.url_for( controller='forms',
                                                                  action='edit_form_definition',
                                                                  id=trans.security.encode_id( form_definition.current.id ),
                                                                  add_field_button='Add field',
                                                                  name=form_definition.name,
                                                                  description=form_definition.desc,
                                                                  form_type_select_field=form_definition.type ) )
        # Initial GET: build the creation form widgets.
        inputs = [ ( 'Name', TextField( 'name', 40, util.restore_text( params.get( 'name', '' ) ) ) ),
                   ( 'Description', TextField( 'description', 40, util.restore_text( params.get( 'description', '' ) ) ) ),
                   ( 'Type', self.__build_form_types_widget( trans, selected=params.get( 'form_type', 'none' ) ) ),
                   ( 'Import from csv file (Optional)', FileField( 'file_data', 40, '' ) ) ]
        return trans.fill_template( '/admin/forms/create_form.mako',
                                    inputs=inputs,
                                    message=message,
                                    status=status )
    @web.expose
    @web.require_admin
    def edit_form_definition( self, trans, response_redirect=None, **kwd ):
        '''
        This callback method is for handling form editing. The value of response_redirect
        should be an URL that is defined by the caller. This allows for redirecting as desired
        when the form changes have been saved. For an example of how this works, see the
        edit_template() method in the base controller.
        '''
        params = util.Params( kwd )
        message = util.restore_text( params.get( 'message', '' ) )
        status = params.get( 'status', 'done' )
        try:
            form_definition_current = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(kwd['id']) )
        except:
            return trans.response.send_redirect( web.url_for( controller='forms',
                                                              action='browse_form_definitions',
                                                              message='Invalid form',
                                                              status='error' ) )
        form_definition = form_definition_current.latest_form
        # TODO: eliminate the need for this refresh param.
        if params.get( 'refresh', False ):
            # Refresh: rebuild the (possibly unsaved) form from the request.
            current_form = self.get_current_form( trans, **kwd )
        else:
            # Show the saved form for editing.
            current_form = self.get_saved_form( form_definition )
        # Exactly one of the branches below runs, depending on which submit
        # button the user clicked.
        # Save changes
        if params.get( 'save_changes_button', False ):
            new_form_definition, message = self.save_form_definition( trans, form_definition_current_id=form_definition.form_definition_current.id, **kwd )
            # if validation error encountered while saving the form, show the
            # unsaved form, with the error message
            if not new_form_definition:
                status = 'error'
            else:
                # everything went fine. form saved successfully. Show the saved form or redirect
                # to response_redirect if appropriate.
                if response_redirect:
                    return trans.response.send_redirect( response_redirect )
                form_definition = new_form_definition
                current_form = self.get_saved_form( form_definition )
                message = "The form '%s' has been updated with the changes." % form_definition.name
        # Add a layout grid
        elif params.get( 'add_layout_grid_button', False ):
            current_form[ 'layout' ].append( '' )
        # Delete a layout grid
        elif params.get( 'remove_layout_grid_button', False ):
            # The 1-based grid number is the third word of the button label.
            index = int( kwd[ 'remove_layout_grid_button' ].split( ' ' )[2] ) - 1
            del current_form[ 'layout' ][index]
        # Add a field
        elif params.get( 'add_field_button', False ):
            field_index = len( current_form[ 'fields' ] ) + 1
            # NOTE(review): empty_field is a shared class-level dict mutated
            # in place and appended by reference -- confirm intended.
            self.empty_field[ 'name' ] = '%i_field_name' % field_index
            self.empty_field[ 'label' ] = 'Field label %i' % field_index
            current_form[ 'fields' ].append( self.empty_field )
        # Delete a field
        elif params.get( 'remove_button', False ):
            # find the index of the field to be removed from the remove button label
            index = int( kwd[ 'remove_button' ].split( ' ' )[2] ) - 1
            del current_form[ 'fields' ][ index ]
        # Add SelectField option
        elif 'Add' in kwd.values():
            current_form, status, message = self.__add_select_field_option( trans=trans,
                                                                            current_form=current_form,
                                                                            **kwd)
        # Remove SelectField option
        elif 'Remove' in kwd.values():
            current_form, status, message = self.__remove_select_field_option( trans=trans,
                                                                               current_form=current_form,
                                                                               **kwd)
        return self.show_editable_form_definition( trans=trans,
                                                   form_definition=form_definition,
                                                   current_form=current_form,
                                                   message=message,
                                                   status=status,
                                                   response_redirect=response_redirect,
                                                   **kwd )
def get_saved_form( self, form_definition ):
'''
This retrieves the saved form and returns a dictionary containing the name,
desc, type, layout & fields of the form
'''
if form_definition.type == form_definition.types.SAMPLE:
return dict( name=form_definition.name,
desc=form_definition.desc,
type=form_definition.type,
layout=list( copy.deepcopy( form_definition.layout ) ),
fields=list( copy.deepcopy( form_definition.fields ) ) )
return dict( name=form_definition.name,
desc=form_definition.desc,
type=form_definition.type,
layout=[],
fields=list( copy.deepcopy( form_definition.fields ) ) )
    def get_current_form( self, trans, **kwd ):
        '''
        This method gets all the unsaved user-entered form details and returns a
        dictionary containing the name, desc, type, layout & fields of the form
        '''
        params = util.Params( kwd )
        name = util.restore_text( params.name )
        desc = util.restore_text( params.description ) or ""
        form_type = util.restore_text( params.form_type_select_field )
        # Collect the user-entered layout grids if this is a sample form
        # definition; grid params are grid_layout0, grid_layout1, ... until
        # the first gap.
        layout = []
        if form_type == trans.model.FormDefinition.types.SAMPLE:
            index = 0
            while True:
                if kwd.has_key( 'grid_layout%i' % index ):
                    grid_name = util.restore_text( params.get( 'grid_layout%i' % index, '' ) )
                    layout.append( grid_name )
                    index = index + 1
                else:
                    break
        # For csv file import: if a file was uploaded, the fields come from
        # the file; otherwise from the numbered form parameters.
        csv_file = params.get( 'file_data', '' )
        fields = []
        if csv_file == '':
            # Collect the user-entered fields (field_label_0, _1, ... until
            # the first gap).
            index = 0
            while True:
                if kwd.has_key( 'field_label_%i' % index ):
                    fields.append( self.__get_field( index, **kwd ) )
                    index = index + 1
                else:
                    break
            fields = fields
        else:
            fields, layout = self.__import_fields(trans, csv_file, form_type)
        return dict(name = name,
                    desc = desc,
                    type = form_type,
                    layout = layout,
                    fields = fields)
    def save_form_definition( self, trans, form_definition_current_id=None, **kwd ):
        '''
        Validate and persist the current form as a new FormDefinition
        revision.

        Returns (form_definition, message) on success, or (None, error
        message) when validation fails.  When form_definition_current_id is
        None a brand-new form is created; otherwise the existing form gains
        a new latest revision.
        '''
        # check the form for invalid inputs
        flag, message = self.__validate_form( **kwd )
        if not flag:
            return None, message
        current_form = self.get_current_form( trans, **kwd )
        # validate fields: labels required, names must be valid identifiers
        # and unique within the form
        field_names_dict = {}
        for field in current_form[ 'fields' ]:
            if not field[ 'label' ]:
                return None, "All the field labels must be completed."
            if not VALID_FIELDNAME_RE.match( field[ 'name' ] ):
                return None, "'%s' is not a valid field name." % field[ 'name' ]
            if field_names_dict.has_key( field[ 'name' ] ):
                return None, "Each field name must be unique in the form definition. '%s' is not unique." % field[ 'name' ]
            else:
                field_names_dict[ field[ 'name' ] ] = 1
        # a sample form must have at least one layout grid
        if current_form[ 'type' ] == trans.app.model.FormDefinition.types.SAMPLE and not len( current_form[ 'layout' ] ):
            current_form[ 'layout' ] = [ 'Layout1' ]
        # create a new form definition revision
        form_definition = trans.app.model.FormDefinition( name=current_form[ 'name' ],
                                                          desc=current_form[ 'desc' ],
                                                          fields=current_form[ 'fields' ],
                                                          form_definition_current=None,
                                                          form_type=current_form[ 'type' ],
                                                          layout=current_form[ 'layout' ] )
        if form_definition_current_id: # save changes to the existing form
            # change the pointer in the form_definition_current table to point
            # to this new record
            form_definition_current = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( form_definition_current_id )
        else: # create a new form
            form_definition_current = trans.app.model.FormDefinitionCurrent()
        # create corresponding row in the form_definition_current table
        form_definition.form_definition_current = form_definition_current
        form_definition_current.latest_form = form_definition
        trans.sa_session.add( form_definition_current )
        trans.sa_session.flush()
        message = "The new form named '%s' has been created. " % (form_definition.name)
        return form_definition, message
    def show_editable_form_definition( self, trans, form_definition, current_form, message='', status='done', response_redirect=None, **kwd ):
        """
        Displays the form and any of the changes made to it in edit mode. In this method
        all the widgets are built for the name, description and all the fields of a form
        definition.
        """
        params = util.Params( kwd )
        # name & description widgets; the form type is fixed once created,
        # so it travels as a hidden field
        form_details = [ ( 'Name', TextField( 'name', 40, current_form[ 'name' ] ) ),
                         ( 'Description', TextField( 'description', 40, current_form[ 'desc' ] ) ),
                         ( 'Type', HiddenField( 'form_type_select_field', current_form['type']) ) ]
        # layout grid name widgets (sample forms only)
        form_layout = []
        if current_form[ 'type' ] == trans.app.model.FormDefinition.types.SAMPLE:
            for index, layout_name in enumerate( current_form[ 'layout' ] ):
                form_layout.append( TextField( 'grid_layout%i' % index, 40, layout_name ))
        # one widget group per field
        field_details = []
        for field_index, field in enumerate( current_form[ 'fields' ] ):
            field_widgets = self.build_form_definition_field_widgets( trans=trans,
                                                                      layout_grids=current_form['layout'],
                                                                      field_index=field_index,
                                                                      field=field,
                                                                      form_type=current_form['type'] )
            field_details.append( field_widgets )
        return trans.fill_template( '/admin/forms/edit_form_definition.mako',
                                    form_details=form_details,
                                    field_details=field_details,
                                    form_definition=form_definition,
                                    field_types=trans.model.FormDefinition.supported_field_types,
                                    message=message,
                                    status=status,
                                    current_form_type=current_form[ 'type' ],
                                    layout_grids=form_layout,
                                    response_redirect=response_redirect )
@web.expose
@web.require_admin
def delete_form_definition( self, trans, **kwd ):
id_list = util.listify( kwd['id'] )
delete_failed = []
for id in id_list:
try:
form_definition_current = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(id) )
except:
return trans.response.send_redirect( web.url_for( controller='forms',
action='browse_form_definitions',
message='Invalid form',
status='error' ) )
form_definition_current.deleted = True
trans.sa_session.add( form_definition_current )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='forms',
action='browse_form_definitions',
message='%i forms have been deleted.' % len(id_list),
status='done') )
@web.expose
@web.require_admin
def undelete_form_definition( self, trans, **kwd ):
id_list = util.listify( kwd['id'] )
delete_failed = []
for id in id_list:
try:
form_definition_current = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(id) )
except:
return trans.response.send_redirect( web.url_for( controller='forms',
action='browse_form_definitions',
message='Invalid form',
status='error' ) )
form_definition_current.deleted = False
trans.sa_session.add( form_definition_current )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='forms',
action='browse_form_definitions',
message='%i forms have been undeleted.' % len(id_list),
status='done') )
def build_form_definition_field_widgets( self, trans, layout_grids, field_index, field, form_type ):
'''
This method returns a list of widgets which describes a form definition field. This
includes the field label, helptext, type, selectfield options, required/optional & layout
'''
# field label
label = TextField( 'field_label_'+str( field_index ), 40, field['label'] )
# help text
helptext = TextField( 'field_helptext_'+str( field_index ), 40, field['helptext'] )
# field type
field_type_select_field = SelectField( 'field_type_'+str( field_index ),
refresh_on_change=True,
refresh_on_change_values=[ SelectField.__name__ ] )
# fill up the field type selectfield options
field_type_options = []
# if the form is for defining samples, then use the sample field types
# which does not include TextArea & AddressField
if form_type == trans.model.FormDefinition.types.SAMPLE:
for supported_field_type in trans.model.Sample.supported_field_types:
if supported_field_type.__name__ == field[ 'type' ]:
field_type_select_field.add_option( supported_field_type.__name__,
supported_field_type.__name__,
selected=True )
if supported_field_type.__name__ == SelectField.__name__:
# when field type is Selectfield, add option Textfields
field_type_options = self.__build_field_type_select_field_options( field, field_index )
else:
field_type_select_field.add_option( supported_field_type.__name__,
supported_field_type.__name__ )
else:
for supported_field_type in trans.model.FormDefinition.supported_field_types:
if supported_field_type.__name__ == field[ 'type' ]:
field_type_select_field.add_option( supported_field_type.__name__,
supported_field_type.__name__,
selected=True )
if supported_field_type.__name__ == SelectField.__name__:
# when field type is Selectfield, add option Textfields
field_type_options = self.__build_field_type_select_field_options( field, field_index )
else:
field_type_select_field.add_option( supported_field_type.__name__,
supported_field_type.__name__ )
# required/optional radio button
required = SelectField( 'field_required_'+str(field_index), display='radio' )
if field[ 'required' ] == 'required':
required.add_option( 'Required', 'required', selected=True )
required.add_option( 'Optional', 'optional' )
else:
required.add_option( 'Required', 'required' )
required.add_option( 'Optional', 'optional', selected=True )
# layout grid option select_field
if layout_grids and form_type == trans.model.FormDefinition.types.SAMPLE:
layout_select_field = SelectField( 'field_layout_'+str( field_index ) )
for index, grid_name in enumerate( layout_grids ):
if str( field.get( 'layout', None ) ) == str( index ): #existing behavior: integer indexes are stored as strings.
grid_selected = True
else:
grid_selected = False
layout_select_field.add_option("%i. %s" %( index+1, grid_name ), index, selected=grid_selected )
# default value
default_value = TextField( 'field_default_'+str(field_index),
40,
field.get( 'default', '' ) )
# field name
name = TextField( 'field_name_' + str( field_index ), 40, field[ 'name' ] )
name_helptext = "The field name must be unique for each field and must contain only alphanumeric characters and underscore ."
if layout_grids and form_type == trans.model.FormDefinition.types.SAMPLE:
return [ ( 'Field label', label ),
( 'Help text', helptext ),
( 'Type', field_type_select_field, "Add options below", field_type_options ),
( 'Default value', default_value ),
( '', required ),
( 'Select the grid layout to place this field', layout_select_field ),
( 'Field name', name, name_helptext ) ]
return [ ( 'Field label', label ),
( 'Help text', helptext ),
( 'Type', field_type_select_field, "Add options below", field_type_options),
( 'Default value', default_value ),
( '', required),
( 'Field name', name, name_helptext ) ]
def __build_field_type_select_field_options( self, field, field_index ):
'''
Returns a list of TextFields, one for each select field option
'''
field_type_options = []
if field[ 'selectlist' ]:
for ctr, option in enumerate( field[ 'selectlist' ] ):
option_textfield = TextField( 'field_'+str( field_index )+'_option_'+str( ctr ), 40, option )
field_type_options.append( ( 'Option '+str( ctr+1 ), option_textfield ) )
return field_type_options
def __add_select_field_option( self, trans, current_form, **kwd ):
'''
This method adds a select_field option. The kwd dict searched for
the field index which needs to be removed
'''
message=''
status='ok',
index = -1
for k, v in kwd.items():
if v == 'Add':
# extract the field index from the
# button name of format: 'addoption_<field>'
index = int(k.split('_')[1])
break
if index == -1:
# something wrong happened
message='Error in adding selectfield option',
status='error',
return current_form, status, message
# add an empty option
current_form[ 'fields' ][ index ][ 'selectlist' ].append( '' )
return current_form, status, message
def __remove_select_field_option( self, trans, current_form, **kwd ):
'''
This method removes a select_field option. The kwd dict searched for
the field index and option index which needs to be removed
'''
message=''
status='ok',
option = -1
for k, v in kwd.items():
if v == 'Remove':
# extract the field & option indices from the
# button name of format: 'removeoption_<field>_<option>'
index = int( k.split( '_' )[1] )
option = int( k.split( '_' )[2] )
break
if option == -1:
# something wrong happened
message='Error in removing selectfield option',
status='error',
return current_form, status, message
# remove the option
del current_form[ 'fields' ][ index ][ 'selectlist' ][ option ]
return current_form, status, message
def __get_select_field_options( self, index, **kwd ):
'''
This method gets all the options entered by the user for field when
the fieldtype is SelectField
'''
params = util.Params( kwd )
ctr=0
sb_options = []
while True:
if kwd.has_key( 'field_'+str(index)+'_option_'+str(ctr) ):
option = params.get( 'field_'+str(index)+'_option_'+str(ctr), None )
sb_options.append( util.restore_text( option ) )
ctr = ctr+1
else:
return sb_options
def __get_field( self, index, **kwd ):
'''
This method retrieves all the user-entered details of a field and
returns a dict.
'''
params = util.Params( kwd )
label = util.restore_text( params.get( 'field_label_%i' % index, '' ) )
name = util.restore_text( params.get( 'field_name_%i' % index, '' ) )
helptext = util.restore_text( params.get( 'field_helptext_%i' % index, '' ) )
required = params.get( 'field_required_%i' % index, False )
field_type = util.restore_text( params.get( 'field_type_%i' % index, '' ) )
layout = params.get( 'field_layout_%i' % index, '0' )
default = util.restore_text( params.get( 'field_default_%i' % index, '' ) )
if not name.strip():
name = '%i_field_name' % index
if field_type == 'SelectField':
options = self.__get_select_field_options(index, **kwd)
return { 'name': name,
'label': label,
'helptext': helptext,
'visible': True,
'required': required,
'type': field_type,
'selectlist': options,
'layout': layout,
'default': default }
return { 'name': name,
'label': label,
'helptext': helptext,
'visible': True,
'required': required,
'type': field_type,
'layout': layout,
'default': default }
def __import_fields( self, trans, csv_file, form_type ):
'''
"company","name of the company", "True", "required", "TextField",,
"due date","turnaround time", "True", "optional", "SelectField","24 hours, 1 week, 1 month"
'''
import csv
fields = []
layouts = set()
try:
reader = csv.reader(csv_file.file)
index = 1
for row in reader:
if len(row) < 7: # ignore bogus rows
continue
options = row[5].split(',')
if len(row) >= 8:
fields.append( { 'name': '%i_field_name' % index,
'label': row[0],
'helptext': row[1],
'visible': row[2],
'required': row[3],
'type': row[4],
'selectlist': options,
'layout':row[6],
'default': row[7] } )
layouts.add(row[6])
else:
fields.append( { 'name': '%i_field_name' % index,
'label': row[0],
'helptext': row[1],
'visible': row[2],
'required': row[3],
'type': row[4],
'selectlist': options,
'default': row[6] } )
index = index + 1
except:
return trans.response.send_redirect( web.url_for( controller='forms',
action='create_form',
status='error',
message='Error in importing <b>%s</b> file' % csv_file.file))
self.__imported_from_file = True
return fields, list(layouts)
    def __validate_form( self, **kwd ):
        '''
        This method checks the following text inputs are filled out by the user
        - the name of form
        - form type
        Returns (True, '') when both checks pass, otherwise
        (None, <error message>) for the first failing check.
        '''
        params = util.Params( kwd )
        # form name
        if not util.restore_text( params.name ):
            return None, 'Form name must be filled.'
        # form type: 'none' is the placeholder entry of the type selector
        if util.restore_text( params.form_type_select_field ) == 'none':
            return None, 'Form type must be selected.'
        return True, ''
def __build_form_types_widget( self, trans, selected='none' ):
form_type_select_field = SelectField( 'form_type_select_field' )
if selected == 'none':
form_type_select_field.add_option( 'Select one', 'none', selected=True )
else:
form_type_select_field.add_option( 'Select one', 'none' )
fd_types = trans.app.model.FormDefinition.types.items()
fd_types.sort()
for ft in fd_types:
if selected == ft[1]:
form_type_select_field.add_option( ft[1], ft[1], selected=True )
else:
form_type_select_field.add_option( ft[1], ft[1] )
return form_type_select_field
| 54.639939 | 155 | 0.516543 |
d3cf4966899ed092196141ba7d345989194624ca | 1,221 | py | Python | CAM2ImageArchiver/error.py | cam2proj/CAM2ImageArchiver | a8c021bb9b622475799ee6ff49d09eabaf94bb92 | [
"Apache-2.0"
] | null | null | null | CAM2ImageArchiver/error.py | cam2proj/CAM2ImageArchiver | a8c021bb9b622475799ee6ff49d09eabaf94bb92 | [
"Apache-2.0"
] | 7 | 2018-01-04T03:27:14.000Z | 2021-06-01T21:55:34.000Z | CAM2ImageArchiver/error.py | cam2proj/CAM2ImageArchiver | a8c021bb9b622475799ee6ff49d09eabaf94bb92 | [
"Apache-2.0"
] | 2 | 2019-01-16T21:39:11.000Z | 2019-05-30T14:14:47.000Z | """
Copyright 2017 Purdue University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Define the exceptions raised by the camera package.
"""
class Error(Exception):
    """Base class for every error raised by the camera package."""
class UnreachableCameraError(Error):
    """Raised when a camera cannot be reached."""
class CorruptedFrameError(Error):
    """Raised when a frame read from a camera stream is corrupted."""
class ClosedStreamError(Error):
    """Raised when a frame is requested from a stream that is already closed."""
class ExpectedCAM2APIClientCameraObject(Error):
    """Raised when an argument is not a CAM2 API Python Client Camera object."""
| 23.037736 | 72 | 0.720721 |
a0a9f633171e8793f9c72bf96045fe211ae97320 | 994 | py | Python | Lib/test/test_compiler/compiler_runtest.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_compiler/compiler_runtest.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_compiler/compiler_runtest.py | mananpal1997/cinder | a8804cc6e3a5861463ff959abcd09ad60a0763e5 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | #
# Helper script for testsuite - generally, run a file thru compiler and
# disassemble using dis_stable.
#
import ast
import re
import sys
from compiler import dis_stable
from compiler.pycodegen import compile as py_compile
# https://www.python.org/dev/peps/pep-0263/
coding_re = re.compile(rb"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
def open_with_coding(fname):
    """Open *fname* for text reading, honoring a PEP 263 coding declaration.

    Per PEP 263 the declaration may only appear on the first or second line
    of the file. Falls back to UTF-8 when no declaration is found. The caller
    is responsible for closing the returned file object.
    """
    encoding = "utf-8"
    with open(fname, "rb") as probe:
        # only the first two lines can carry the declaration
        for _ in range(2):
            match = coding_re.match(probe.readline())
            if match:
                encoding = match.group(1).decode()
                break
    return open(fname, encoding=encoding)
# Require at least a source filename argument.
if len(sys.argv) < 2:
    print("no filename provided")
    sys.exit(1)
# NOTE(review): 'peephole' already defaults to True, so the --peephole flag
# only strips itself from argv; the variable itself is never read afterwards.
peephole = True
if sys.argv[1] == "--peephole":
    peephole = True
    del sys.argv[1]
# Compile the (encoding-aware) source text and dump a stable disassembly
# of the resulting code object to stdout.
text = open_with_coding(sys.argv[1]).read()
codeobj = py_compile(text, sys.argv[1], "exec")
dis_stable.Disassembler().dump_code(codeobj, file=sys.stdout)
| 23.116279 | 74 | 0.632797 |
3f10a45b49fc0135ea5562bdd062f75f387b7c8c | 26,300 | py | Python | applications/ContactStructuralMechanicsApplication/python_scripts/alm_contact_process.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 2 | 2020-12-22T11:50:11.000Z | 2021-09-15T11:36:30.000Z | applications/ContactStructuralMechanicsApplication/python_scripts/alm_contact_process.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 3 | 2021-08-18T16:12:20.000Z | 2021-09-02T07:36:15.000Z | applications/ContactStructuralMechanicsApplication/python_scripts/alm_contact_process.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
] | 1 | 2020-12-28T15:39:39.000Z | 2020-12-28T15:39:39.000Z | # Importing the Kratos Library
import KratosMultiphysics as KM
import KratosMultiphysics.StructuralMechanicsApplication as SMA
import KratosMultiphysics.ContactStructuralMechanicsApplication as CSMA
def Factory(settings, Model):
    """Entry point used by Kratos to construct the process.

    ``settings`` must be a KM.Parameters object whose "Parameters" entry
    holds the process configuration.
    """
    if isinstance(settings, KM.Parameters):
        return ALMContactProcess(Model, settings["Parameters"])
    raise Exception("Expected input shall be a Parameters object, encapsulating a json string")
# Import sys
import sys
# Import base search process
import KratosMultiphysics.ContactStructuralMechanicsApplication.search_base_process as search_base_process
# Import auxiliar methods
from KratosMultiphysics.ContactStructuralMechanicsApplication import auxiliar_methods_solvers
class ALMContactProcess(search_base_process.SearchBaseProcess):
"""This class is used in order to compute the contact using a mortar ALM formulation
This class constructs the model parts containing the contact conditions and
initializes parameters and variables related with the contact. The class creates
search utilities to be used to create the contact pairs
Only the member variables listed below should be accessed directly.
Public member variables:
Model -- the container of the different model parts.
settings -- Kratos parameters containing solver settings.
"""
__normal_computation = {
# JSON input
"NO_DERIVATIVES_COMPUTATION": CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION,
"no_derivatives_computation": CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION,
"ELEMENTAL_DERIVATIVES": CSMA.NormalDerivativesComputation.ELEMENTAL_DERIVATIVES,
"elemental_derivatives": CSMA.NormalDerivativesComputation.ELEMENTAL_DERIVATIVES,
"NODAL_ELEMENTAL_DERIVATIVES": CSMA.NormalDerivativesComputation.NODAL_ELEMENTAL_DERIVATIVES,
"nodal_elemental_derivatives": CSMA.NormalDerivativesComputation.NODAL_ELEMENTAL_DERIVATIVES,
"NO_DERIVATIVES_COMPUTATION_WITH_NORMAL_UPDATE": CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION_WITH_NORMAL_UPDATE,
"no_derivatives_computation_with_normal_update": CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION_WITH_NORMAL_UPDATE
}
    def __init__(self, Model, settings):
        """ The default constructor of the class
        Keyword arguments:
        self -- It signifies an instance of a class.
        Model -- the container of the different model parts.
        settings -- Kratos parameters containing solver settings.
        """
        # NOTE: Due to recursive check "contact_model_part" and "assume_master_slave" requires to pre-define configurations, if more that 10 pairs of contact are required, just add. I assume nobody needs that much
        # Settings string in json format
        default_parameters = KM.Parameters("""
        {
            "help"                        : "This class is used in order to compute the contact using a mortar ALM formulation. This class constructs the model parts containing the contact conditions and initializes parameters and variables related with the contact. The class creates search utilities to be used to create the contact pairs",
            "mesh_id"                     : 0,
            "model_part_name"             : "Structure",
            "contact_model_part"          : {"0":[],"1":[],"2":[],"3":[],"4":[],"5":[],"6":[],"7":[],"8":[],"9":[]},
            "assume_master_slave"         : {"0":[],"1":[],"2":[],"3":[],"4":[],"5":[],"6":[],"7":[],"8":[],"9":[]},
            "contact_property_ids"        : {"0": 0,"1": 0,"2": 0,"3": 0,"4": 0,"5": 0,"6": 0,"7": 0,"8": 0,"9": 0},
            "friction_coefficients"       : {"0": 0.0,"1": 0.0,"2": 0.0,"3": 0.0,"4": 0.0,"5": 0.0,"6": 0.0,"7": 0.0,"8": 0.0,"9": 0.0},
            "contact_type"                : "Frictionless",
            "not_normal_update_frictional": false,
            "interval"                    : [0.0,"End"],
            "normal_variation"            : "no_derivatives_computation",
            "frictional_law"              : "Coulomb",
            "tangent_factor"              : 2.5e-2,
            "operator_threshold"          : 1.0e-3,
            "slip_augmentation_coefficient" : 0.0,
            "slip_threshold"              : 2.0e-2,
            "zero_tolerance_factor"       : 1.0,
            "integration_order"           : 2,
            "consider_tessellation"       : false,
            "normal_check_proportion"     : 0.1,
            "clear_inactive_for_post"     : true,
            "slip_step_reset_frequency"   : 1,
            "search_parameters" : {
                "type_search"                         : "in_radius_with_obb",
                "simple_search"                       : false,
                "adapt_search"                        : false,
                "search_factor"                       : 3.5,
                "active_check_factor"                 : 0.01,
                "max_number_results"                  : 1000,
                "bucket_size"                         : 4,
                "dynamic_search"                      : false,
                "static_check_movement"               : false,
                "database_step_update"                : 1,
                "normal_orientation_threshold"        : 1.0e-1,
                "consider_gap_threshold"              : false,
                "debug_mode"                          : false,
                "predict_correct_lagrange_multiplier" : false,
                "check_gap"                           : "check_mapping",
                "octree_search_parameters" : {
                    "bounding_box_factor"             : 0.1,
                    "debug_obb"                       : false,
                    "OBB_intersection_type"           : "SeparatingAxisTheorem",
                    "build_from_bounding_box"         : true,
                    "lower_bounding_box_coefficient"  : 0.0,
                    "higher_bounding_box_coefficient" : 1.0
                }
            },
            "advance_explicit_parameters"  : {
                "manual_max_gap_theshold" : false,
                "automatic_gap_factor"    : 1.0e-1,
                "max_gap_threshold"       : 5.0e-2,
                "max_gap_factor"          : 1.0e2,
                "logistic_exponent_factor" : 6.0
            },
            "advance_ALM_parameters" : {
                "manual_ALM"                  : false,
                "stiffness_factor"            : 1.0,
                "penalty_scale_factor"        : 1.0,
                "use_scale_factor"            : true,
                "penalty"                     : 1.0e-12,
                "scale_factor"                : 1.0e0,
                "adapt_penalty"               : false,
                "max_gap_factor"              : 1.0e-3
            },
            "alternative_formulations" : {
                "axisymmetric"            : false
            }
        }
        """)
        # Overwrite the default settings with user-provided parameters
        self.contact_settings = settings
        self.contact_settings.RecursivelyValidateAndAssignDefaults(default_parameters)
        # We transfer the parameters to the base class
        base_process_settings = KM.Parameters("""{}""")
        base_process_settings.AddValue("mesh_id", self.contact_settings["mesh_id"])
        base_process_settings.AddValue("model_part_name", self.contact_settings["model_part_name"])
        base_process_settings.AddValue("search_model_part", self.contact_settings["contact_model_part"])
        base_process_settings.AddValue("assume_master_slave", self.contact_settings["assume_master_slave"])
        base_process_settings.AddValue("search_property_ids", self.contact_settings["contact_property_ids"])
        base_process_settings.AddValue("interval", self.contact_settings["interval"])
        base_process_settings.AddValue("zero_tolerance_factor", self.contact_settings["zero_tolerance_factor"])
        base_process_settings.AddValue("integration_order", self.contact_settings["integration_order"])
        base_process_settings.AddValue("consider_tessellation", self.contact_settings["consider_tessellation"])
        base_process_settings.AddValue("normal_check_proportion", self.contact_settings["normal_check_proportion"])
        base_process_settings.AddValue("search_parameters", self.contact_settings["search_parameters"])
        # Construct the base process.
        super().__init__(Model, base_process_settings)
        # A check necessary for axisymmetric cases (the domain can not be 3D)
        if self.contact_settings["alternative_formulations"]["axisymmetric"].GetBool() and self.dimension == 3:
            raise NameError("3D and axisymmetric makes no sense")
        # Getting the normal variation flag
        self.normal_variation = super()._get_enum_flag(self.contact_settings, "normal_variation", self.__normal_computation)
        # Name of the frictional law
        self.frictional_law = self.contact_settings["frictional_law"].GetString()
        # If we compute a frictional contact simulation
        contact_type = self.contact_settings["contact_type"].GetString()
        if "Frictional" in contact_type:
            # Unless explicitly disabled, frictional contact implies a normal update
            not_normal_update_frictional = self.contact_settings["not_normal_update_frictional"].GetBool()
            if not not_normal_update_frictional and not "WithNormalUpdate" in contact_type:
                contact_type += "WithNormalUpdate"
            self.is_frictional = True
            self.slip_step_reset_frequency = self.contact_settings["slip_step_reset_frequency"].GetInt()
            self.slip_step_reset_counter = 0
            if "PureSlip" in contact_type:
                self.pure_slip = True
            else:
                self.pure_slip = False
        else:
            self.is_frictional = False
            self.pure_slip = False
        # In case we want a normal update
        # NOTE(review): the "WithNormalUpdate" suffix above is only applied to this
        # local copy of contact_type; self.contact_settings keeps the original
        # string, and ExecuteInitialize() re-reads the un-suffixed value — confirm
        # downstream readers expect that.
        if "WithNormalUpdate" in contact_type:
            if self.normal_variation == CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION:
                self.normal_variation = CSMA.NormalDerivativesComputation.NO_DERIVATIVES_COMPUTATION_WITH_NORMAL_UPDATE
    def ExecuteInitialize(self):
        """ This method is executed at the begining to initialize the process
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # If we compute a frictional contact simulation we do an additional check
        contact_type = self.contact_settings["contact_type"].GetString()
        if "Frictional" in contact_type:
            if "PureSlip" in contact_type:
                self.pure_slip = True
            else:
                # Sum the friction coefficients of all active contact pairs;
                # if they are all (near) zero, delegate the pure-slip decision
                # to the auxiliary solver check
                auxiliar_total_friction_coefficient = 0.0
                for key in self.settings["search_model_part"].keys():
                    if self.settings["search_model_part"][key].size() > 0:
                        auxiliar_total_friction_coefficient += self.contact_settings["friction_coefficients"][key].GetDouble()
                if auxiliar_total_friction_coefficient < sys.float_info.epsilon:
                    self.pure_slip = auxiliar_methods_solvers.AuxiliarPureSlipCheck(self.main_model_part)
                else:
                    self.pure_slip = False
        # Auxiliar initialize: zero the augmented contact pressures on all nodes
        KM.VariableUtils().SetNonHistoricalVariableToZero(CSMA.AUGMENTED_NORMAL_CONTACT_PRESSURE, self.main_model_part.Nodes)
        if "Frictional" in contact_type:
            KM.VariableUtils().SetNonHistoricalVariableToZero(CSMA.AUGMENTED_TANGENT_CONTACT_PRESSURE, self.main_model_part.Nodes)
        # We call to the base process
        super().ExecuteInitialize()
    def ExecuteBeforeSolutionLoop(self):
        """ This method is executed before starting the time loop.
        Delegates entirely to the base search process.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteBeforeSolutionLoop()
    def ExecuteInitializeSolutionStep(self):
        """ This method is executed in order to initialize the current step.
        Runs the base search, then (for frictional cases) resets the SLIP flag
        according to the configured reset frequency.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteInitializeSolutionStep()
        # Reset slip flag
        self._reset_slip_flag()
    def ExecuteFinalizeSolutionStep(self):
        """ This method is executed in order to finalize the current step.
        In debug mode it additionally logs a global balance between applied
        loads, reactions and the integrated contact force.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteFinalizeSolutionStep()
        # Debug we compute if the total load corresponds with the total contact force and the reactions
        if self.settings["search_parameters"]["debug_mode"].GetBool():
            total_load = KM.Vector(3)
            total_load[0] = 0.0
            total_load[1] = 0.0
            total_load[2] = 0.0
            total_reaction = KM.Vector(3)
            total_reaction[0] = 0.0
            total_reaction[1] = 0.0
            total_reaction[2] = 0.0
            total_contact_force = 0
            # Computing total load applied (I will consider only surface loads for now)
            for cond in self.main_model_part.Conditions:
                geom = cond.GetGeometry()
                if cond.Has(SMA.LINE_LOAD):
                    total_load += geom.Length() * cond.GetValue(SMA.LINE_LOAD)
                if cond.Has(SMA.SURFACE_LOAD):
                    total_load += geom.Area() * cond.GetValue(SMA.SURFACE_LOAD)
            # Integrate the nodal contact pressure weighted by the nodal area,
            # and accumulate the reactions of every node
            for node in self.main_model_part.Nodes:
                if node.Has(KM.NODAL_AREA) and node.Has(CSMA.AUGMENTED_NORMAL_CONTACT_PRESSURE):
                    total_contact_force += node.GetValue(KM.NODAL_AREA) * node.GetValue(CSMA.AUGMENTED_NORMAL_CONTACT_PRESSURE)
                total_reaction += node.GetSolutionStepValue(KM.REACTION)
            KM.Logger.PrintWarning("TOTAL LOAD: ", "X: {:.2e}".format(total_load[0]), "\t Y: {:.2e}".format(total_load[1]), "\tZ: {:.2e}".format(total_load[2]))
            KM.Logger.PrintWarning("TOTAL REACTION: ", "X: {:.2e}".format(total_reaction[0]), "\t Y: {:.2e}".format(total_reaction[1]), "\tZ: {:.2e}".format(total_reaction[2]))
            KM.Logger.PrintWarning("TOTAL CONTACT FORCE: ", "{:.2e}".format(total_contact_force))
    def ExecuteBeforeOutputStep(self):
        """ This method is executed right before the ouput process computation.
        When clear_inactive_for_post is enabled, contact pressures on nodes
        whose ACTIVE flag is False are zeroed so they do not show stale values
        in the post-processing output.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteBeforeOutputStep()
        if self.contact_settings["clear_inactive_for_post"].GetBool():
            zero_vector = KM.Array3()
            zero_vector[0] = 0.0
            zero_vector[1] = 0.0
            zero_vector[2] = 0.0
            # The (KM.ACTIVE, False) arguments restrict the assignment to
            # nodes where the ACTIVE flag is False
            KM.VariableUtils().SetNonHistoricalVariable(CSMA.AUGMENTED_NORMAL_CONTACT_PRESSURE, 0.0, self.main_model_part.Nodes, KM.ACTIVE, False)
            KM.VariableUtils().SetNonHistoricalVariable(CSMA.AUGMENTED_TANGENT_CONTACT_PRESSURE, zero_vector, self.main_model_part.Nodes, KM.ACTIVE, False)
    def ExecuteAfterOutputStep(self):
        """ This method is executed right after the ouput process computation.
        Delegates entirely to the base search process.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteAfterOutputStep()
    def ExecuteFinalize(self):
        """ This method is executed in order to finalize the current computation.
        Delegates entirely to the base search process.
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super().ExecuteFinalize()
    def _set_additional_parameters(self, param):
        """ This sets additional parameters for the search
        Keyword arguments:
        self -- It signifies an instance of a class.
        param -- The parameters where to set additional values
        """
        # Forward the pure-slip decision into the search parameters so the
        # search utilities can account for it
        param.AddEmptyValue("pure_slip")
        param["pure_slip"].SetBool(self.pure_slip)
def _get_condition_name(self):
""" This method returns the condition name
Keyword arguments:
self -- It signifies an instance of a class.
"""
# We define the condition name to be used
if self.contact_settings["contact_type"].GetString() == "Frictionless":
if self.normal_variation == CSMA.NormalDerivativesComputation.NODAL_ELEMENTAL_DERIVATIVES:
if self.contact_settings["alternative_formulations"]["axisymmetric"].GetBool():
condition_name = "ALMNVFrictionlessAxisymMortarContact"
else:
condition_name = "ALMNVFrictionlessMortarContact"
else:
if self.contact_settings["alternative_formulations"]["axisymmetric"].GetBool():
condition_name = "ALMFrictionlessAxisymMortarContact"
else:
condition_name = "ALMFrictionlessMortarContact"
elif self.contact_settings["contact_type"].GetString() == "FrictionlessComponents":
if self.normal_variation == CSMA.NormalDerivativesComputation.NODAL_ELEMENTAL_DERIVATIVES:
condition_name = "ALMNVFrictionlessComponentsMortarContact"
else:
condition_name = "ALMFrictionlessComponentsMortarContact"
elif self.is_frictional:
if self.normal_variation == CSMA.NormalDerivativesComputation.NODAL_ELEMENTAL_DERIVATIVES:
if self.contact_settings["alternative_formulations"]["axisymmetric"].GetBool():
condition_name = "ALMNVFrictionalAxisymMortarContact"
else:
condition_name = "ALMNVFrictionalMortarContact"
else:
if self.contact_settings["alternative_formulations"]["axisymmetric"].GetBool():
condition_name = "ALMFrictionalAxisymMortarContact"
else:
condition_name = "ALMFrictionalMortarContact"
return condition_name
def _get_final_string(self, key = "0"):
""" This method returns the final string of the condition name
Keyword arguments:
self -- It signifies an instance of a class.
key -- The key to identify the current pair
"""
# Determine the geometry of the element
super()._get_final_string(key)
# We compute the number of nodes of the conditions
number_nodes, number_nodes_master = super()._compute_number_nodes()
if number_nodes != number_nodes_master:
return str(number_nodes_master) + "N"
else:
return ""
    def _get_problem_name(self):
        """ This method returns the problem name to be solved
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # Fixed identifier consumed by the base search process machinery
        return "Contact"
    def _initialize_process_info(self):
        """ This method initializes some values from the process info
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super()._initialize_process_info()
        # We call the process info
        process_info = self.main_model_part.ProcessInfo
        # We recompute the normal at each iteration (false by default)
        process_info[CSMA.CONSIDER_NORMAL_VARIATION] = self.normal_variation
        # Initialize ACTIVE_SET_CONVERGED
        process_info[CSMA.ACTIVE_SET_CONVERGED] = True
        # We set the max gap factor for the gap adaptation
        max_gap_factor = self.contact_settings["advance_ALM_parameters"]["max_gap_factor"].GetDouble()
        process_info[CSMA.ADAPT_PENALTY] = self.contact_settings["advance_ALM_parameters"]["adapt_penalty"].GetBool()
        process_info[CSMA.MAX_GAP_FACTOR] = max_gap_factor
        # Threshold used by the mortar operators
        process_info[CSMA.OPERATOR_THRESHOLD] = self.contact_settings["operator_threshold"].GetDouble()
    def _initialize_search_values(self):
        """ This method initializes some values and variables used during contact computations
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super()._initialize_search_values()
        # We set the CONTACT flag
        self.main_model_part.Set(KM.CONTACT, True)
        self._get_process_model_part().Set(KM.CONTACT, True)
        # We consider frictional contact (We use the SLIP flag because was the easiest way)
        if self.is_frictional:
            self.main_model_part.Set(KM.SLIP, True)
            self._get_process_model_part().Set(KM.SLIP, True)
        else:
            self.main_model_part.Set(KM.SLIP, False)
            self._get_process_model_part().Set(KM.SLIP, False)
        # We call the process info
        process_info = self.main_model_part.ProcessInfo
        # We recompute the normal at each iteration (false by default)
        process_info[CSMA.CONSIDER_NORMAL_VARIATION] = self.normal_variation
        # We set the value that scales in the tangent direction the penalty and scale parameter
        if self.is_frictional:
            process_info[KM.TANGENT_FACTOR] = self.contact_settings["tangent_factor"].GetDouble()
            process_info[CSMA.SLIP_AUGMENTATION_COEFFICIENT] = self.contact_settings["slip_augmentation_coefficient"].GetDouble()
            process_info[CSMA.SLIP_THRESHOLD] = self.contact_settings["slip_threshold"].GetDouble()
    def _initialize_problem_parameters(self):
        """ This method initializes the ALM parameters from the process info
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super()._initialize_problem_parameters()
        # We call the process info
        process_info = self.main_model_part.ProcessInfo
        if not self.contact_settings["advance_ALM_parameters"]["manual_ALM"].GetBool():
            # Computing the scale factors or the penalty parameters (StiffnessFactor * E_mean/h_mean)
            alm_var_parameters = KM.Parameters("""{}""")
            alm_var_parameters.AddValue("stiffness_factor", self.contact_settings["advance_ALM_parameters"]["stiffness_factor"])
            alm_var_parameters.AddValue("penalty_scale_factor", self.contact_settings["advance_ALM_parameters"]["penalty_scale_factor"])
            self.alm_var_process = CSMA.ALMVariablesCalculationProcess(self._get_process_model_part(), KM.NODAL_H, alm_var_parameters)
            self.alm_var_process.Execute()
            # We don't consider scale factor
            if not self.contact_settings["advance_ALM_parameters"]["use_scale_factor"].GetBool():
                process_info[KM.SCALE_FACTOR] = 1.0
        else:
            # Manual mode: take penalty and scale factor straight from the settings
            # We set the values in the process info
            process_info[KM.INITIAL_PENALTY] = self.contact_settings["advance_ALM_parameters"]["penalty"].GetDouble()
            process_info[KM.SCALE_FACTOR] = self.contact_settings["advance_ALM_parameters"]["scale_factor"].GetDouble()
            # We set a minimum value (guards against zero/negative-epsilon inputs)
            if process_info[KM.INITIAL_PENALTY] < sys.float_info.epsilon:
                process_info[KM.INITIAL_PENALTY] = 1.0e0
            if process_info[KM.SCALE_FACTOR] < sys.float_info.epsilon:
                process_info[KM.SCALE_FACTOR] = 1.0e0
        # We print the parameters considered
        KM.Logger.PrintInfo("SCALE_FACTOR: ", "{:.2e}".format(process_info[KM.SCALE_FACTOR]))
        KM.Logger.PrintInfo("INITIAL_PENALTY: ", "{:.2e}".format(process_info[KM.INITIAL_PENALTY]))
    def _initialize_search_conditions(self):
        """ This method initializes some conditions values
        Keyword arguments:
        self -- It signifies an instance of a class.
        """
        # We call to the base process
        super()._initialize_search_conditions()
        # Assign the friction friction_coefficients
        if self.is_frictional:
            for key in self.settings["search_model_part"].keys():
                if self.settings["search_model_part"][key].size() > 0:
                    sub_search_model_part_name = "ContactSub"+key
                    # Reuse the pair's sub model part if it already exists
                    if self._get_process_model_part().HasSubModelPart(sub_search_model_part_name):
                        sub_search_model_part = self._get_process_model_part().GetSubModelPart(sub_search_model_part_name)
                    else:
                        sub_search_model_part = self._get_process_model_part().CreateSubModelPart(sub_search_model_part_name)
                    # Only set FRICTION_COEFFICIENT if the property does not define it yet
                    for prop in sub_search_model_part.GetProperties():
                        if not prop.Has(KM.FRICTION_COEFFICIENT):
                            prop[KM.FRICTION_COEFFICIENT] = self.contact_settings["friction_coefficients"][key].GetDouble()
                        else:
                            KM.Logger.PrintWarning("FRICTION_COEFFICIENT: ", "{:.2e}".format(prop[KM.FRICTION_COEFFICIENT]), " already defined in Properties, please define it as a condition pair property")
        # Initialize the ALM parameters
        alm_init_var = CSMA.ALMFastInit(self._get_process_model_part())
        alm_init_var.Execute()
def _reset_slip_flag(self):
""" This method resets the SLIP flag
Keyword arguments:
self -- It signifies an instance of a class.
"""
if self.is_frictional:
if not self.pure_slip:
if self.slip_step_reset_frequency > 0:
self.slip_step_reset_counter += 1
if self.slip_step_reset_counter >= self.slip_step_reset_frequency:
KM.VariableUtils().SetFlag(KM.SLIP, False, self._get_process_model_part().Nodes)
self.slip_step_reset_counter = 0
else: # If zero never reseted, if negative will consider the direction of the WEIGHTED_SLIP
if self.slip_step_reset_frequency < 0: # Update using the slip direction directly (a la PureSlip style)
KM.MortarUtilities.ComputeNodesTangentModelPart(self._get_process_model_part(), CSMA.WEIGHTED_SLIP, 1.0, True)
| 50.286807 | 344 | 0.634867 |
4ced15ed8ab0e4024ad90a63fa9cbf4536100118 | 687 | py | Python | nemo/collections/nlp/models/question_answering/__init__.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 4,145 | 2019-09-13T08:29:43.000Z | 2022-03-31T18:31:44.000Z | nemo/collections/nlp/models/question_answering/__init__.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 2,031 | 2019-09-17T16:51:39.000Z | 2022-03-31T23:52:41.000Z | nemo/collections/nlp/models/question_answering/__init__.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1,041 | 2019-09-13T10:08:21.000Z | 2022-03-30T06:37:38.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.models.question_answering.qa_model import QAModel
| 42.9375 | 75 | 0.772926 |
bc68b42808e1785abb5b38c05ed0ff9bbbe93923 | 916 | py | Python | blacklist/urls.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | 1 | 2021-05-04T20:55:27.000Z | 2021-05-04T20:55:27.000Z | blacklist/urls.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | 3 | 2021-02-01T13:26:15.000Z | 2021-09-09T19:41:01.000Z | blacklist/urls.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | 4 | 2021-02-08T21:22:55.000Z | 2021-08-12T04:28:55.000Z | from django.conf.urls import url
from . import views
# Namespace used when reversing these URLs (e.g. 'blacklist:note_board').
app_name = 'blacklist'
# NOTE(review): django.conf.urls.url() is deprecated and removed in Django 4;
# consider migrating these patterns to django.urls.re_path().
urlpatterns = [
    # Board pages
    url(r'^notes/$', views.note_board, name='note_board'),
    url(r'^blacklist/$', views.blacklist, name='blacklist'),
    # Endpoints returning modal dialog fragments
    url(r'^get_add_note/(?P<eve_id>(\d)*)/$', views.get_add_evenote, name='modal_add'),
    url(r'^get_comments/(?P<evenote_id>(\d)*)/$', views.get_evenote_comments, name='modal_comment'),
    url(r'^get_edit_note/(?P<evenote_id>(\d)*)/$', views.get_edit_evenote, name='modal_edit'),
    url(r'^get_add_comment/(?P<evenote_id>(\d)*)/$', views.get_add_comment, name='modal_add_comment'),
    # Search and mutation endpoints
    url(r'^search_names/$', views.search_names, name='search_names'),
    url(r'^add_comment/(?P<note_id>(\d)*)/$', views.add_comment, name='add_comment'),
    url(r'^add_note/(?P<eve_id>(\d)*)/$', views.add_note, name='add_note'),
    url(r'^edit_note/(?P<note_id>(\d)*)/$', views.edit_note, name='edit_note'),
]
| 48.210526 | 102 | 0.664847 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.