Dataset schema (one row per source file; "nullable" marks columns that may be null):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
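
The records below follow this schema, one per source file. For orientation, here is a minimal sketch of loading and inspecting such a dump with the Hugging Face `datasets` library; the dataset identifier is a placeholder assumption, not the dump's real name:

```python
# Minimal sketch, assuming a Hugging Face datasets-style dump with the
# schema above. "org/python-code-dump" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in ds.take(1):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
    print(row["content"][:120])  # first 120 characters of the file body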

---

Record 1
hexsha: f709c1866b436cc5e4023f18c6372832500ce7ae | size: 67 | ext: py | lang: Python
path: pm4py/models/transition_system/__init__.py | repo: edugonza/pm4py-source @ b5ae5ea40799155d6540692f94be0444c16fd9d3 | licenses: ["Apache-2.0"]
(the stars, issues, and forks variants carry the same path, repo, head hexsha, and licenses; their counts and event datetimes are null)

content:

```python
from pm4py.models.transition_system import transition_system, utils
```

avg_line_length: 67 | max_line_length: 67 | alphanum_fraction: 0.895522

content_no_comment:

```python
from pm4py.models.transition_system import transition_system, utils
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
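
The three statistics carried by every record (avg_line_length, max_line_length, alphanum_fraction) can be approximated directly from `content`. A minimal sketch; the exact definitions are an assumption, since the dump does not spell them out:

```python
# Rough sketch of the per-file statistics columns. The definitions are an
# assumption; computed values may differ slightly from the dataset's own.
def text_stats(content: str):
    lines = content.split("\n")
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction

src = "from pm4py.models.transition_system import transition_system, utils"
print(text_stats(src))  # close to the 67 / 67 / 0.895522 of Record 1
```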

---

Record 2
hexsha: f709c266be7704361d822d91c5e72c7e402bb726 | size: 5,797 | ext: py | lang: Python
path: spyder/plugins/editor/utils/debugger.py | repo: seryj/spyder @ acea4f501c1a04d57b02e5e817708a69b503f430 | licenses: ["MIT"]
(the stars, issues, and forks variants carry the same path, repo, head hexsha, and licenses; their counts and event datetimes are null)

content:

```python
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Contains the text debugger manager.
"""
import os.path as osp
from qtpy.QtWidgets import QInputDialog, QLineEdit
from spyder.config.main import CONF
from spyder.config.base import _
from spyder.py3compat import to_text_string
from spyder.api.manager import Manager
from spyder.plugins.editor.utils.editor import BlockUserData
def _load_all_breakpoints():
bp_dict = CONF.get('run', 'breakpoints', {})
for filename in list(bp_dict.keys()):
if not osp.isfile(filename):
bp_dict.pop(filename)
return bp_dict
def load_breakpoints(filename):
breakpoints = _load_all_breakpoints().get(filename, [])
if breakpoints and isinstance(breakpoints[0], int):
# Old breakpoints format
breakpoints = [(lineno, None) for lineno in breakpoints]
return breakpoints
def save_breakpoints(filename, breakpoints):
if not osp.isfile(filename):
return
bp_dict = _load_all_breakpoints()
bp_dict[filename] = breakpoints
CONF.set('run', 'breakpoints', bp_dict)
def clear_all_breakpoints():
CONF.set('run', 'breakpoints', {})
def clear_breakpoint(filename, lineno):
breakpoints = load_breakpoints(filename)
if breakpoints:
for breakpoint in breakpoints[:]:
if breakpoint[0] == lineno:
breakpoints.remove(breakpoint)
save_breakpoints(filename, breakpoints)
class DebuggerManager(Manager):
"""
Manages adding/removing breakpoints from the editor.
"""
def __init__(self, editor):
super(DebuggerManager, self).__init__(editor)
self.filename = None
self.breakpoints = self.get_breakpoints()
self.editor.sig_breakpoints_changed.connect(self.breakpoints_changed)
self.editor.sig_filename_changed.connect(self.set_filename)
def set_filename(self, filename):
if filename is None:
return
if self.filename != filename:
old_filename = self.filename
self.filename = filename
if self.breakpoints:
save_breakpoints(old_filename, []) # clear old breakpoints
self.save_breakpoints()
def toogle_breakpoint(self, line_number=None, condition=None,
edit_condition=False):
"""Add/remove breakpoint."""
if not self.editor.is_python_like():
return
if line_number is None:
block = self.editor.textCursor().block()
else:
block = self.editor.document().findBlockByNumber(line_number-1)
data = block.userData()
if not data:
data = BlockUserData(self.editor)
data.breakpoint = True
elif not edit_condition:
data.breakpoint = not data.breakpoint
data.breakpoint_condition = None
if condition is not None:
data.breakpoint_condition = condition
if edit_condition:
condition = data.breakpoint_condition
condition, valid = QInputDialog.getText(self.editor,
_('Breakpoint'),
_("Condition:"),
QLineEdit.Normal,
condition)
if not valid:
return
data.breakpoint = True
data.breakpoint_condition = str(condition) if condition else None
if data.breakpoint:
text = to_text_string(block.text()).strip()
if len(text) == 0 or text.startswith(('#', '"', "'")):
data.breakpoint = False
block.setUserData(data)
self.editor.sig_flags_changed.emit()
self.editor.sig_breakpoints_changed.emit()
def get_breakpoints(self):
"""Get breakpoints"""
breakpoints = []
block = self.editor.document().firstBlock()
for line_number in range(1, self.editor.document().blockCount()+1):
data = block.userData()
if data and data.breakpoint:
breakpoints.append((line_number, data.breakpoint_condition))
block = block.next()
return breakpoints
def clear_breakpoints(self):
"""Clear breakpoints"""
self.breakpoints = []
for data in self.editor.blockuserdata_list():
data.breakpoint = False
# data.breakpoint_condition = None # not necessary, but logical
def set_breakpoints(self, breakpoints):
"""Set breakpoints"""
self.clear_breakpoints()
for line_number, condition in breakpoints:
self.toogle_breakpoint(line_number, condition)
self.breakpoints = self.get_breakpoints()
def update_breakpoints(self):
"""Update breakpoints"""
self.editor.sig_breakpoints_changed.emit()
def breakpoints_changed(self):
"""Breakpoint list has changed"""
breakpoints = self.get_breakpoints()
if self.breakpoints != breakpoints:
self.breakpoints = breakpoints
self.save_breakpoints()
def save_breakpoints(self):
breakpoints = repr(self.breakpoints)
filename = to_text_string(self.filename)
breakpoints = to_text_string(breakpoints)
filename = osp.normpath(osp.abspath(filename))
if breakpoints:
breakpoints = eval(breakpoints)
else:
breakpoints = []
save_breakpoints(filename, breakpoints)
self.editor.sig_breakpoints_saved.emit()
def load_breakpoints(self):
self.set_breakpoints(load_breakpoints(self.filename))
```

avg_line_length: 34.921687 | max_line_length: 77 | alphanum_fraction: 0.621701

content_no_comment:

```python
import os.path as osp
from qtpy.QtWidgets import QInputDialog, QLineEdit
from spyder.config.main import CONF
from spyder.config.base import _
from spyder.py3compat import to_text_string
from spyder.api.manager import Manager
from spyder.plugins.editor.utils.editor import BlockUserData
def _load_all_breakpoints():
bp_dict = CONF.get('run', 'breakpoints', {})
for filename in list(bp_dict.keys()):
if not osp.isfile(filename):
bp_dict.pop(filename)
return bp_dict
def load_breakpoints(filename):
breakpoints = _load_all_breakpoints().get(filename, [])
if breakpoints and isinstance(breakpoints[0], int):
breakpoints = [(lineno, None) for lineno in breakpoints]
return breakpoints
def save_breakpoints(filename, breakpoints):
if not osp.isfile(filename):
return
bp_dict = _load_all_breakpoints()
bp_dict[filename] = breakpoints
CONF.set('run', 'breakpoints', bp_dict)
def clear_all_breakpoints():
CONF.set('run', 'breakpoints', {})
def clear_breakpoint(filename, lineno):
breakpoints = load_breakpoints(filename)
if breakpoints:
for breakpoint in breakpoints[:]:
if breakpoint[0] == lineno:
breakpoints.remove(breakpoint)
save_breakpoints(filename, breakpoints)
class DebuggerManager(Manager):
def __init__(self, editor):
super(DebuggerManager, self).__init__(editor)
self.filename = None
self.breakpoints = self.get_breakpoints()
self.editor.sig_breakpoints_changed.connect(self.breakpoints_changed)
self.editor.sig_filename_changed.connect(self.set_filename)
def set_filename(self, filename):
if filename is None:
return
if self.filename != filename:
old_filename = self.filename
self.filename = filename
if self.breakpoints:
save_breakpoints(old_filename, [])
self.save_breakpoints()
def toogle_breakpoint(self, line_number=None, condition=None,
edit_condition=False):
if not self.editor.is_python_like():
return
if line_number is None:
block = self.editor.textCursor().block()
else:
block = self.editor.document().findBlockByNumber(line_number-1)
data = block.userData()
if not data:
data = BlockUserData(self.editor)
data.breakpoint = True
elif not edit_condition:
data.breakpoint = not data.breakpoint
data.breakpoint_condition = None
if condition is not None:
data.breakpoint_condition = condition
if edit_condition:
condition = data.breakpoint_condition
condition, valid = QInputDialog.getText(self.editor,
_('Breakpoint'),
_("Condition:"),
QLineEdit.Normal,
condition)
if not valid:
return
data.breakpoint = True
data.breakpoint_condition = str(condition) if condition else None
if data.breakpoint:
text = to_text_string(block.text()).strip()
if len(text) == 0 or text.startswith(('#', '"', "'")):
data.breakpoint = False
block.setUserData(data)
self.editor.sig_flags_changed.emit()
self.editor.sig_breakpoints_changed.emit()
def get_breakpoints(self):
breakpoints = []
block = self.editor.document().firstBlock()
for line_number in range(1, self.editor.document().blockCount()+1):
data = block.userData()
if data and data.breakpoint:
breakpoints.append((line_number, data.breakpoint_condition))
block = block.next()
return breakpoints
def clear_breakpoints(self):
self.breakpoints = []
for data in self.editor.blockuserdata_list():
data.breakpoint = False
# data.breakpoint_condition = None # not necessary, but logical
def set_breakpoints(self, breakpoints):
self.clear_breakpoints()
for line_number, condition in breakpoints:
self.toogle_breakpoint(line_number, condition)
self.breakpoints = self.get_breakpoints()
def update_breakpoints(self):
self.editor.sig_breakpoints_changed.emit()
def breakpoints_changed(self):
breakpoints = self.get_breakpoints()
if self.breakpoints != breakpoints:
self.breakpoints = breakpoints
self.save_breakpoints()
def save_breakpoints(self):
breakpoints = repr(self.breakpoints)
filename = to_text_string(self.filename)
breakpoints = to_text_string(breakpoints)
filename = osp.normpath(osp.abspath(filename))
if breakpoints:
breakpoints = eval(breakpoints)
else:
breakpoints = []
save_breakpoints(filename, breakpoints)
self.editor.sig_breakpoints_saved.emit()
def load_breakpoints(self):
self.set_breakpoints(load_breakpoints(self.filename))
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
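
The manager above persists breakpoints per file as a mapping from the normalized absolute filename to a list of (line_number, condition) pairs, upgrading an older bare-line-number format on load. Here is a minimal standalone sketch of that round-trip, with Spyder's CONF store replaced by a plain dict (an assumption for illustration only):

```python
import os.path as osp

# Stand-in for Spyder's CONF-backed store: a plain dict, used here only
# for illustration; the real manager persists via CONF.
_STORE = {}

def save_breakpoints(filename, breakpoints):
    # Keys are normalized absolute paths; values are (lineno, condition) pairs.
    _STORE[osp.normpath(osp.abspath(filename))] = breakpoints

def load_breakpoints(filename):
    breakpoints = _STORE.get(osp.normpath(osp.abspath(filename)), [])
    if breakpoints and isinstance(breakpoints[0], int):
        # Upgrade the old format (bare line numbers) to (lineno, None) pairs.
        breakpoints = [(lineno, None) for lineno in breakpoints]
    return breakpoints

save_breakpoints("example.py", [(10, None), (42, "x > 3")])
print(load_breakpoints("example.py"))  # [(10, None), (42, 'x > 3')]
```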

---

Record 3
hexsha: f709c3d8c452141544904aa3206512fef48698f7 | size: 1,900 | ext: py | lang: Python
path: openstack/tests/unit/clustering/v1/test_profile_type.py | repo: NeCTAR-RC/openstacksdk @ 60a24f6c4717a1f9a0e545c9a07e68afaedc5a27 | licenses: ["Apache-2.0"]
stars and issues: counts and event datetimes null | forks: 1 (min and max event both 2020-07-21T02:18:23.000Z)
(path, repo, head hexsha, and licenses are identical across the three variants)

content:

```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.tests.unit import base
from openstack.clustering.v1 import profile_type
FAKE = {
'name': 'FAKE_PROFILE_TYPE',
'schema': {
'foo': 'bar'
},
'support_status': {
'1.0': [{
'status': 'supported',
'since': '2016.10',
}]
}
}
class TestProfileType(base.TestCase):
def test_basic(self):
sot = profile_type.ProfileType()
self.assertEqual('profile_type', sot.resource_key)
self.assertEqual('profile_types', sot.resources_key)
self.assertEqual('/profile-types', sot.base_path)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_list)
def test_instantiate(self):
sot = profile_type.ProfileType(**FAKE)
self.assertEqual(FAKE['name'], sot._get_id(sot))
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['schema'], sot.schema)
self.assertEqual(FAKE['support_status'], sot.support_status)
def test_ops(self):
sot = profile_type.ProfileType(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.get = mock.Mock(return_value=resp)
self.assertEqual('', sot.type_ops(sess))
url = 'profile-types/%s/ops' % sot.id
sess.get.assert_called_once_with(url)
```

avg_line_length: 31.666667 | max_line_length: 75 | alphanum_fraction: 0.659474

content_no_comment:

```python
import mock
from openstack.tests.unit import base
from openstack.clustering.v1 import profile_type
FAKE = {
'name': 'FAKE_PROFILE_TYPE',
'schema': {
'foo': 'bar'
},
'support_status': {
'1.0': [{
'status': 'supported',
'since': '2016.10',
}]
}
}
class TestProfileType(base.TestCase):
def test_basic(self):
sot = profile_type.ProfileType()
self.assertEqual('profile_type', sot.resource_key)
self.assertEqual('profile_types', sot.resources_key)
self.assertEqual('/profile-types', sot.base_path)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_list)
def test_instantiate(self):
sot = profile_type.ProfileType(**FAKE)
self.assertEqual(FAKE['name'], sot._get_id(sot))
self.assertEqual(FAKE['name'], sot.name)
self.assertEqual(FAKE['schema'], sot.schema)
self.assertEqual(FAKE['support_status'], sot.support_status)
def test_ops(self):
sot = profile_type.ProfileType(**FAKE)
resp = mock.Mock()
resp.json = mock.Mock(return_value='')
sess = mock.Mock()
sess.get = mock.Mock(return_value=resp)
self.assertEqual('', sot.type_ops(sess))
url = 'profile-types/%s/ops' % sot.id
sess.get.assert_called_once_with(url)
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
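
The `test_ops` case above stubs the HTTP session with `mock.Mock` objects and then asserts on the recorded call. The same stubbing pattern in isolation; the `Client` class below is hypothetical, standing in for the SDK resource:

```python
from unittest import mock  # the test above uses the standalone `mock` package

class Client:
    """Hypothetical stand-in for the SDK resource under test."""
    def type_ops(self, session):
        resp = session.get("profile-types/123/ops")
        return resp.json()

resp = mock.Mock()
resp.json = mock.Mock(return_value={"operations": []})
sess = mock.Mock()
sess.get = mock.Mock(return_value=resp)

assert Client().type_ops(sess) == {"operations": []}
sess.get.assert_called_once_with("profile-types/123/ops")  # URL the stub saw
```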

---

Record 4
hexsha: f709c576d0c06e1b8e9c1dba47fe5262c19309d7 | size: 2,468 | ext: py | lang: Python
path: examples/test_dump_kws_two_targets.py | repo: sriiora/tcf @ e607ce04f97dbb4910d94428c0600a6a7145a825 | licenses: ["Apache-2.0"]
stars: 24 (2018-08-21T18:04:48.000Z to 2022-02-07T22:50:06.000Z) | issues: 16 (2018-08-21T18:03:52.000Z to 2022-03-01T17:15:42.000Z) | forks: 29 (2018-08-22T19:40:59.000Z to 2021-12-21T11:13:23.000Z)
(path, repo, head hexsha, and licenses are identical across the three variants)

content:

```python
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
"""Testcase using two targets
--------------------------
Note in this case the target group names list two targets and
each target object has different values.
.. literalinclude:: /examples/test_dump_kws_two_targets.py
:language: python
:pyobject: _test
Execute :download:`the testcase
<../examples/test_dump_kws_two_targets.py>` with::
$ tcf run -vv /usr/share/tcf/examples/test_dump_kws_two_targets.py
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg: Keywords for testcase:
{'cwd': '/home/inaky/z/s/local',
...}
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg|localhost/qz33b-arm: Keywords for target 0:
{u'board': u'qemu_cortex_m3',
'bsp': u'arm',
u'bsp_models': {u'arm': [u'arm']},
u'bsps': {u'arm': {u'board': u'qemu_cortex_m3',
u'console': u'arm',
u'kernelname': u'zephyr.elf',
...
u'zephyr_board': u'qemu_cortex_m3',
u'zephyr_kernelname': u'zephyr.elf'}
INFO0/ato4B /usr/share/tcf/examples/test_dump_kws_two_targets.py#_test @2psg|localhost/qz31a-x86: Keywords for target 1:
{u'board': u'qemu_x86',
'bsp': u'x86',
u'bsp_models': {u'x86': [u'x86']},
u'bsps': {u'x86': {u'board': u'qemu_x86',
u'console': u'x86',
u'kernelname': u'zephyr.elf',
u'quark_se_stub': False,
...
u'zephyr_board': u'qemu_x86',
u'zephyr_kernelname': u'zephyr.elf'}
PASS0/ toplevel @local: 1 tests (1 passed, 0 error, 0 failed, 0 blocked, 0 skipped, in 0:00:00.417956) - passed
(depending on your installation method, location might be
*~/.local/share/tcf/examples*)
"""
import pprint
import tcfl.tc
@tcfl.tc.target()
@tcfl.tc.target()
@tcfl.tc.tags(build_only = True, ignore_example = True)
class _test(tcfl.tc.tc_c):
def build(self, target, target1):
self.report_info("Keywords for testcase:\n%s"
% pprint.pformat(self.kws),
level = 0)
target.report_info("Keywords for target 0:\n%s"
% pprint.pformat(target.kws),
level = 0)
target1.report_info("Keywords for target 1:\n%s"
% pprint.pformat(target1.kws),
level = 0)
```

avg_line_length: 35.257143 | max_line_length: 122 | alphanum_fraction: 0.616288

content_no_comment:

```python
import pprint
import tcfl.tc
@tcfl.tc.target()
@tcfl.tc.target()
@tcfl.tc.tags(build_only = True, ignore_example = True)
class _test(tcfl.tc.tc_c):
def build(self, target, target1):
self.report_info("Keywords for testcase:\n%s"
% pprint.pformat(self.kws),
level = 0)
target.report_info("Keywords for target 0:\n%s"
% pprint.pformat(target.kws),
level = 0)
target1.report_info("Keywords for target 1:\n%s"
% pprint.pformat(target1.kws),
level = 0)
```

is_comment_constant_removed: true | is_sharp_comment_removed: true
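
Comparing this record's `content` and `content_no_comment` fields shows the shebang, license comments, and module docstring stripped away. One plausible way to drop `#` comments is via the tokenizer; this is an assumption for illustration, since the dataset's actual pipeline is not shown (and it evidently removes docstrings as well):

```python
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    # Re-emit every token except COMMENT; untokenize reconstructs the
    # layout from the recorded token positions.
    tokens = [
        tok for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    return tokenize.untokenize(tokens)

print(strip_sharp_comments("#!/usr/bin/python3\nx = 1  # set x\n"))
```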

---

Record 5
hexsha: f709c6ae72b126c7b0f1d53b851327f9981b2de1 | size: 29,835 | ext: py | lang: Python
path: python/pulumi_eks/managed_node_group.py | repo: fitzoh/pulumi-eks @ 865ccf14cdf6bec706e443c8fda44f85f388c9e1 | licenses: ["Apache-2.0"]
stars: 8 (2018-09-27T21:31:46.000Z to 2018-12-01T02:33:18.000Z) | issues: 21 (2018-08-23T16:47:29.000Z to 2018-12-02T03:07:22.000Z) | forks: 6 (2018-09-05T16:21:21.000Z to 2018-10-26T23:11:00.000Z)
(path, repo, head hexsha, and licenses are identical across the three variants)

content:

```python
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._inputs import *
from .vpc_cni import VpcCni
import pulumi_aws
import pulumi_kubernetes
__all__ = ['ManagedNodeGroupArgs', 'ManagedNodeGroup']
@pulumi.input_type
class ManagedNodeGroupArgs:
def __init__(__self__, *,
cluster: pulumi.Input['CoreDataArgs'],
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] = None,
scaling_config: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ManagedNodeGroup resource.
:param pulumi.Input['CoreDataArgs'] cluster: The target EKS cluster.
:param pulumi.Input[str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] cluster_name: Name of the EKS Cluster.
:param pulumi.Input[int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
:param pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs'] launch_template: Launch Template settings.
:param pulumi.Input[str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
:param pulumi.Input[str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
:param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] release_version: AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
:param pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs'] remote_access: Remote access settings.
:param pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs'] scaling_config: Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
pulumi.set(__self__, "cluster", cluster)
if ami_type is not None:
pulumi.set(__self__, "ami_type", ami_type)
if capacity_type is not None:
pulumi.set(__self__, "capacity_type", capacity_type)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if disk_size is not None:
pulumi.set(__self__, "disk_size", disk_size)
if force_update_version is not None:
pulumi.set(__self__, "force_update_version", force_update_version)
if instance_types is not None:
pulumi.set(__self__, "instance_types", instance_types)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if launch_template is not None:
pulumi.set(__self__, "launch_template", launch_template)
if node_group_name is not None:
pulumi.set(__self__, "node_group_name", node_group_name)
if node_group_name_prefix is not None:
pulumi.set(__self__, "node_group_name_prefix", node_group_name_prefix)
if node_role is not None:
pulumi.set(__self__, "node_role", node_role)
if node_role_arn is not None:
pulumi.set(__self__, "node_role_arn", node_role_arn)
if release_version is not None:
pulumi.set(__self__, "release_version", release_version)
if remote_access is not None:
pulumi.set(__self__, "remote_access", remote_access)
if scaling_config is not None:
pulumi.set(__self__, "scaling_config", scaling_config)
if subnet_ids is not None:
pulumi.set(__self__, "subnet_ids", subnet_ids)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if taints is not None:
pulumi.set(__self__, "taints", taints)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input['CoreDataArgs']:
"""
The target EKS cluster.
"""
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input['CoreDataArgs']):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter(name="amiType")
def ami_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "ami_type")
@ami_type.setter
def ami_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ami_type", value)
@property
@pulumi.getter(name="capacityType")
def capacity_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "capacity_type")
@capacity_type.setter
def capacity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_type", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EKS Cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="diskSize")
def disk_size(self) -> Optional[pulumi.Input[int]]:
"""
Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
"""
return pulumi.get(self, "disk_size")
@disk_size.setter
def disk_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size", value)
@property
@pulumi.getter(name="forceUpdateVersion")
def force_update_version(self) -> Optional[pulumi.Input[bool]]:
"""
Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
"""
return pulumi.get(self, "force_update_version")
@force_update_version.setter
def force_update_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_update_version", value)
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
"""
return pulumi.get(self, "instance_types")
@instance_types.setter
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_types", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="launchTemplate")
def launch_template(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]:
"""
Launch Template settings.
"""
return pulumi.get(self, "launch_template")
@launch_template.setter
def launch_template(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]):
pulumi.set(self, "launch_template", value)
@property
@pulumi.getter(name="nodeGroupName")
def node_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
"""
return pulumi.get(self, "node_group_name")
@node_group_name.setter
def node_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name", value)
@property
@pulumi.getter(name="nodeGroupNamePrefix")
def node_group_name_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
"""
return pulumi.get(self, "node_group_name_prefix")
@node_group_name_prefix.setter
def node_group_name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name_prefix", value)
@property
@pulumi.getter(name="nodeRole")
def node_role(self) -> Optional[pulumi.Input['pulumi_aws.iam.Role']]:
"""
The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
"""
return pulumi.get(self, "node_role")
@node_role.setter
def node_role(self, value: Optional[pulumi.Input['pulumi_aws.iam.Role']]):
pulumi.set(self, "node_role", value)
@property
@pulumi.getter(name="nodeRoleArn")
def node_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
"""
return pulumi.get(self, "node_role_arn")
@node_role_arn.setter
def node_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_role_arn", value)
@property
@pulumi.getter(name="releaseVersion")
def release_version(self) -> Optional[pulumi.Input[str]]:
"""
AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
"""
return pulumi.get(self, "release_version")
@release_version.setter
def release_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "release_version", value)
@property
@pulumi.getter(name="remoteAccess")
def remote_access(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]:
"""
Remote access settings.
"""
return pulumi.get(self, "remote_access")
@remote_access.setter
def remote_access(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]):
pulumi.set(self, "remote_access", value)
@property
@pulumi.getter(name="scalingConfig")
def scaling_config(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]:
"""
Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
"""
return pulumi.get(self, "scaling_config")
@scaling_config.setter
def scaling_config(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]):
pulumi.set(self, "scaling_config", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value mapping of resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]:
"""
The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
return pulumi.get(self, "taints")
@taints.setter
def taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]):
pulumi.set(self, "taints", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class ManagedNodeGroup(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
ManagedNodeGroup is a component that wraps creating an AWS managed node group.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ami_type: Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Defaults to `AL2_x86_64`. Valid values: `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[str] capacity_type: Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[pulumi.InputType['CoreDataArgs']] cluster: The target EKS cluster.
:param pulumi.Input[str] cluster_name: Name of the EKS Cluster.
:param pulumi.Input[int] disk_size: Disk size in GiB for worker nodes. Defaults to `20`. This provider will only perform drift detection if a configuration value is provided.
:param pulumi.Input[bool] force_update_version: Force version update if existing pods are unable to be drained due to a pod disruption budget issue.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Set of instance types associated with the EKS Node Group. Defaults to `["t3.medium"]`. This provider will only perform drift detection if a configuration value is provided. Currently, the EKS API only accepts a single value in the set.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Key-value map of Kubernetes labels. Only labels that are applied with the EKS API are managed by this argument. Other Kubernetes labels applied to the EKS Node Group will not be managed.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] launch_template: Launch Template settings.
:param pulumi.Input[str] node_group_name: Name of the EKS Node Group. If omitted, this provider will assign a random, unique name. Conflicts with `nodeGroupNamePrefix`.
:param pulumi.Input[str] node_group_name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `nodeGroupName`.
:param pulumi.Input['pulumi_aws.iam.Role'] node_role: The IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRole` and `nodeRoleArn` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] node_role_arn: Amazon Resource Name (ARN) of the IAM Role that provides permissions for the EKS Node Group.
Note, `nodeRoleArn` and `nodeRole` are mutually exclusive, and a single option must be used.
:param pulumi.Input[str] release_version: AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] remote_access: Remote access settings.
:param pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']] scaling_config: Scaling settings.
Default scaling amounts of the node group autoscaling group are:
- desiredSize: 2
- minSize: 1
- maxSize: 2
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` (where `CLUSTER_NAME` is replaced with the name of the EKS Cluster).
Default subnetIds is chosen from the following list, in order, if subnetIds arg is not set:
- core.subnetIds
- core.privateIds
- core.publicSubnetIds
This default logic is based on the existing subnet IDs logic of this package: https://git.io/JeM11
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]] taints: The Kubernetes taints to be applied to the nodes in the node group. Maximum of 50 taints per node group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedNodeGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ManagedNodeGroup is a component that wraps creating an AWS managed node group.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
:param str resource_name: The name of the resource.
:param ManagedNodeGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedNodeGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ManagedNodeGroupArgs.__new__(ManagedNodeGroupArgs)
__props__.__dict__["ami_type"] = ami_type
__props__.__dict__["capacity_type"] = capacity_type
if cluster is None and not opts.urn:
raise TypeError("Missing required property 'cluster'")
__props__.__dict__["cluster"] = cluster
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["disk_size"] = disk_size
__props__.__dict__["force_update_version"] = force_update_version
__props__.__dict__["instance_types"] = instance_types
__props__.__dict__["labels"] = labels
__props__.__dict__["launch_template"] = launch_template
__props__.__dict__["node_group_name"] = node_group_name
__props__.__dict__["node_group_name_prefix"] = node_group_name_prefix
__props__.__dict__["node_role"] = node_role
__props__.__dict__["node_role_arn"] = node_role_arn
__props__.__dict__["release_version"] = release_version
__props__.__dict__["remote_access"] = remote_access
__props__.__dict__["scaling_config"] = scaling_config
__props__.__dict__["subnet_ids"] = subnet_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["taints"] = taints
__props__.__dict__["version"] = version
__props__.__dict__["node_group"] = None
super(ManagedNodeGroup, __self__).__init__(
'eks:index:ManagedNodeGroup',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="nodeGroup")
def node_group(self) -> pulumi.Output['pulumi_aws.eks.NodeGroup']:
"""
The AWS managed node group.
"""
return pulumi.get(self, "node_group")
```

avg_line_length: 55.147874 | max_line_length: 308 | alphanum_fraction: 0.668108

content_no_comment:

```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._inputs import *
from .vpc_cni import VpcCni
import pulumi_aws
import pulumi_kubernetes
__all__ = ['ManagedNodeGroupArgs', 'ManagedNodeGroup']
@pulumi.input_type
class ManagedNodeGroupArgs:
def __init__(__self__, *,
cluster: pulumi.Input['CoreDataArgs'],
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']] = None,
scaling_config: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]] = None,
version: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "cluster", cluster)
if ami_type is not None:
pulumi.set(__self__, "ami_type", ami_type)
if capacity_type is not None:
pulumi.set(__self__, "capacity_type", capacity_type)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if disk_size is not None:
pulumi.set(__self__, "disk_size", disk_size)
if force_update_version is not None:
pulumi.set(__self__, "force_update_version", force_update_version)
if instance_types is not None:
pulumi.set(__self__, "instance_types", instance_types)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if launch_template is not None:
pulumi.set(__self__, "launch_template", launch_template)
if node_group_name is not None:
pulumi.set(__self__, "node_group_name", node_group_name)
if node_group_name_prefix is not None:
pulumi.set(__self__, "node_group_name_prefix", node_group_name_prefix)
if node_role is not None:
pulumi.set(__self__, "node_role", node_role)
if node_role_arn is not None:
pulumi.set(__self__, "node_role_arn", node_role_arn)
if release_version is not None:
pulumi.set(__self__, "release_version", release_version)
if remote_access is not None:
pulumi.set(__self__, "remote_access", remote_access)
if scaling_config is not None:
pulumi.set(__self__, "scaling_config", scaling_config)
if subnet_ids is not None:
pulumi.set(__self__, "subnet_ids", subnet_ids)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if taints is not None:
pulumi.set(__self__, "taints", taints)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input['CoreDataArgs']:
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input['CoreDataArgs']):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter(name="amiType")
def ami_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ami_type")
@ami_type.setter
def ami_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ami_type", value)
@property
@pulumi.getter(name="capacityType")
def capacity_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "capacity_type")
@capacity_type.setter
def capacity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_type", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="diskSize")
def disk_size(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "disk_size")
@disk_size.setter
def disk_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk_size", value)
@property
@pulumi.getter(name="forceUpdateVersion")
def force_update_version(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "force_update_version")
@force_update_version.setter
def force_update_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_update_version", value)
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "instance_types")
@instance_types.setter
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_types", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="launchTemplate")
def launch_template(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]:
return pulumi.get(self, "launch_template")
@launch_template.setter
def launch_template(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]):
pulumi.set(self, "launch_template", value)
@property
@pulumi.getter(name="nodeGroupName")
def node_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "node_group_name")
@node_group_name.setter
def node_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name", value)
@property
@pulumi.getter(name="nodeGroupNamePrefix")
def node_group_name_prefix(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "node_group_name_prefix")
@node_group_name_prefix.setter
def node_group_name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_group_name_prefix", value)
@property
@pulumi.getter(name="nodeRole")
def node_role(self) -> Optional[pulumi.Input['pulumi_aws.iam.Role']]:
return pulumi.get(self, "node_role")
@node_role.setter
def node_role(self, value: Optional[pulumi.Input['pulumi_aws.iam.Role']]):
pulumi.set(self, "node_role", value)
@property
@pulumi.getter(name="nodeRoleArn")
def node_role_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "node_role_arn")
@node_role_arn.setter
def node_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_role_arn", value)
@property
@pulumi.getter(name="releaseVersion")
def release_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "release_version")
@release_version.setter
def release_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "release_version", value)
@property
@pulumi.getter(name="remoteAccess")
def remote_access(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]:
return pulumi.get(self, "remote_access")
@remote_access.setter
def remote_access(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]):
pulumi.set(self, "remote_access", value)
@property
@pulumi.getter(name="scalingConfig")
def scaling_config(self) -> Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]:
return pulumi.get(self, "scaling_config")
@scaling_config.setter
def scaling_config(self, value: Optional[pulumi.Input['pulumi_aws.eks.NodeGroupScalingConfigArgs']]):
pulumi.set(self, "scaling_config", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]:
return pulumi.get(self, "taints")
@taints.setter
def taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.NodeGroupTaintArgs']]]]):
pulumi.set(self, "taints", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class ManagedNodeGroup(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedNodeGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedNodeGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ami_type: Optional[pulumi.Input[str]] = None,
capacity_type: Optional[pulumi.Input[str]] = None,
cluster: Optional[pulumi.Input[pulumi.InputType['CoreDataArgs']]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
disk_size: Optional[pulumi.Input[int]] = None,
force_update_version: Optional[pulumi.Input[bool]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
launch_template: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupLaunchTemplateArgs']]] = None,
node_group_name: Optional[pulumi.Input[str]] = None,
node_group_name_prefix: Optional[pulumi.Input[str]] = None,
node_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
node_role_arn: Optional[pulumi.Input[str]] = None,
release_version: Optional[pulumi.Input[str]] = None,
remote_access: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupRemoteAccessArgs']]] = None,
scaling_config: Optional[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupScalingConfigArgs']]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_aws.eks.NodeGroupTaintArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ManagedNodeGroupArgs.__new__(ManagedNodeGroupArgs)
__props__.__dict__["ami_type"] = ami_type
__props__.__dict__["capacity_type"] = capacity_type
if cluster is None and not opts.urn:
raise TypeError("Missing required property 'cluster'")
__props__.__dict__["cluster"] = cluster
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["disk_size"] = disk_size
__props__.__dict__["force_update_version"] = force_update_version
__props__.__dict__["instance_types"] = instance_types
__props__.__dict__["labels"] = labels
__props__.__dict__["launch_template"] = launch_template
__props__.__dict__["node_group_name"] = node_group_name
__props__.__dict__["node_group_name_prefix"] = node_group_name_prefix
__props__.__dict__["node_role"] = node_role
__props__.__dict__["node_role_arn"] = node_role_arn
__props__.__dict__["release_version"] = release_version
__props__.__dict__["remote_access"] = remote_access
__props__.__dict__["scaling_config"] = scaling_config
__props__.__dict__["subnet_ids"] = subnet_ids
__props__.__dict__["tags"] = tags
__props__.__dict__["taints"] = taints
__props__.__dict__["version"] = version
__props__.__dict__["node_group"] = None
super(ManagedNodeGroup, __self__).__init__(
'eks:index:ManagedNodeGroup',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="nodeGroup")
def node_group(self) -> pulumi.Output['pulumi_aws.eks.NodeGroup']:
return pulumi.get(self, "node_group")
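# A hedged usage sketch (not part of the generated module): constructing a
# ManagedNodeGroup for an existing cluster. `cluster`, `role`, and every value
# below are illustrative assumptions, not names defined in this file.
#
#   import pulumi_eks as eks
#   import pulumi_aws as aws
#
#   cluster = eks.Cluster("demo", skip_default_node_group=True)
#   node_group = eks.ManagedNodeGroup(
#       "demo-ng",
#       cluster=cluster.core,          # satisfies the CoreDataArgs input above
#       instance_types=["t3.medium"],
#       scaling_config=aws.eks.NodeGroupScalingConfigArgs(
#           desired_size=2, min_size=1, max_size=3),
#       node_role=role)                # an aws.iam.Role carrying the worker policies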
| true
| true
|
f709c7766e0bf952395f474d0c6055d6f4fbf99a
| 37,773
|
py
|
Python
|
venv/Lib/site-packages/reportlab/graphics/renderSVG.py
|
lsmiley/esstools-adminlte-3
|
ec3a7334be0ab05e9d336e397d6edb881131b932
|
[
"MIT"
] | 55
|
2019-09-21T02:45:18.000Z
|
2021-12-10T13:38:51.000Z
|
venv/Lib/site-packages/reportlab/graphics/renderSVG.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 4
|
2019-09-26T03:16:50.000Z
|
2021-12-10T13:40:49.000Z
|
venv/Lib/site-packages/reportlab/graphics/renderSVG.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 26
|
2019-09-25T03:54:30.000Z
|
2022-03-21T14:03:12.000Z
|
__doc__="""An experimental SVG renderer for the ReportLab graphics framework.
This will create SVG code from the ReportLab Graphics API (RLG).
To read existing SVG code and convert it into ReportLab graphics
objects download the svglib module here:
http://python.net/~gherman/#svglib
"""
import math, types, sys, os, codecs, base64
from operator import getitem
from reportlab.pdfbase.pdfmetrics import stringWidth # for font info
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.lib.utils import asNative, getBytesIO
from reportlab.graphics.renderbase import StateTracker, getStateDelta, Renderer, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS, Path, UserNode
from reportlab.graphics.shapes import * # (only for test0)
from reportlab import rl_config
from reportlab.lib.utils import getStringIO, RLString, isUnicode, isBytes
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from reportlab.pdfgen.textobject import numericXShift # missing import: drawString's text_anchor=='numeric' branch needs it (assumed to live in pdfgen.textobject)
from .renderPM import _getImage
from xml.dom import getDOMImplementation
### some constants ###
sin = math.sin
cos = math.cos
pi = math.pi
AREA_STYLES = 'stroke-width stroke-linecap stroke-linejoin stroke stroke-opacity fill fill-opacity stroke-dasharray stroke-dashoffset fill-rule id'.split()
LINE_STYLES = 'stroke-width stroke-linecap stroke-linejoin stroke stroke-opacity stroke-dasharray stroke-dashoffset id'.split()
TEXT_STYLES = 'font-family font-weight font-style font-variant font-size id'.split()
EXTRA_STROKE_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity stroke-dasharray stroke-dashoffset'.split()
EXTRA_FILL_STYLES = 'fill fill-opacity'.split()
### top-level user function ###
def drawToString(d, showBoundary=rl_config.showBoundary,**kwds):
"Returns a SVG as a string in memory, without touching the disk"
s = getStringIO()
drawToFile(d, s, showBoundary=showBoundary,**kwds)
return s.getvalue()
def drawToFile(d, fn, showBoundary=rl_config.showBoundary,**kwds):
d = renderScaledDrawing(d)
c = SVGCanvas((d.width, d.height),**kwds)
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
"""As it says."""
r = _SVGRenderer()
r.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
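# A minimal usage sketch for the three helpers above (the shape classes come
# from the star import of reportlab.graphics.shapes earlier in this module):
#
#   from reportlab.graphics.shapes import Drawing, Rect, String
#   d = Drawing(200, 100)
#   d.add(Rect(10, 10, 180, 80, fillColor=None))
#   d.add(String(20, 45, 'hello SVG'))
#   markup = drawToString(d)        # full SVG document as a str
#   drawToFile(d, 'hello.svg')      # or write it straight to a file/path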
### helper functions ###
def _pointsFromList(L):
"""
given a list of coordinates [x0, y0, x1, y1....]
produce a list of points [(x0,y0), (y1,y0),....]
"""
P=[]
for i in range(0,len(L), 2):
P.append((L[i], L[i+1]))
return P
def transformNode(doc, newTag, node=None, **attrDict):
"""Transform a DOM node into new node and copy selected attributes.
Creates a new DOM node with tag name 'newTag' for document 'doc'
and copies selected attributes from an existing 'node' as provided
in 'attrDict'. The source 'node' can be None. Attribute values will
be converted to strings.
E.g.
n = transformNode(doc, "node1", x="0", y="1")
-> DOM node for <node1 x="0" y="1"/>
n = transformNode(doc, "node1", x=0, y=1+1)
-> DOM node for <node1 x="0" y="2"/>
n = transformNode(doc, "node1", node0, x="x0", y="x0", zoo=bar())
-> DOM node for <node1 x="[node0.x0]" y="[node0.y0]" zoo="[bar()]"/>
"""
newNode = doc.createElement(newTag)
for newAttr, attr in attrDict.items():
sattr = str(attr)
if not node:
newNode.setAttribute(newAttr, sattr)
else:
attrVal = node.getAttribute(sattr)
newNode.setAttribute(newAttr, attrVal or sattr)
return newNode
class EncodedWriter(list):
'''
EncodedWriter(encoding) assumes .write will be called with
either unicode or utf8 encoded bytes. it will accumulate
unicode
'''
BOMS = {
'utf-32':codecs.BOM_UTF32,
'utf-32-be':codecs.BOM_UTF32_BE,
'utf-32-le':codecs.BOM_UTF32_LE,
'utf-16':codecs.BOM_UTF16,
'utf-16-be':codecs.BOM_UTF16_BE,
'utf-16-le':codecs.BOM_UTF16_LE,
}
def __init__(self,encoding,bom=False):
list.__init__(self)
self.encoding = encoding = codecs.lookup(encoding).name
        if bom and ('16' in encoding or '32' in encoding):
self.write(self.BOMS[encoding])
def write(self,u):
if isBytes(u):
try:
u = u.decode('utf-8')
except:
et, ev, tb = sys.exc_info()
ev = str(ev)
del et, tb
raise ValueError("String %r not encoded as 'utf-8'\nerror=%s" % (u,ev))
elif not isUnicode(u):
raise ValueError("EncodedWriter.write(%s) argument should be 'utf-8' bytes or str" % ascii(u))
self.append(u)
def getvalue(self):
r = ''.join(self)
del self[:]
return r
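# A quick illustration of the writer above: it is file-like enough for
# minidom's writexml and hands back the accumulated text via getvalue().
#
#   w = EncodedWriter('utf-8')
#   w.write('hello ')
#   w.write(b'world')               # utf-8 bytes are decoded transparently
#   assert w.getvalue() == 'hello world'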
_fillRuleMap = {
FILL_NON_ZERO: 'nonzero',
'non-zero': 'nonzero',
'nonzero': 'nonzero',
FILL_EVEN_ODD: 'evenodd',
'even-odd': 'evenodd',
'evenodd': 'evenodd',
}
def py_fp_str(*args):
return ' '.join((('%f' % a).rstrip('0').rstrip('.') for a in args))
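# A quick illustration of the formatting above:
#   py_fp_str(1.0, 2.5, 0.10) -> '1 2.5 0.1'   (trailing zeros, then any bare '.', stripped)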
### classes ###
class SVGCanvas:
def __init__(self, size=(300,300), encoding='utf-8', verbose=0, bom=False, **kwds):
'''
verbose = 0 >0 means do verbose stuff
useClip = False True means don't use a clipPath definition put the global clip into the clip property
to get around an issue with safari
extraXmlDecl = '' use to add extra xml declarations
scaleGroupId = '' id of an extra group to add around the drawing to allow easy scaling
svgAttrs = {} dictionary of attributes to be applied to the svg tag itself
'''
self.verbose = verbose
self.encoding = codecs.lookup(encoding).name
self.bom = bom
useClip = kwds.pop('useClip',False)
self.fontHacks = kwds.pop('fontHacks',{})
self.extraXmlDecl = kwds.pop('extraXmlDecl','')
scaleGroupId = kwds.pop('scaleGroupId','')
self._fillMode = FILL_EVEN_ODD
self.width, self.height = self.size = size
# self.height = size[1]
self.code = []
self.style = {}
self.path = ''
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = None
if kwds.pop('use_fp_str',False):
self.fp_str = fp_str
else:
self.fp_str = py_fp_str
self.cfp_str = lambda *args: self.fp_str(*args).replace(' ',',')
implementation = getDOMImplementation('minidom')
#Based on official example here http://www.w3.org/TR/SVG10/linking.html want:
#<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
#Thus,
#doctype = implementation.createDocumentType("svg",
# "-//W3C//DTD SVG 20010904//EN",
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd")
#
#However, putting that example through http://validator.w3.org/ recommends:
#<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN"
# "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
#So we'll use that for our SVG 1.0 output.
doctype = implementation.createDocumentType("svg",
"-//W3C//DTD SVG 1.0//EN",
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd")
self.doc = implementation.createDocument(None,"svg",doctype)
self.svg = self.doc.documentElement
svgAttrs = dict(
width = str(size[0]),
height=str(self.height),
preserveAspectRatio="xMinYMin meet",
viewBox="0 0 %d %d" % (self.width, self.height),
#baseProfile = "full", #disliked in V 1.0
#these suggested by Tim Roberts, as updated by peter@maubp.freeserve.co.uk
xmlns="http://www.w3.org/2000/svg",
version="1.0",
)
svgAttrs['fill-rule'] = _fillRuleMap[self._fillMode]
svgAttrs["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
svgAttrs.update(kwds.pop('svgAttrs',{}))
for k,v in svgAttrs.items():
self.svg.setAttribute(k,v)
title = self.doc.createElement('title')
text = self.doc.createTextNode('...')
title.appendChild(text)
self.svg.appendChild(title)
desc = self.doc.createElement('desc')
text = self.doc.createTextNode('...')
desc.appendChild(text)
self.svg.appendChild(desc)
self.setFont(STATE_DEFAULTS['fontName'], STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
if not useClip:
# Add a rectangular clipping path identical to view area.
clipPath = transformNode(self.doc, "clipPath", id="clip")
clipRect = transformNode(self.doc, "rect", x=0, y=0,
width=self.width, height=self.height)
clipPath.appendChild(clipRect)
self.svg.appendChild(clipPath)
gtkw = dict(style="clip-path: url(#clip)")
else:
gtkw = dict(clip="0 0 %d %d" % (self.width,self.height))
self.groupTree = transformNode(self.doc, "g",
id="group",
transform="scale(1,-1) translate(0,-%d)" % self.height,
**gtkw
)
if scaleGroupId:
self.scaleTree = transformNode(self.doc, "g", id=scaleGroupId, transform="scale(1,1)")
self.scaleTree.appendChild(self.groupTree)
self.svg.appendChild(self.scaleTree)
else:
self.svg.appendChild(self.groupTree)
self.currGroup = self.groupTree
def save(self, fn=None):
writer = EncodedWriter(self.encoding,bom=self.bom)
self.doc.writexml(writer,addindent="\t",newl="\n",encoding=self.encoding)
if hasattr(fn,'write'):
f = fn
else:
f = open(fn, 'w',encoding=self.encoding)
svg = writer.getvalue()
exd = self.extraXmlDecl
if exd:
svg = svg.replace('?>','?>'+exd)
f.write(svg)
if f is not fn:
f.close()
### helpers ###
def NOTUSED_stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font).
"""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def _formatStyle(self, include=[], exclude='',**kwds):
style = self.style.copy()
style.update(kwds)
keys = list(style.keys())
if include:
keys = [k for k in keys if k in include]
if exclude:
exclude = exclude.split()
items = [k+': '+str(style[k]) for k in keys if k not in exclude]
else:
items = [k+': '+str(style[k]) for k in keys]
return '; '.join(items) + ';'
def _escape(self, s):
'''I don't think this was ever needed; seems to have been copied from renderPS'''
return s
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
"""Calculate the path for an arc inscribed in rectangle defined
by (x1,y1),(x2,y2)."""
        return # intentionally disabled: the PostScript-style code below is kept for reference only
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
def _fillAndStroke(self, code, clip=0, link_info=None,styles=AREA_STYLES,fillMode=None):
xtra = {}
if fillMode:
xtra['fill-rule'] = _fillRuleMap[fillMode]
path = transformNode(self.doc, "path",
d=self.path, style=self._formatStyle(styles),
)
if link_info :
path = self._add_link(path, link_info)
self.currGroup.appendChild(path)
self.path = ''
### styles ###
def setLineCap(self, v):
vals = {0:'butt', 1:'round', 2:'square'}
if self._lineCap != v:
self._lineCap = v
self.style['stroke-linecap'] = vals[v]
def setLineJoin(self, v):
vals = {0:'miter', 1:'round', 2:'bevel'}
if self._lineJoin != v:
self._lineJoin = v
            self.style['stroke-linejoin'] = vals[v] # was 'stroke-linecap', which clobbered the cap style with invalid values
def setDash(self, array=[], phase=0):
"""Two notations. Pass two numbers, or an array and phase."""
if isinstance(array,(float,int)):
self.style['stroke-dasharray'] = ', '.join(map(str, ([array, phase])))
elif isinstance(array,(tuple,list)) and len(array) > 0:
assert phase >= 0, "phase is a length in user space"
self.style['stroke-dasharray'] = ', '.join(map(str, array))
if phase>0:
self.style['stroke-dashoffset'] = str(phase)
def setStrokeColor(self, color):
self._strokeColor = color
if color == None:
self.style['stroke'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['stroke'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['stroke-opacity'] = '%s' % alpha
elif 'stroke-opacity' in self.style:
del self.style['stroke-opacity']
def setFillColor(self, color):
self._fillColor = color
if color == None:
self.style['fill'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['fill'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['fill-opacity'] = '%s' % alpha
elif 'fill-opacity' in self.style:
del self.style['fill-opacity']
def setFillMode(self, v):
self._fillMode = v
self.style['fill-rule'] = _fillRuleMap[v]
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.style['stroke-width'] = width
def setFont(self, font, fontSize):
if self._font != font or self._fontSize != fontSize:
self._font = font
self._fontSize = fontSize
style = self.style
for k in TEXT_STYLES:
if k in style:
del style[k]
svgAttrs = self.fontHacks[font] if font in self.fontHacks else {}
if isinstance(font,RLString):
svgAttrs.update(iter(font.svgAttrs.items()))
if svgAttrs:
for k,v in svgAttrs.items():
a = 'font-'+k
if a in TEXT_STYLES:
style[a] = v
if 'font-family' not in style:
style['font-family'] = font
style['font-size'] = '%spx' % fontSize
def _add_link(self, dom_object, link_info) :
assert isinstance(link_info, dict)
link = transformNode(self.doc, "a", **link_info)
link.appendChild(dom_object)
return link
### shapes ###
def rect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
"Draw a rectangle between x1,y1 and x2,y2."
if self.verbose: print("+++ SVGCanvas.rect")
x = min(x1,x2)
y = min(y1,y2)
kwds = {}
rect = transformNode(self.doc, "rect",
x=x, y=y, width=max(x1,x2)-x, height=max(y1,y2)-y,
style=self._formatStyle(AREA_STYLES),**_svgAttrs)
if link_info :
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
"""Draw a rounded rectangle between x1,y1 and x2,y2.
Corners inset as ellipses with x-radius rx and y-radius ry.
These should have x1<x2, y1<y2, rx>0, and ry>0.
"""
rect = transformNode(self.doc, "rect",
x=x1, y=y1, width=x2-x1, height=y2-y1, rx=rx, ry=ry,
style=self._formatStyle(AREA_STYLES), **_svgAttrs)
if link_info:
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def drawString(self, s, x, y, angle=0, link_info=None, text_anchor='left', textRenderMode=0, **_svgAttrs):
if textRenderMode==3: return #invisible
s = asNative(s)
if self.verbose: print("+++ SVGCanvas.drawString")
needFill = textRenderMode==0 or textRenderMode==2 or textRenderMode==4 or textRenderMode==6
needStroke = textRenderMode==1 or textRenderMode==2 or textRenderMode==5 or textRenderMode==6
if (self._fillColor!=None and needFill) or (self._strokeColor!=None and needStroke):
if not text_anchor in ['start', 'inherited', 'left']:
textLen = stringWidth(s,self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,s,textLen,self._font,self._fontSize)
else:
raise ValueError('bad value for text_anchor ' + str(text_anchor))
s = self._escape(s)
st = self._formatStyle(TEXT_STYLES)
if angle != 0:
st = st + " rotate(%s);" % self.fp_str(angle, x, y)
if needFill:
st += self._formatStyle(EXTRA_FILL_STYLES)
else:
st += " fill:none;"
if needStroke:
st += self._formatStyle(EXTRA_STROKE_STYLES)
else:
st += " stroke:none;"
#if textRenderMode>=4:
# _gstate_clipPathSetOrAddself, -1, 1, 0 /*we are adding*/
text = transformNode(self.doc, "text",
x=x, y=y, style=st,
transform="translate(0,%d) scale(1,-1)" % (2*y),
**_svgAttrs
)
content = self.doc.createTextNode(s)
text.appendChild(content)
if link_info:
text = self._add_link(text, link_info)
self.currGroup.appendChild(text)
def drawCentredString(self, s, x, y, angle=0, text_anchor='middle',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawCentredString")
self.drawString(s,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def drawRightString(self, text, x, y, angle=0,text_anchor='end',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawRightString")
self.drawString(text,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def comment(self, data):
"Add a comment."
comment = self.doc.createComment(data)
# self.currGroup.appendChild(comment)
def drawImage(self, image, x, y, width, height, embed=True):
buf = getBytesIO()
image.save(buf,'png')
buf = asNative(base64.b64encode(buf.getvalue()))
self.currGroup.appendChild(
transformNode(self.doc,'image',
x=x,y=y,width=width,height=height,
href="data:image/png;base64,"+buf,
transform="matrix(%s)" % self.cfp_str(1,0,0,-1,0,height+2*y),
)
)
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
if 0: # something is wrong with line in my SVG viewer...
line = transformNode(self.doc, "line",
x=x1, y=y1, x2=x2, y2=y2,
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(line)
path = transformNode(self.doc, "path",
d="M %s L %s Z" % (self.cfp_str(x1,y1),self.cfp_str(x2,y2)),
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(path)
def ellipse(self, x1, y1, x2, y2, link_info=None):
"""Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
These should have x1<x2 and y1<y2.
"""
ellipse = transformNode(self.doc, "ellipse",
cx=(x1+x2)/2.0, cy=(y1+y2)/2.0, rx=(x2-x1)/2.0, ry=(y2-y1)/2.0,
style=self._formatStyle(AREA_STYLES))
if link_info:
ellipse = self._add_link(ellipse, link_info)
self.currGroup.appendChild(ellipse)
def circle(self, xc, yc, r, link_info=None):
circle = transformNode(self.doc, "circle",
cx=xc, cy=yc, r=r,
style=self._formatStyle(AREA_STYLES))
if link_info:
circle = self._add_link(circle, link_info)
self.currGroup.appendChild(circle)
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
        return # intentionally disabled: the PostScript-style code below is kept for reference only
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.code.append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.code.append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2.
Starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2.
"""
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
mx = rx * cos(startAng*pi/180) + cx
my = ry * sin(startAng*pi/180) + cy
ax = rx * cos((startAng+extent)*pi/180) + cx
ay = ry * sin((startAng+extent)*pi/180) + cy
cfp_str = self.cfp_str
        s = [].append # bound append; the accumulated list itself is recovered below via s.__self__
if fromcenter:
s("M %s L %s" % (cfp_str(cx, cy), cfp_str(ax, ay)))
if fromcenter:
s("A %s %d %d %d %s" % \
(cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(mx, my)))
else:
s("M %s A %s %d %d %d %s Z" % \
(cfp_str(mx, my), cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(mx, my)))
if fromcenter:
s("L %s Z" % cfp_str(cx, cy))
path = transformNode(self.doc, "path",
d=' '.join(s.__self__), style=self._formatStyle())
self.currGroup.appendChild(path)
def polygon(self, points, closed=0, link_info=None):
assert len(points) >= 2, 'Polygon must have 2 or more points'
if self._strokeColor!=None or self._fillColor!=None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polygon",
points=pts, style=self._formatStyle(AREA_STYLES))
if link_info:
polyline = self._add_link(polyline, link_info)
self.currGroup.appendChild(polyline)
# self._fillAndStroke(polyCode)
def lines(self, lineList, color=None, width=None):
# print "### lineList", lineList
        return # intentionally disabled: the code below dates from a PostScript-style backend
if self._strokeColor != None:
codeline = '%s m %s l stroke'
for line in lineList:
self.code.append(codeline % (fp_str(line[0]), fp_str(line[1])))
def polyLine(self, points):
assert len(points) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polyline",
points=pts, style=self._formatStyle(AREA_STYLES,fill=None))
self.currGroup.appendChild(polyline)
### groups ###
def startGroup(self,attrDict=dict(transform="")):
if self.verbose: print("+++ begin SVGCanvas.startGroup")
currGroup = self.currGroup
group = transformNode(self.doc, "g", **attrDict)
currGroup.appendChild(group)
self.currGroup = group
if self.verbose: print("+++ end SVGCanvas.startGroup")
return currGroup
def endGroup(self,currGroup):
if self.verbose: print("+++ begin SVGCanvas.endGroup")
self.currGroup = currGroup
if self.verbose: print("+++ end SVGCanvas.endGroup")
def transform(self, a, b, c, d, e, f):
if self.verbose: print("!!! begin SVGCanvas.transform", a, b, c, d, e, f)
tr = self.currGroup.getAttribute("transform")
if (a, b, c, d, e, f) != (1, 0, 0, 1, 0, 0):
t = 'matrix(%s)' % self.cfp_str(a,b,c,d,e,f)
self.currGroup.setAttribute("transform", "%s %s" % (tr, t))
def translate(self, x, y):
if (x,y) != (0,0):
self.currGroup.setAttribute("transform", "%s %s"
% (self.currGroup.getAttribute("transform"),
'translate(%s)' % self.cfp_str(x,y)))
def scale(self, sx, sy):
if (sx,sy) != (1,1):
self.currGroup.setAttribute("transform", "%s %s"
                % (self.currGroup.getAttribute("transform"), # was self.groups[-1]; SVGCanvas has no 'groups' attribute
'scale(%s)' % self.cfp_str(sx, sy)))
### paths ###
def moveTo(self, x, y):
self.path = self.path + 'M %s ' % self.fp_str(x, y)
def lineTo(self, x, y):
self.path = self.path + 'L %s ' % self.fp_str(x, y)
def curveTo(self, x1, y1, x2, y2, x3, y3):
self.path = self.path + 'C %s ' % self.fp_str(x1, y1, x2, y2, x3, y3)
def closePath(self):
self.path = self.path + 'Z '
def saveState(self):
pass
def restoreState(self):
pass
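# A minimal sketch of driving SVGCanvas directly, without a Drawing or the
# renderer below ('black' is imported at the top of this module):
#
#   c = SVGCanvas(size=(120, 60))
#   c.setFillColor(black)
#   c.rect(10, 10, 110, 50)
#   c.drawString('direct canvas use', 15, 25)
#   c.save('direct.svg')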
class _SVGRenderer(Renderer):
"""This draws onto an SVG document.
"""
def __init__(self):
self.verbose = 0
def drawNode(self, node):
"""This is the recursive method called for each node in the tree.
"""
if self.verbose: print("### begin _SVGRenderer.drawNode(%r)" % node)
self._canvas.comment('begin node %r'%node)
style = self._canvas.style.copy()
if not (isinstance(node, Path) and node.isClipPath):
pass # self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
pass #self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
self._canvas.style = style
if self.verbose: print("### end _SVGRenderer.drawNode(%r)" % node)
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def _get_link_info_dict(self, obj):
#We do not want None or False as the link, even if it is the
#attribute's value - use the empty string instead.
url = getattr(obj, "hrefURL", "") or ""
title = getattr(obj, "hrefTitle", "") or ""
if url :
#Is it valid to have a link with no href? The XML requires
#the xlink:href to be present, but you might just want a
#tool tip shown (via the xlink:title attribute). Note that
#giving an href of "" is equivalent to "the current page"
#(a relative link saying go nowhere).
return {"xlink:href":url, "xlink:title":title, "target":"_top"}
#Currently of all the mainstream browsers I have tested, only Safari/webkit
#will show SVG images embedded in HTML using a simple <img src="..." /> tag.
#However, the links don't work (Safari 3.2.1 on the Mac).
#
#Therefore I use the following, which also works for Firefox, Opera, and
#IE 6.0 with Adobe SVG Viewer 6 beta:
#<object data="..." type="image/svg+xml" width="430" height="150" class="img">
#
#Once displayed, Firefox and Safari treat the SVG like a frame, and
#by default clicking on links acts "in frame" and replaces the image.
#Opera does what I expect, and replaces the whole page with the link.
#
#Therefore I use target="_top" to force the links to replace the whole page.
#This now works as expected on Safari 3.2.1, Firefox 3.0.6, Opera 9.20.
#Perhaps the target attribute should be an option, perhaps defaulting to
#"_top" as used here?
else :
return None
def drawGroup(self, group):
if self.verbose: print("### begin _SVGRenderer.drawGroup")
currGroup = self._canvas.startGroup()
a, b, c, d, e, f = self._tracker.getState()['transform']
for childNode in group.getContents():
if isinstance(childNode, UserNode):
node2 = childNode.provideNode()
else:
node2 = childNode
self.drawNode(node2)
self._canvas.transform(a, b, c, d, e, f)
self._canvas.endGroup(currGroup)
if self.verbose: print("### end _SVGRenderer.drawGroup")
def drawRect(self, rect):
link_info = self._get_link_info_dict(rect)
svgAttrs = getattr(rect,'_svgAttrs',{})
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, link_info=link_info, **svgAttrs)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height,
rect.rx, rect.ry,
link_info=link_info, **svgAttrs)
def drawString(self, stringObj):
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x, stringObj.y, stringObj.text
self._canvas.drawString(text,x,y,link_info=self._get_link_info_dict(stringObj),
text_anchor=text_anchor, textRenderMode=getattr(stringObj,'textRenderMode',0),
**getattr(stringObj,'_svgAttrs',{}))
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r, link_info=self._get_link_info_dict(circle))
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
centerx, centery, radius, startangledegrees, endangledegrees = \
wedge.centerx, wedge.centery, wedge.radius, wedge.startangledegrees, wedge.endangledegrees
yradius = wedge.yradius or wedge.radius
(x1, y1) = (centerx-radius, centery-yradius)
(x2, y2) = (centerx+radius, centery+yradius)
extent = endangledegrees - startangledegrees
self._canvas.drawArc(x1, y1, x2, y2, startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2, link_info=self._get_link_info_dict(ellipse))
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1, link_info=self._get_link_info_dict(p))
def drawPath(self, path, fillMode=FILL_EVEN_ODD):
# print "### drawPath", path.points
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
if fillMode is None:
fillMode = getattr(path,'fillMode',FILL_EVEN_ODD)
link_info = self._get_link_info_dict(path)
autoclose = getattr(path,'autoclose','')
def rP(**kwds):
return _renderPath(path, drawFuncs, **kwds)
if autoclose=='svg':
rP()
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
elif autoclose=='pdf':
rP(forceClose=True)
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
else:
isClosed = rP()
if not isClosed:
ofc = c._fillColor
c.setFillColor(None)
try:
link_info = None
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
finally:
c.setFillColor(ofc)
else:
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
def drawImage(self, image):
path = image.path
if isinstance(path,str):
if not (path and os.path.isfile(path)): return
im = _getImage().open(path)
elif hasattr(path,'convert'):
im = path
else:
return
srcW, srcH = im.size
dstW, dstH = image.width, image.height
if dstW is None: dstW = srcW
if dstH is None: dstH = srcH
self._canvas.drawImage(im, image.x, image.y, dstW, dstH, embed=True)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
pass
#self._canvas.transform(value[0], value[1], value[2], value[3], value[4], value[5])
elif key == 'strokeColor':
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
elif key == 'fillColor':
self._canvas.setFillColor(value)
elif key in ['fontSize', 'fontName']:
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
elif key == 'fillMode':
self._canvas.setFillMode(value)
def test(outDir='out-svg'):
# print all drawings and their doc strings from the test
# file
if not os.path.isdir(outDir):
os.mkdir(outDir)
#grab all drawings from the test module
from reportlab.graphics import testshapes
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
func = getattr(testshapes,funcname)
drawing = func()
docstring = getattr(func,'__doc__','')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = os.path.join(outDir,'renderSVG_%d.svg' % i)
drawToFile(d, filename)
i += 1
from reportlab.graphics.testshapes import getDrawing01
d = getDrawing01()
drawToFile(d, os.path.join(outDir,"test.svg"))
from reportlab.lib.corp import RL_CorpLogo
from reportlab.graphics.shapes import Drawing
rl = RL_CorpLogo()
d = Drawing(rl.width,rl.height)
d.add(rl)
drawToFile(d, os.path.join(outDir,"corplogo.svg"))
if __name__=='__main__':
test()
| 38.821172
| 139
| 0.575252
|
__doc__="""An experimental SVG renderer for the ReportLab graphics framework.
This will create SVG code from the ReportLab Graphics API (RLG).
To read existing SVG code and convert it into ReportLab graphics
objects download the svglib module here:
http://python.net/~gherman/#svglib
"""
import math, types, sys, os, codecs, base64
from operator import getitem
from reportlab.pdfbase.pdfmetrics import stringWidth from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.lib.utils import asNative, getBytesIO
from reportlab.graphics.renderbase import StateTracker, getStateDelta, Renderer, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS, Path, UserNode
from reportlab.graphics.shapes import * from reportlab import rl_config
from reportlab.lib.utils import getStringIO, RLString, isUnicode, isBytes
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
from .renderPM import _getImage
from xml.dom import getDOMImplementation
sin = math.sin
cos = math.cos
pi = math.pi
AREA_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity fill fill-opacity stroke-dasharray stroke-dashoffset fill-rule id'.split()
LINE_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity stroke-dasharray stroke-dashoffset id'.split()
TEXT_STYLES = 'font-family font-weight font-style font-variant font-size id'.split()
EXTRA_STROKE_STYLES = 'stroke-width stroke-linecap stroke stroke-opacity stroke-dasharray stroke-dashoffset'.split()
EXTRA_FILL_STYLES = 'fill fill-opacity'.split()
def drawToString(d, showBoundary=rl_config.showBoundary,**kwds):
s = getStringIO()
drawToFile(d, s, showBoundary=showBoundary,**kwds)
return s.getvalue()
def drawToFile(d, fn, showBoundary=rl_config.showBoundary,**kwds):
d = renderScaledDrawing(d)
c = SVGCanvas((d.width, d.height),**kwds)
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
r = _SVGRenderer()
r.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
def _pointsFromList(L):
P=[]
for i in range(0,len(L), 2):
P.append((L[i], L[i+1]))
return P
def transformNode(doc, newTag, node=None, **attrDict):
newNode = doc.createElement(newTag)
for newAttr, attr in attrDict.items():
sattr = str(attr)
if not node:
newNode.setAttribute(newAttr, sattr)
else:
attrVal = node.getAttribute(sattr)
newNode.setAttribute(newAttr, attrVal or sattr)
return newNode
class EncodedWriter(list):
BOMS = {
'utf-32':codecs.BOM_UTF32,
'utf-32-be':codecs.BOM_UTF32_BE,
'utf-32-le':codecs.BOM_UTF32_LE,
'utf-16':codecs.BOM_UTF16,
'utf-16-be':codecs.BOM_UTF16_BE,
'utf-16-le':codecs.BOM_UTF16_LE,
}
def __init__(self,encoding,bom=False):
list.__init__(self)
self.encoding = encoding = codecs.lookup(encoding).name
if bom and '16' in encoding or '32' in encoding:
self.write(self.BOMS[encoding])
def write(self,u):
if isBytes(u):
try:
u = u.decode('utf-8')
except:
et, ev, tb = sys.exc_info()
ev = str(ev)
del et, tb
raise ValueError("String %r not encoded as 'utf-8'\nerror=%s" % (u,ev))
elif not isUnicode(u):
raise ValueError("EncodedWriter.write(%s) argument should be 'utf-8' bytes or str" % ascii(u))
self.append(u)
def getvalue(self):
r = ''.join(self)
del self[:]
return r
_fillRuleMap = {
FILL_NON_ZERO: 'nonzero',
'non-zero': 'nonzero',
'nonzero': 'nonzero',
FILL_EVEN_ODD: 'evenodd',
'even-odd': 'evenodd',
'evenodd': 'evenodd',
}
def py_fp_str(*args):
return ' '.join((('%f' % a).rstrip('0').rstrip('.') for a in args))
class SVGCanvas:
def __init__(self, size=(300,300), encoding='utf-8', verbose=0, bom=False, **kwds):
self.verbose = verbose
self.encoding = codecs.lookup(encoding).name
self.bom = bom
useClip = kwds.pop('useClip',False)
self.fontHacks = kwds.pop('fontHacks',{})
self.extraXmlDecl = kwds.pop('extraXmlDecl','')
scaleGroupId = kwds.pop('scaleGroupId','')
self._fillMode = FILL_EVEN_ODD
self.width, self.height = self.size = size
self.code = []
self.style = {}
self.path = ''
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = None
if kwds.pop('use_fp_str',False):
self.fp_str = fp_str
else:
self.fp_str = py_fp_str
self.cfp_str = lambda *args: self.fp_str(*args).replace(' ',',')
implementation = getDOMImplementation('minidom')
doctype = implementation.createDocumentType("svg",
"-//W3C//DTD SVG 1.0//EN",
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd")
self.doc = implementation.createDocument(None,"svg",doctype)
self.svg = self.doc.documentElement
svgAttrs = dict(
width = str(size[0]),
height=str(self.height),
preserveAspectRatio="xMinYMin meet",
viewBox="0 0 %d %d" % (self.width, self.height),
#baseProfile = "full", #disliked in V 1.0
#these suggested by Tim Roberts, as updated by peter@maubp.freeserve.co.uk
xmlns="http://www.w3.org/2000/svg",
version="1.0",
)
svgAttrs['fill-rule'] = _fillRuleMap[self._fillMode]
svgAttrs["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
svgAttrs.update(kwds.pop('svgAttrs',{}))
for k,v in svgAttrs.items():
self.svg.setAttribute(k,v)
title = self.doc.createElement('title')
text = self.doc.createTextNode('...')
title.appendChild(text)
self.svg.appendChild(title)
desc = self.doc.createElement('desc')
text = self.doc.createTextNode('...')
desc.appendChild(text)
self.svg.appendChild(desc)
self.setFont(STATE_DEFAULTS['fontName'], STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
if not useClip:
# Add a rectangular clipping path identical to view area.
clipPath = transformNode(self.doc, "clipPath", id="clip")
clipRect = transformNode(self.doc, "rect", x=0, y=0,
width=self.width, height=self.height)
clipPath.appendChild(clipRect)
self.svg.appendChild(clipPath)
gtkw = dict(style="clip-path: url(#clip)")
else:
gtkw = dict(clip="0 0 %d %d" % (self.width,self.height))
self.groupTree = transformNode(self.doc, "g",
id="group",
transform="scale(1,-1) translate(0,-%d)" % self.height,
**gtkw
)
if scaleGroupId:
self.scaleTree = transformNode(self.doc, "g", id=scaleGroupId, transform="scale(1,1)")
self.scaleTree.appendChild(self.groupTree)
self.svg.appendChild(self.scaleTree)
else:
self.svg.appendChild(self.groupTree)
self.currGroup = self.groupTree
def save(self, fn=None):
writer = EncodedWriter(self.encoding,bom=self.bom)
self.doc.writexml(writer,addindent="\t",newl="\n",encoding=self.encoding)
if hasattr(fn,'write'):
f = fn
else:
f = open(fn, 'w',encoding=self.encoding)
svg = writer.getvalue()
exd = self.extraXmlDecl
if exd:
svg = svg.replace('?>','?>'+exd)
f.write(svg)
if f is not fn:
f.close()
### helpers ###
def NOTUSED_stringWidth(self, s, font=None, fontSize=None):
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def _formatStyle(self, include=[], exclude='',**kwds):
style = self.style.copy()
style.update(kwds)
keys = list(style.keys())
if include:
keys = [k for k in keys if k in include]
if exclude:
exclude = exclude.split()
items = [k+': '+str(style[k]) for k in keys if k not in exclude]
else:
items = [k+': '+str(style[k]) for k in keys]
return '; '.join(items) + ';'
def _escape(self, s):
return s
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
return
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
def _fillAndStroke(self, code, clip=0, link_info=None,styles=AREA_STYLES,fillMode=None):
xtra = {}
if fillMode:
xtra['fill-rule'] = _fillRuleMap[fillMode]
path = transformNode(self.doc, "path",
d=self.path, style=self._formatStyle(styles),
)
if link_info :
path = self._add_link(path, link_info)
self.currGroup.appendChild(path)
self.path = ''
### styles ###
def setLineCap(self, v):
vals = {0:'butt', 1:'round', 2:'square'}
if self._lineCap != v:
self._lineCap = v
self.style['stroke-linecap'] = vals[v]
def setLineJoin(self, v):
vals = {0:'miter', 1:'round', 2:'bevel'}
if self._lineJoin != v:
self._lineJoin = v
self.style['stroke-linecap'] = vals[v]
def setDash(self, array=[], phase=0):
if isinstance(array,(float,int)):
self.style['stroke-dasharray'] = ', '.join(map(str, ([array, phase])))
elif isinstance(array,(tuple,list)) and len(array) > 0:
assert phase >= 0, "phase is a length in user space"
self.style['stroke-dasharray'] = ', '.join(map(str, array))
if phase>0:
self.style['stroke-dashoffset'] = str(phase)
def setStrokeColor(self, color):
self._strokeColor = color
if color == None:
self.style['stroke'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['stroke'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['stroke-opacity'] = '%s' % alpha
elif 'stroke-opacity' in self.style:
del self.style['stroke-opacity']
def setFillColor(self, color):
self._fillColor = color
if color == None:
self.style['fill'] = 'none'
else:
r, g, b = color.red, color.green, color.blue
self.style['fill'] = 'rgb(%d%%,%d%%,%d%%)' % (r*100, g*100, b*100)
alpha = color.normalizedAlpha
if alpha!=1:
self.style['fill-opacity'] = '%s' % alpha
elif 'fill-opacity' in self.style:
del self.style['fill-opacity']
def setFillMode(self, v):
self._fillMode = v
self.style['fill-rule'] = _fillRuleMap[v]
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.style['stroke-width'] = width
def setFont(self, font, fontSize):
if self._font != font or self._fontSize != fontSize:
self._font = font
self._fontSize = fontSize
style = self.style
for k in TEXT_STYLES:
if k in style:
del style[k]
svgAttrs = self.fontHacks[font] if font in self.fontHacks else {}
if isinstance(font,RLString):
svgAttrs.update(iter(font.svgAttrs.items()))
if svgAttrs:
for k,v in svgAttrs.items():
a = 'font-'+k
if a in TEXT_STYLES:
style[a] = v
if 'font-family' not in style:
style['font-family'] = font
style['font-size'] = '%spx' % fontSize
def _add_link(self, dom_object, link_info) :
assert isinstance(link_info, dict)
link = transformNode(self.doc, "a", **link_info)
link.appendChild(dom_object)
return link
### shapes ###
def rect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.rect")
x = min(x1,x2)
y = min(y1,y2)
kwds = {}
rect = transformNode(self.doc, "rect",
x=x, y=y, width=max(x1,x2)-x, height=max(y1,y2)-y,
style=self._formatStyle(AREA_STYLES),**_svgAttrs)
if link_info :
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8, link_info=None, **_svgAttrs):
rect = transformNode(self.doc, "rect",
x=x1, y=y1, width=x2-x1, height=y2-y1, rx=rx, ry=ry,
style=self._formatStyle(AREA_STYLES), **_svgAttrs)
if link_info:
rect = self._add_link(rect, link_info)
self.currGroup.appendChild(rect)
def drawString(self, s, x, y, angle=0, link_info=None, text_anchor='left', textRenderMode=0, **_svgAttrs):
if textRenderMode==3: return #invisible
s = asNative(s)
if self.verbose: print("+++ SVGCanvas.drawString")
needFill = textRenderMode==0 or textRenderMode==2 or textRenderMode==4 or textRenderMode==6
needStroke = textRenderMode==1 or textRenderMode==2 or textRenderMode==5 or textRenderMode==6
if (self._fillColor!=None and needFill) or (self._strokeColor!=None and needStroke):
if not text_anchor in ['start', 'inherited', 'left']:
textLen = stringWidth(s,self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,s,textLen,self._font,self._fontSize)
else:
raise ValueError('bad value for text_anchor ' + str(text_anchor))
s = self._escape(s)
st = self._formatStyle(TEXT_STYLES)
if angle != 0:
st = st + " rotate(%s);" % self.fp_str(angle, x, y)
if needFill:
st += self._formatStyle(EXTRA_FILL_STYLES)
else:
st += " fill:none;"
if needStroke:
st += self._formatStyle(EXTRA_STROKE_STYLES)
else:
st += " stroke:none;"
#if textRenderMode>=4:
# _gstate_clipPathSetOrAddself, -1, 1, 0 /*we are adding*/
text = transformNode(self.doc, "text",
x=x, y=y, style=st,
transform="translate(0,%d) scale(1,-1)" % (2*y),
**_svgAttrs
)
content = self.doc.createTextNode(s)
text.appendChild(content)
if link_info:
text = self._add_link(text, link_info)
self.currGroup.appendChild(text)
def drawCentredString(self, s, x, y, angle=0, text_anchor='middle',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawCentredString")
self.drawString(s,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def drawRightString(self, text, x, y, angle=0,text_anchor='end',
link_info=None, textRenderMode=0, **_svgAttrs):
if self.verbose: print("+++ SVGCanvas.drawRightString")
self.drawString(text,x,y,angle=angle, link_info=link_info, text_anchor=text_anchor,
textRenderMode=textRenderMode, **_svgAttrs)
def comment(self, data):
comment = self.doc.createComment(data)
# self.currGroup.appendChild(comment)
def drawImage(self, image, x, y, width, height, embed=True):
buf = getBytesIO()
image.save(buf,'png')
buf = asNative(base64.b64encode(buf.getvalue()))
self.currGroup.appendChild(
transformNode(self.doc,'image',
x=x,y=y,width=width,height=height,
href="data:image/png;base64,"+buf,
transform="matrix(%s)" % self.cfp_str(1,0,0,-1,0,height+2*y),
)
)
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
if 0: # something is wrong with line in my SVG viewer...
line = transformNode(self.doc, "line",
x=x1, y=y1, x2=x2, y2=y2,
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(line)
path = transformNode(self.doc, "path",
d="M %s L %s Z" % (self.cfp_str(x1,y1),self.cfp_str(x2,y2)),
style=self._formatStyle(LINE_STYLES))
self.currGroup.appendChild(path)
def ellipse(self, x1, y1, x2, y2, link_info=None):
ellipse = transformNode(self.doc, "ellipse",
cx=(x1+x2)/2.0, cy=(y1+y2)/2.0, rx=(x2-x1)/2.0, ry=(y2-y1)/2.0,
style=self._formatStyle(AREA_STYLES))
if link_info:
ellipse = self._add_link(ellipse, link_info)
self.currGroup.appendChild(ellipse)
def circle(self, xc, yc, r, link_info=None):
circle = transformNode(self.doc, "circle",
cx=xc, cy=yc, r=r,
style=self._formatStyle(AREA_STYLES))
if link_info:
circle = self._add_link(circle, link_info)
self.currGroup.appendChild(circle)
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
pass
return
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.code.append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.code.append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
mx = rx * cos(startAng*pi/180) + cx
my = ry * sin(startAng*pi/180) + cy
ax = rx * cos((startAng+extent)*pi/180) + cx
ay = ry * sin((startAng+extent)*pi/180) + cy
cfp_str = self.cfp_str
s = [].append
if fromcenter:
s("M %s L %s" % (cfp_str(cx, cy), cfp_str(ax, ay)))
if fromcenter:
s("A %s %d %d %d %s" % \
(cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(mx, my)))
else:
s("M %s A %s %d %d %d %s Z" % \
(cfp_str(mx, my), cfp_str(rx, ry), 0, extent>=180, 0, cfp_str(mx, my)))
if fromcenter:
s("L %s Z" % cfp_str(cx, cy))
path = transformNode(self.doc, "path",
d=' '.join(s.__self__), style=self._formatStyle())
self.currGroup.appendChild(path)
def polygon(self, points, closed=0, link_info=None):
assert len(points) >= 2, 'Polygon must have 2 or more points'
if self._strokeColor!=None or self._fillColor!=None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polygon",
points=pts, style=self._formatStyle(AREA_STYLES))
if link_info:
polyline = self._add_link(polyline, link_info)
self.currGroup.appendChild(polyline)
# self._fillAndStroke(polyCode)
def lines(self, lineList, color=None, width=None):
# print "### lineList", lineList
return
if self._strokeColor != None:
codeline = '%s m %s l stroke'
for line in lineList:
self.code.append(codeline % (fp_str(line[0]), fp_str(line[1])))
def polyLine(self, points):
assert len(points) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
pts = ', '.join([fp_str(*p) for p in points])
polyline = transformNode(self.doc, "polyline",
points=pts, style=self._formatStyle(AREA_STYLES,fill=None))
self.currGroup.appendChild(polyline)
### groups ###
def startGroup(self,attrDict=dict(transform="")):
if self.verbose: print("+++ begin SVGCanvas.startGroup")
currGroup = self.currGroup
group = transformNode(self.doc, "g", **attrDict)
currGroup.appendChild(group)
self.currGroup = group
if self.verbose: print("+++ end SVGCanvas.startGroup")
return currGroup
def endGroup(self,currGroup):
if self.verbose: print("+++ begin SVGCanvas.endGroup")
self.currGroup = currGroup
if self.verbose: print("+++ end SVGCanvas.endGroup")
def transform(self, a, b, c, d, e, f):
if self.verbose: print("!!! begin SVGCanvas.transform", a, b, c, d, e, f)
tr = self.currGroup.getAttribute("transform")
if (a, b, c, d, e, f) != (1, 0, 0, 1, 0, 0):
t = 'matrix(%s)' % self.cfp_str(a,b,c,d,e,f)
self.currGroup.setAttribute("transform", "%s %s" % (tr, t))
def translate(self, x, y):
if (x,y) != (0,0):
self.currGroup.setAttribute("transform", "%s %s"
% (self.currGroup.getAttribute("transform"),
'translate(%s)' % self.cfp_str(x,y)))
def scale(self, sx, sy):
if (sx,sy) != (1,1):
self.currGroup.setAttribute("transform", "%s %s"
% (self.groups[-1].getAttribute("transform"),
'scale(%s)' % self.cfp_str(sx, sy)))
### paths ###
def moveTo(self, x, y):
self.path = self.path + 'M %s ' % self.fp_str(x, y)
def lineTo(self, x, y):
self.path = self.path + 'L %s ' % self.fp_str(x, y)
def curveTo(self, x1, y1, x2, y2, x3, y3):
self.path = self.path + 'C %s ' % self.fp_str(x1, y1, x2, y2, x3, y3)
def closePath(self):
self.path = self.path + 'Z '
def saveState(self):
pass
def restoreState(self):
pass
class _SVGRenderer(Renderer):
def __init__(self):
self.verbose = 0
def drawNode(self, node):
if self.verbose: print("### begin _SVGRenderer.drawNode(%r)" % node)
self._canvas.comment('begin node %r'%node)
style = self._canvas.style.copy()
if not (isinstance(node, Path) and node.isClipPath):
pass # self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
pass #self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
self._canvas.style = style
if self.verbose: print("### end _SVGRenderer.drawNode(%r)" % node)
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def _get_link_info_dict(self, obj):
#We do not want None or False as the link, even if it is the
#attribute's value - use the empty string instead.
url = getattr(obj, "hrefURL", "") or ""
title = getattr(obj, "hrefTitle", "") or ""
if url :
return {"xlink:href":url, "xlink:title":title, "target":"_top"}
#
#Therefore I use the following, which also works for Firefox, Opera, and
#IE 6.0 with Adobe SVG Viewer 6 beta:
#<object data="..." type="image/svg+xml" width="430" height="150" class="img">
#
#Once displayed, Firefox and Safari treat the SVG like a frame, and
#by default clicking on links acts "in frame" and replaces the image.
#Opera does what I expect, and replaces the whole page with the link.
#
#Therefore I use target="_top" to force the links to replace the whole page.
#This now works as expected on Safari 3.2.1, Firefox 3.0.6, Opera 9.20.
#Perhaps the target attribute should be an option, perhaps defaulting to
#"_top" as used here?
else :
return None
def drawGroup(self, group):
if self.verbose: print("### begin _SVGRenderer.drawGroup")
currGroup = self._canvas.startGroup()
a, b, c, d, e, f = self._tracker.getState()['transform']
for childNode in group.getContents():
if isinstance(childNode, UserNode):
node2 = childNode.provideNode()
else:
node2 = childNode
self.drawNode(node2)
self._canvas.transform(a, b, c, d, e, f)
self._canvas.endGroup(currGroup)
if self.verbose: print("### end _SVGRenderer.drawGroup")
def drawRect(self, rect):
link_info = self._get_link_info_dict(rect)
svgAttrs = getattr(rect,'_svgAttrs',{})
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, link_info=link_info, **svgAttrs)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height,
rect.rx, rect.ry,
link_info=link_info, **svgAttrs)
def drawString(self, stringObj):
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x, stringObj.y, stringObj.text
self._canvas.drawString(text,x,y,link_info=self._get_link_info_dict(stringObj),
text_anchor=text_anchor, textRenderMode=getattr(stringObj,'textRenderMode',0),
**getattr(stringObj,'_svgAttrs',{}))
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r, link_info=self._get_link_info_dict(circle))
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
centerx, centery, radius, startangledegrees, endangledegrees = \
wedge.centerx, wedge.centery, wedge.radius, wedge.startangledegrees, wedge.endangledegrees
yradius = wedge.yradius or wedge.radius
(x1, y1) = (centerx-radius, centery-yradius)
(x2, y2) = (centerx+radius, centery+yradius)
extent = endangledegrees - startangledegrees
self._canvas.drawArc(x1, y1, x2, y2, startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2, link_info=self._get_link_info_dict(ellipse))
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1, link_info=self._get_link_info_dict(p))
def drawPath(self, path, fillMode=FILL_EVEN_ODD):
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
if fillMode is None:
fillMode = getattr(path,'fillMode',FILL_EVEN_ODD)
link_info = self._get_link_info_dict(path)
autoclose = getattr(path,'autoclose','')
def rP(**kwds):
return _renderPath(path, drawFuncs, **kwds)
if autoclose=='svg':
rP()
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
elif autoclose=='pdf':
rP(forceClose=True)
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
else:
isClosed = rP()
if not isClosed:
ofc = c._fillColor
c.setFillColor(None)
try:
link_info = None
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
finally:
c.setFillColor(ofc)
else:
c._fillAndStroke([], clip=path.isClipPath, link_info=link_info, fillMode=fillMode)
def drawImage(self, image):
path = image.path
if isinstance(path,str):
if not (path and os.path.isfile(path)): return
im = _getImage().open(path)
elif hasattr(path,'convert'):
im = path
else:
return
srcW, srcH = im.size
dstW, dstH = image.width, image.height
if dstW is None: dstW = srcW
if dstH is None: dstH = srcH
self._canvas.drawImage(im, image.x, image.y, dstW, dstH, embed=True)
def applyStateChanges(self, delta, newState):
for key, value in delta.items():
if key == 'transform':
pass
elif key == 'strokeColor':
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
elif key == 'fillColor':
self._canvas.setFillColor(value)
elif key in ['fontSize', 'fontName']:
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
elif key == 'fillMode':
self._canvas.setFillMode(value)
def test(outDir='out-svg'):
if not os.path.isdir(outDir):
os.mkdir(outDir)
from reportlab.graphics import testshapes
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
func = getattr(testshapes,funcname)
drawing = func()
docstring = getattr(func,'__doc__','')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = os.path.join(outDir,'renderSVG_%d.svg' % i)
drawToFile(d, filename)
i += 1
from reportlab.graphics.testshapes import getDrawing01
d = getDrawing01()
drawToFile(d, os.path.join(outDir,"test.svg"))
from reportlab.lib.corp import RL_CorpLogo
from reportlab.graphics.shapes import Drawing
rl = RL_CorpLogo()
d = Drawing(rl.width,rl.height)
d.add(rl)
drawToFile(d, os.path.join(outDir,"corplogo.svg"))
if __name__=='__main__':
test()
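The strokeDashArray branch in applyStateChanges above accepts either a flat dash list or a (phase, dashes) pair. A minimal standalone sketch of that normalization; normalize_dash is an illustrative name, not part of reportlab's API:

def normalize_dash(value):
    # Mirrors the branch above: a (phase, [dashes]) pair is split,
    # while a flat dash list gets an implicit phase of 0.
    if value:
        if isinstance(value, (list, tuple)) and len(value) == 2 \
                and isinstance(value[1], (tuple, list)):
            phase, dashes = value
        else:
            phase, dashes = 0, value
        return phase, list(dashes)
    return 0, []  # an empty value means a solid line

assert normalize_dash([3, 1]) == (0, [3, 1])
assert normalize_dash((2, (3, 1))) == (2, [3, 1])
assert normalize_dash(None) == (0, [])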
| true
| true
|
f709c78660219ac4debdf4bfe4edf56a0cc485d8
| 6,193
|
py
|
Python
|
pyqtgraph/configfile.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | 5
|
2019-03-08T05:30:20.000Z
|
2021-05-15T07:33:50.000Z
|
pyqtgraph/configfile.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | 1
|
2019-01-14T09:00:21.000Z
|
2019-01-14T09:00:21.000Z
|
pyqtgraph/configfile.py
|
Tillsten/pyqtgraph
|
0045863165fe526988c58cf4f8232ae2d261a5ee
|
[
"MIT"
] | 1
|
2022-02-01T12:45:29.000Z
|
2022-02-01T12:45:29.000Z
|
# -*- coding: utf-8 -*-
"""
configfile.py - Human-readable text configuration file library
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
Used for reading and writing dictionary objects to a python-like configuration
file format. Data structures may be nested and contain any data type as long
as it can be converted to/from a string using repr and eval.
"""
import re, os, sys
from .pgcollections import OrderedDict
GLOBAL_PATH = None # so not thread safe.
from . import units
from .python2_3 import asUnicode
try:
    basestring
except NameError:
    basestring = str  # Python 3: parseString below tests isinstance(lines, basestring)
class ParseError(Exception):
def __init__(self, message, lineNum, line, fileName=None):
self.lineNum = lineNum
self.line = line
        self.message = message  # __str__ below reads this; Exception.message no longer exists in Python 3
self.fileName = fileName
Exception.__init__(self, message)
def __str__(self):
if self.fileName is None:
msg = "Error parsing string at line %d:\n" % self.lineNum
else:
msg = "Error parsing config file '%s' at line %d:\n" % (self.fileName, self.lineNum)
msg += "%s\n%s" % (self.line, self.message)
return msg
def writeConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'w')
fd.write(s)
fd.close()
def readConfigFile(fname):
#cwd = os.getcwd()
global GLOBAL_PATH
if GLOBAL_PATH is not None:
fname2 = os.path.join(GLOBAL_PATH, fname)
if os.path.exists(fname2):
fname = fname2
GLOBAL_PATH = os.path.dirname(os.path.abspath(fname))
try:
#os.chdir(newDir) ## bad.
fd = open(fname)
s = asUnicode(fd.read())
fd.close()
s = s.replace("\r\n", "\n")
s = s.replace("\r", "\n")
data = parseString(s)[1]
except ParseError:
sys.exc_info()[1].fileName = fname
raise
except:
print("Error while reading config file %s:"% fname)
raise
#finally:
#os.chdir(cwd)
return data
def appendConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'a')
fd.write(s)
fd.close()
def genString(data, indent=''):
s = ''
for k in data:
sk = str(k)
if len(sk) == 0:
print(data)
raise Exception('blank dict keys not allowed (see data above)')
if sk[0] == ' ' or ':' in sk:
print(data)
raise Exception('dict keys must not contain ":" or start with spaces [offending key is "%s"]' % sk)
if isinstance(data[k], dict):
s += indent + sk + ':\n'
s += genString(data[k], indent + ' ')
else:
s += indent + sk + ': ' + repr(data[k]) + '\n'
return s
def parseString(lines, start=0):
data = OrderedDict()
if isinstance(lines, basestring):
lines = lines.split('\n')
lines = [l for l in lines if re.search(r'\S', l) and not re.match(r'\s*#', l)] ## remove empty lines
indent = measureIndent(lines[start])
ln = start - 1
try:
while True:
ln += 1
#print ln
if ln >= len(lines):
break
l = lines[ln]
## Skip blank lines or lines starting with #
if re.match(r'\s*#', l) or not re.search(r'\S', l):
continue
## Measure line indentation, make sure it is correct for this level
lineInd = measureIndent(l)
if lineInd < indent:
ln -= 1
break
if lineInd > indent:
#print lineInd, indent
raise ParseError('Indentation is incorrect. Expected %d, got %d' % (indent, lineInd), ln+1, l)
if ':' not in l:
raise ParseError('Missing colon', ln+1, l)
(k, p, v) = l.partition(':')
k = k.strip()
v = v.strip()
## set up local variables to use for eval
local = units.allUnits.copy()
local['OrderedDict'] = OrderedDict
local['readConfigFile'] = readConfigFile
if len(k) < 1:
raise ParseError('Missing name preceding colon', ln+1, l)
if k[0] == '(' and k[-1] == ')': ## If the key looks like a tuple, try evaluating it.
try:
k1 = eval(k, local)
if type(k1) is tuple:
k = k1
except:
pass
if re.search(r'\S', v) and v[0] != '#': ## eval the value
try:
val = eval(v, local)
except:
ex = sys.exc_info()[1]
raise ParseError("Error evaluating expression '%s': [%s: %s]" % (v, ex.__class__.__name__, str(ex)), (ln+1), l)
else:
if ln+1 >= len(lines) or measureIndent(lines[ln+1]) <= indent:
#print "blank dict"
val = {}
else:
#print "Going deeper..", ln+1
(ln, val) = parseString(lines, start=ln+1)
data[k] = val
#print k, repr(val)
except ParseError:
raise
except:
ex = sys.exc_info()[1]
raise ParseError("%s: %s" % (ex.__class__.__name__, str(ex)), ln+1, l)
#print "Returning shallower..", ln+1
return (ln, data)
def measureIndent(s):
n = 0
while n < len(s) and s[n] == ' ':
n += 1
return n
if __name__ == '__main__':
import tempfile
fn = tempfile.mktemp()
tf = open(fn, 'w')
cf = """
key: 'value'
key2: ##comment
##comment
key21: 'value' ## comment
##comment
key22: [1,2,3]
key23: 234 #comment
"""
tf.write(cf)
tf.close()
print("=== Test:===")
num = 1
for line in cf.split('\n'):
print("%02d %s" % (num, line))
num += 1
print(cf)
print("============")
data = readConfigFile(fn)
print(data)
os.remove(fn)
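For reference, this is the indentation-based layout genString emits and parseString re-reads. A self-contained sketch that mimics the writer's rules without importing pyqtgraph (the 4-space indent width is illustrative):

def gen_string(data, indent=''):
    # Same rules as genString above: a nested dict becomes an indented
    # block after "key:", everything else is written with repr().
    s = ''
    for k, v in data.items():
        if isinstance(v, dict):
            s += indent + str(k) + ':\n'
            s += gen_string(v, indent + '    ')
        else:
            s += indent + str(k) + ': ' + repr(v) + '\n'
    return s

print(gen_string({'key': 'value', 'key2': {'key21': [1, 2, 3]}}))
# key: 'value'
# key2:
#     key21: [1, 2, 3]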
| 30.507389
| 131
| 0.501211
|
import re, os, sys
from .pgcollections import OrderedDict
GLOBAL_PATH = None
from . import units
from .python2_3 import asUnicode
try:
    basestring
except NameError:
    basestring = str
class ParseError(Exception):
def __init__(self, message, lineNum, line, fileName=None):
self.lineNum = lineNum
self.line = line
        self.message = message
        self.fileName = fileName
Exception.__init__(self, message)
def __str__(self):
if self.fileName is None:
msg = "Error parsing string at line %d:\n" % self.lineNum
else:
msg = "Error parsing config file '%s' at line %d:\n" % (self.fileName, self.lineNum)
msg += "%s\n%s" % (self.line, self.message)
return msg
def writeConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'w')
fd.write(s)
fd.close()
def readConfigFile(fname):
global GLOBAL_PATH
if GLOBAL_PATH is not None:
fname2 = os.path.join(GLOBAL_PATH, fname)
if os.path.exists(fname2):
fname = fname2
GLOBAL_PATH = os.path.dirname(os.path.abspath(fname))
try:
fd = open(fname)
s = asUnicode(fd.read())
fd.close()
s = s.replace("\r\n", "\n")
s = s.replace("\r", "\n")
data = parseString(s)[1]
except ParseError:
sys.exc_info()[1].fileName = fname
raise
except:
print("Error while reading config file %s:"% fname)
raise
return data
def appendConfigFile(data, fname):
s = genString(data)
fd = open(fname, 'a')
fd.write(s)
fd.close()
def genString(data, indent=''):
s = ''
for k in data:
sk = str(k)
if len(sk) == 0:
print(data)
raise Exception('blank dict keys not allowed (see data above)')
if sk[0] == ' ' or ':' in sk:
print(data)
raise Exception('dict keys must not contain ":" or start with spaces [offending key is "%s"]' % sk)
if isinstance(data[k], dict):
s += indent + sk + ':\n'
s += genString(data[k], indent + ' ')
else:
s += indent + sk + ': ' + repr(data[k]) + '\n'
return s
def parseString(lines, start=0):
data = OrderedDict()
if isinstance(lines, basestring):
lines = lines.split('\n')
lines = [l for l in lines if re.search(r'\S', l) and not re.match(r'\s*#', l)] ## remove empty lines
indent = measureIndent(lines[start])
ln = start - 1
try:
while True:
ln += 1
if ln >= len(lines):
break
l = lines[ln]
if re.match(r'\s*#', l) or not re.search(r'\S', l):
continue
lineInd = measureIndent(l)
if lineInd < indent:
ln -= 1
break
if lineInd > indent:
raise ParseError('Indentation is incorrect. Expected %d, got %d' % (indent, lineInd), ln+1, l)
if ':' not in l:
raise ParseError('Missing colon', ln+1, l)
(k, p, v) = l.partition(':')
k = k.strip()
v = v.strip()
local = units.allUnits.copy()
local['OrderedDict'] = OrderedDict
local['readConfigFile'] = readConfigFile
if len(k) < 1:
raise ParseError('Missing name preceding colon', ln+1, l)
            if k[0] == '(' and k[-1] == ')':
                try:
k1 = eval(k, local)
if type(k1) is tuple:
k = k1
except:
pass
if re.search(r'\S', v) and v[0] != '#': ## eval the value
try:
val = eval(v, local)
except:
ex = sys.exc_info()[1]
raise ParseError("Error evaluating expression '%s': [%s: %s]" % (v, ex.__class__.__name__, str(ex)), (ln+1), l)
else:
if ln+1 >= len(lines) or measureIndent(lines[ln+1]) <= indent:
val = {}
else:
(ln, val) = parseString(lines, start=ln+1)
data[k] = val
except ParseError:
raise
except:
ex = sys.exc_info()[1]
raise ParseError("%s: %s" % (ex.__class__.__name__, str(ex)), ln+1, l)
return (ln, data)
def measureIndent(s):
n = 0
while n < len(s) and s[n] == ' ':
n += 1
return n
if __name__ == '__main__':
import tempfile
fn = tempfile.mktemp()
tf = open(fn, 'w')
cf = """
key: 'value'
key2: ##comment
##comment
key21: 'value' ## comment
##comment
key22: [1,2,3]
key23: 234 #comment
"""
tf.write(cf)
tf.close()
print("=== Test:===")
num = 1
for line in cf.split('\n'):
print("%02d %s" % (num, line))
num += 1
print(cf)
print("============")
data = readConfigFile(fn)
print(data)
os.remove(fn)
| true
| true
|
f709c91120596c3ea3b45b02c68538a4ccfbd7ce
| 7,723
|
py
|
Python
|
tempest/api/compute/volumes/test_volumes_list.py
|
ssameerr/tempest
|
e413f28661c2aab3f8da8d005db1fa5c59cc6b68
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/volumes/test_volumes_list.py
|
ssameerr/tempest
|
e413f28661c2aab3f8da8d005db1fa5c59cc6b68
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/volumes/test_volumes_list.py
|
ssameerr/tempest
|
e413f28661c2aab3f8da8d005db1fa5c59cc6b68
|
[
"Apache-2.0"
] | 2
|
2015-04-30T08:46:29.000Z
|
2020-03-01T17:05:23.000Z
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesTestJSON(base.BaseV2ComputeTest):
# NOTE: This test creates a number of 1G volumes. To run successfully,
# ensure that the backing file for the volume group that Nova uses
# has space for at least 3 1G volumes!
# If you are running a Devstack environment, ensure that the
# VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
@classmethod
def skip_checks(cls):
super(VolumesTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@classmethod
def resource_setup(cls):
super(VolumesTestJSON, cls).resource_setup()
# Create 3 Volumes
cls.volume_list = []
cls.volume_id_list = []
for i in range(3):
v_name = data_utils.rand_name('volume')
metadata = {'Type': 'work'}
try:
volume = cls.client.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)['volume']
waiters.wait_for_volume_status(cls.client,
volume['id'], 'available')
volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
# too small. So, here, we clean up whatever we did manage
# to create and raise a SkipTest
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
msg = ("Failed to create ALL necessary volumes to run "
"test. This typically means that the backing file "
"size of the nova-volumes group is too small to "
"create the 3 volumes needed by this test case")
raise cls.skipException(msg)
raise
@classmethod
def resource_cleanup(cls):
# Delete the created Volumes
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).resource_cleanup()
@test.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
def test_volume_list(self):
# Should return the list of Volumes
# Fetch all Volumes
fetched_list = self.client.list_volumes()['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
# Fetch all Volumes
fetched_list = self.client.list_volumes(detail=True)['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
def test_volume_list_param_limit(self):
# Return the list of volumes based on limit set
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
@test.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
def test_volume_list_with_detail_param_limit(self):
# Return the list of volumes with details based on limit set.
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
@test.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
def test_volume_list_param_offset_and_limit(self):
# Return the list of volumes based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes()['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
@test.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
def test_volume_list_with_detail_param_offset_and_limit(self):
# Return the list of volumes details based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes(detail=True)['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volume details by "
"offset and limit")
| 45.698225
| 79
| 0.604299
|
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(VolumesTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@classmethod
def resource_setup(cls):
super(VolumesTestJSON, cls).resource_setup()
cls.volume_list = []
cls.volume_id_list = []
for i in range(3):
v_name = data_utils.rand_name('volume')
metadata = {'Type': 'work'}
try:
volume = cls.client.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)['volume']
waiters.wait_for_volume_status(cls.client,
volume['id'], 'available')
volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
except Exception:
if cls.volume_list:
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
msg = ("Failed to create ALL necessary volumes to run "
"test. This typically means that the backing file "
"size of the nova-volumes group is too small to "
"create the 3 volumes needed by this test case")
raise cls.skipException(msg)
raise
@classmethod
def resource_cleanup(cls):
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).resource_cleanup()
@test.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
def test_volume_list(self):
fetched_list = self.client.list_volumes()['volumes']
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
def test_volume_list_with_details(self):
fetched_list = self.client.list_volumes(detail=True)['volumes']
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
def test_volume_list_param_limit(self):
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
@test.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
def test_volume_list_with_detail_param_limit(self):
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
@test.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
def test_volume_list_param_offset_and_limit(self):
all_vol_list = self.client.list_volumes()['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by offset and limit")
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
@test.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
def test_volume_list_with_detail_param_offset_and_limit(self):
all_vol_list = self.client.list_volumes(detail=True)['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by offset and limit")
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volume details by "
"offset and limit")
| true
| true
|
f709ca6f7784c7430429a65498dd7dcee60b9751
| 468
|
py
|
Python
|
src/providers/__init__.py
|
abdellatifLabr/social-media-stocks-tracker
|
b54f1db488d8b26e292ec025d1af7f8d4b5a94da
|
[
"MIT"
] | null | null | null |
src/providers/__init__.py
|
abdellatifLabr/social-media-stocks-tracker
|
b54f1db488d8b26e292ec025d1af7f8d4b5a94da
|
[
"MIT"
] | null | null | null |
src/providers/__init__.py
|
abdellatifLabr/social-media-stocks-tracker
|
b54f1db488d8b26e292ec025d1af7f8d4b5a94da
|
[
"MIT"
] | null | null | null |
import os
from importlib import import_module
def get_providers():
for provider_file in os.listdir(os.path.dirname(os.path.abspath(__file__))):
if provider_file[0] != '$':
continue
provider = provider_file.replace('.py', '')
yield import_module(f'{__package__}.{provider}')
def get_provider(name, *args, **kwargs):
provider_module = import_module(f'{__name__}.${name}')
return provider_module.run(*args, **kwargs)
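get_providers above keys discovery off a leading '$' in the file name. A dry sketch of just that discovery step against a throwaway directory; the '$twitter.py' and '$reddit.py' names are hypothetical and nothing is actually imported:

import os, tempfile

with tempfile.TemporaryDirectory() as d:
    for name in ('$twitter.py', '$reddit.py', 'helpers.py'):
        open(os.path.join(d, name), 'w').close()
    # Same filter as get_providers: only '$'-prefixed files count as providers.
    providers = sorted(f[:-3] for f in os.listdir(d)
                       if f.startswith('$') and f.endswith('.py'))
    print(providers)  # ['$reddit', '$twitter']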
| 27.529412
| 80
| 0.67094
|
import os
from importlib import import_module
def get_providers():
for provider_file in os.listdir(os.path.dirname(os.path.abspath(__file__))):
if provider_file[0] != '$':
continue
provider = provider_file.replace('.py', '')
yield import_module(f'{__package__}.{provider}')
def get_provider(name, *args, **kwargs):
provider_module = import_module(f'{__name__}.${name}')
return provider_module.run(*args, **kwargs)
| true
| true
|
f709cc9fb30c63d9a152a36daa62b63547568b39
| 6,798
|
py
|
Python
|
userbot/modules/updater.py
|
badwordking/import
|
612b6c386a23a925fa44384c93f8fecc7160bee4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/updater.py
|
badwordking/import
|
612b6c386a23a925fa44384c93f8fecc7160bee4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/updater.py
|
badwordking/import
|
612b6c386a23a925fa44384c93f8fecc7160bee4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""
This module updates the userbot based on Upstream revision
"""
from os import remove, execle, path, makedirs, getenv, environ
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from userbot.events import register
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@register(outgoing=True, pattern="^.update(?: |$)(.*)")
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"`Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using .update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('`do \".update now\" to update`')
return
if force_update:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
    # We're in a Heroku Dyno, handle its memez.
if HEROKU_APIKEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APPNAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APPNAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APPNAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_APIKEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
# Classic Updater, pretty straightforward.
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
    execle(sys.executable, *args, environ)  # os itself is never imported; use the name imported above
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
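gen_chlog only needs a commit's date, summary, and author, so the formatting can be exercised without GitPython or a repository; Commit here is a stand-in, not GitPython's class:

from collections import namedtuple
from datetime import datetime

Commit = namedtuple('Commit', 'committed_datetime summary author')

def format_changelog(commits, d_form='%d/%m/%y'):
    # Same line layout as gen_chlog: "•[date]: summary <author>".
    return ''.join(
        f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
        for c in commits)

demo = [Commit(datetime(2020, 1, 2), 'Fix updater', 'dev')]
print(format_changelog(demo))  # •[02/01/20]: Fix updater <dev>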
| 35.041237
| 106
| 0.598117
|
from os import remove, execle, path, makedirs, getenv, environ
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot, HEROKU_APIKEY, HEROKU_APPNAME, UPSTREAM_REPO_URL
from userbot.events import register
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@register(outgoing=True, pattern="^.update(?: |$)(.*)")
async def upstream(ups):
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"`Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using .update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('`do \".update now\" to update`')
return
if force_update:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
if HEROKU_APIKEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_APIKEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APPNAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APPNAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APPNAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_APIKEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
args = [sys.executable, "-m", "userbot"]
    execle(sys.executable, *args, environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
| true
| true
|
f709cf5255d5c1ff9141ddd1b67349a4b9fdd3fb
| 8,536
|
pyw
|
Python
|
venv/Lib/site-packages/PyQt4/examples/widgets/imageviewer.pyw
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | 1
|
2022-03-16T02:10:30.000Z
|
2022-03-16T02:10:30.000Z
|
venv/Lib/site-packages/PyQt4/examples/widgets/imageviewer.pyw
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/PyQt4/examples/widgets/imageviewer.pyw
|
prateekfxtd/ns_Startup
|
095a62b3a8c7bf0ff7b767355d57d993bbd2423d
|
[
"MIT"
] | 2
|
2019-05-28T11:58:59.000Z
|
2020-09-23T17:21:19.000Z
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class ImageViewer(QtGui.QMainWindow):
def __init__(self):
super(ImageViewer, self).__init__()
self.printer = QtGui.QPrinter()
self.scaleFactor = 0.0
self.imageLabel = QtGui.QLabel()
self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
self.imageLabel.setSizePolicy(QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Ignored)
self.imageLabel.setScaledContents(True)
self.scrollArea = QtGui.QScrollArea()
self.scrollArea.setBackgroundRole(QtGui.QPalette.Dark)
self.scrollArea.setWidget(self.imageLabel)
self.setCentralWidget(self.scrollArea)
self.createActions()
self.createMenus()
self.setWindowTitle("Image Viewer")
self.resize(500, 400)
def open(self):
fileName = QtGui.QFileDialog.getOpenFileName(self, "Open File",
QtCore.QDir.currentPath())
if fileName:
image = QtGui.QImage(fileName)
if image.isNull():
QtGui.QMessageBox.information(self, "Image Viewer",
"Cannot load %s." % fileName)
return
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
self.scaleFactor = 1.0
self.printAct.setEnabled(True)
self.fitToWindowAct.setEnabled(True)
self.updateActions()
if not self.fitToWindowAct.isChecked():
self.imageLabel.adjustSize()
def print_(self):
dialog = QtGui.QPrintDialog(self.printer, self)
if dialog.exec_():
painter = QtGui.QPainter(self.printer)
rect = painter.viewport()
size = self.imageLabel.pixmap().size()
size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)
painter.setViewport(rect.x(), rect.y(), size.width(), size.height())
painter.setWindow(self.imageLabel.pixmap().rect())
painter.drawPixmap(0, 0, self.imageLabel.pixmap())
def zoomIn(self):
self.scaleImage(1.25)
def zoomOut(self):
self.scaleImage(0.8)
def normalSize(self):
self.imageLabel.adjustSize()
self.scaleFactor = 1.0
def fitToWindow(self):
fitToWindow = self.fitToWindowAct.isChecked()
self.scrollArea.setWidgetResizable(fitToWindow)
if not fitToWindow:
self.normalSize()
self.updateActions()
def about(self):
QtGui.QMessageBox.about(self, "About Image Viewer",
"<p>The <b>Image Viewer</b> example shows how to combine "
"QLabel and QScrollArea to display an image. QLabel is "
"typically used for displaying text, but it can also display "
"an image. QScrollArea provides a scrolling view around "
"another widget. If the child widget exceeds the size of the "
"frame, QScrollArea automatically provides scroll bars.</p>"
"<p>The example demonstrates how QLabel's ability to scale "
"its contents (QLabel.scaledContents), and QScrollArea's "
"ability to automatically resize its contents "
"(QScrollArea.widgetResizable), can be used to implement "
"zooming and scaling features.</p>"
"<p>In addition the example shows how to use QPainter to "
"print an image.</p>")
def createActions(self):
self.openAct = QtGui.QAction("&Open...", self, shortcut="Ctrl+O",
triggered=self.open)
self.printAct = QtGui.QAction("&Print...", self, shortcut="Ctrl+P",
enabled=False, triggered=self.print_)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.zoomInAct = QtGui.QAction("Zoom &In (25%)", self,
shortcut="Ctrl++", enabled=False, triggered=self.zoomIn)
self.zoomOutAct = QtGui.QAction("Zoom &Out (25%)", self,
shortcut="Ctrl+-", enabled=False, triggered=self.zoomOut)
self.normalSizeAct = QtGui.QAction("&Normal Size", self,
shortcut="Ctrl+S", enabled=False, triggered=self.normalSize)
self.fitToWindowAct = QtGui.QAction("&Fit to Window", self,
enabled=False, checkable=True, shortcut="Ctrl+F",
triggered=self.fitToWindow)
self.aboutAct = QtGui.QAction("&About", self, triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.fileMenu = QtGui.QMenu("&File", self)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.viewMenu = QtGui.QMenu("&View", self)
self.viewMenu.addAction(self.zoomInAct)
self.viewMenu.addAction(self.zoomOutAct)
self.viewMenu.addAction(self.normalSizeAct)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.fitToWindowAct)
self.helpMenu = QtGui.QMenu("&Help", self)
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.menuBar().addMenu(self.fileMenu)
self.menuBar().addMenu(self.viewMenu)
self.menuBar().addMenu(self.helpMenu)
def updateActions(self):
self.zoomInAct.setEnabled(not self.fitToWindowAct.isChecked())
self.zoomOutAct.setEnabled(not self.fitToWindowAct.isChecked())
self.normalSizeAct.setEnabled(not self.fitToWindowAct.isChecked())
def scaleImage(self, factor):
self.scaleFactor *= factor
self.imageLabel.resize(self.scaleFactor * self.imageLabel.pixmap().size())
self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), factor)
self.adjustScrollBar(self.scrollArea.verticalScrollBar(), factor)
self.zoomInAct.setEnabled(self.scaleFactor < 3.0)
self.zoomOutAct.setEnabled(self.scaleFactor > 0.333)
def adjustScrollBar(self, scrollBar, factor):
scrollBar.setValue(int(factor * scrollBar.value()
+ ((factor - 1) * scrollBar.pageStep()/2)))
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
imageViewer = ImageViewer()
imageViewer.show()
sys.exit(app.exec_())
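scaleImage above keeps the zoom actions enabled only while the cumulative factor stays between roughly 1/3x and 3x. The bookkeeping in isolation:

scale = 1.0
for step in (1.25, 1.25, 1.25, 1.25, 1.25):
    scale *= step
    zoom_in_enabled = scale < 3.0
    zoom_out_enabled = scale > 0.333
    print(round(scale, 3), zoom_in_enabled, zoom_out_enabled)
# After five 25% steps scale is ~3.05, so further zooming in is disabled.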
| 39.702326
| 82
| 0.641167
|
from PyQt4 import QtCore, QtGui
class ImageViewer(QtGui.QMainWindow):
def __init__(self):
super(ImageViewer, self).__init__()
self.printer = QtGui.QPrinter()
self.scaleFactor = 0.0
self.imageLabel = QtGui.QLabel()
self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
self.imageLabel.setSizePolicy(QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Ignored)
self.imageLabel.setScaledContents(True)
self.scrollArea = QtGui.QScrollArea()
self.scrollArea.setBackgroundRole(QtGui.QPalette.Dark)
self.scrollArea.setWidget(self.imageLabel)
self.setCentralWidget(self.scrollArea)
self.createActions()
self.createMenus()
self.setWindowTitle("Image Viewer")
self.resize(500, 400)
def open(self):
fileName = QtGui.QFileDialog.getOpenFileName(self, "Open File",
QtCore.QDir.currentPath())
if fileName:
image = QtGui.QImage(fileName)
if image.isNull():
QtGui.QMessageBox.information(self, "Image Viewer",
"Cannot load %s." % fileName)
return
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
self.scaleFactor = 1.0
self.printAct.setEnabled(True)
self.fitToWindowAct.setEnabled(True)
self.updateActions()
if not self.fitToWindowAct.isChecked():
self.imageLabel.adjustSize()
def print_(self):
dialog = QtGui.QPrintDialog(self.printer, self)
if dialog.exec_():
painter = QtGui.QPainter(self.printer)
rect = painter.viewport()
size = self.imageLabel.pixmap().size()
size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)
painter.setViewport(rect.x(), rect.y(), size.width(), size.height())
painter.setWindow(self.imageLabel.pixmap().rect())
painter.drawPixmap(0, 0, self.imageLabel.pixmap())
def zoomIn(self):
self.scaleImage(1.25)
def zoomOut(self):
self.scaleImage(0.8)
def normalSize(self):
self.imageLabel.adjustSize()
self.scaleFactor = 1.0
def fitToWindow(self):
fitToWindow = self.fitToWindowAct.isChecked()
self.scrollArea.setWidgetResizable(fitToWindow)
if not fitToWindow:
self.normalSize()
self.updateActions()
def about(self):
QtGui.QMessageBox.about(self, "About Image Viewer",
"<p>The <b>Image Viewer</b> example shows how to combine "
"QLabel and QScrollArea to display an image. QLabel is "
"typically used for displaying text, but it can also display "
"an image. QScrollArea provides a scrolling view around "
"another widget. If the child widget exceeds the size of the "
"frame, QScrollArea automatically provides scroll bars.</p>"
"<p>The example demonstrates how QLabel's ability to scale "
"its contents (QLabel.scaledContents), and QScrollArea's "
"ability to automatically resize its contents "
"(QScrollArea.widgetResizable), can be used to implement "
"zooming and scaling features.</p>"
"<p>In addition the example shows how to use QPainter to "
"print an image.</p>")
def createActions(self):
self.openAct = QtGui.QAction("&Open...", self, shortcut="Ctrl+O",
triggered=self.open)
self.printAct = QtGui.QAction("&Print...", self, shortcut="Ctrl+P",
enabled=False, triggered=self.print_)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.zoomInAct = QtGui.QAction("Zoom &In (25%)", self,
shortcut="Ctrl++", enabled=False, triggered=self.zoomIn)
self.zoomOutAct = QtGui.QAction("Zoom &Out (25%)", self,
shortcut="Ctrl+-", enabled=False, triggered=self.zoomOut)
self.normalSizeAct = QtGui.QAction("&Normal Size", self,
shortcut="Ctrl+S", enabled=False, triggered=self.normalSize)
self.fitToWindowAct = QtGui.QAction("&Fit to Window", self,
enabled=False, checkable=True, shortcut="Ctrl+F",
triggered=self.fitToWindow)
self.aboutAct = QtGui.QAction("&About", self, triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.fileMenu = QtGui.QMenu("&File", self)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.printAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.viewMenu = QtGui.QMenu("&View", self)
self.viewMenu.addAction(self.zoomInAct)
self.viewMenu.addAction(self.zoomOutAct)
self.viewMenu.addAction(self.normalSizeAct)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.fitToWindowAct)
self.helpMenu = QtGui.QMenu("&Help", self)
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
self.menuBar().addMenu(self.fileMenu)
self.menuBar().addMenu(self.viewMenu)
self.menuBar().addMenu(self.helpMenu)
def updateActions(self):
self.zoomInAct.setEnabled(not self.fitToWindowAct.isChecked())
self.zoomOutAct.setEnabled(not self.fitToWindowAct.isChecked())
self.normalSizeAct.setEnabled(not self.fitToWindowAct.isChecked())
def scaleImage(self, factor):
self.scaleFactor *= factor
self.imageLabel.resize(self.scaleFactor * self.imageLabel.pixmap().size())
self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), factor)
self.adjustScrollBar(self.scrollArea.verticalScrollBar(), factor)
self.zoomInAct.setEnabled(self.scaleFactor < 3.0)
self.zoomOutAct.setEnabled(self.scaleFactor > 0.333)
def adjustScrollBar(self, scrollBar, factor):
scrollBar.setValue(int(factor * scrollBar.value()
+ ((factor - 1) * scrollBar.pageStep()/2)))
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
imageViewer = ImageViewer()
imageViewer.show()
sys.exit(app.exec_())
| true
| true
|
f709cf7002dd1d680b4a2838c2421fcebabdf110
| 7,746
|
py
|
Python
|
hackathon/annotation_compare_viz.py
|
cns-iu/ccf-research
|
e029c8985a249c1caec925e95f5286c505c706ea
|
[
"MIT"
] | 1
|
2020-09-09T13:45:44.000Z
|
2020-09-09T13:45:44.000Z
|
hackathon/annotation_compare_viz.py
|
cns-iu/ccf-research
|
e029c8985a249c1caec925e95f5286c505c706ea
|
[
"MIT"
] | null | null | null |
hackathon/annotation_compare_viz.py
|
cns-iu/ccf-research
|
e029c8985a249c1caec925e95f5286c505c706ea
|
[
"MIT"
] | 4
|
2020-08-14T19:31:56.000Z
|
2021-09-07T04:11:45.000Z
|
import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
# A - new json
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
# B - old json
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
] # for some json. Comment this line if needed
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
# find difference
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
    position_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
            if (x - _x) ** 2 + (y - _y) ** 2 <= position_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
# print(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
# print(i, _flag)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
# print(f"{len(new_same_list)} same")
# print(f"{len(new_revised_list)} revised")
# print(f"{len(new_added_list)} added")
# print(f"{removed_count} deleted")
# draw visualization
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
# text
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) # + f",{Polygon(coor_list_a[i]).area}"
ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
# print(coor_tuple)
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
# text = f",{Polygon(coor_b).area}"
# ImageDraw.Draw(img).text(
# (coor_tuple[0][0], coor_tuple[0][1]),
# text,
# font=font,
# )
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace(".","_")}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
# write score
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
for d in [0.5]: # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
find_diff(dice_thred=d, draw_preview=True, log_score=True)
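The dice/recall/precision helpers at the top of the script are all ratios of intersection areas. A quick check with two overlapping 2x2 squares (uses shapely, which the script already imports):

from shapely.geometry import Polygon

a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])  # 2x2 square, area 4
b = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])  # shifted 2x2 square, area 4

inter = a.intersection(b).area      # overlap is a 1x2 strip, area 2
print(2 * inter / (a.area + b.area))  # dice      -> 0.5
print(inter / b.area)                 # recall    -> 0.5
print(inter / a.area)                 # precision -> 0.5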
| 33.102564
| 99
| 0.560676
|
import json
from re import split
import shutil
import os
import sys
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import io
from shapely.geometry import Polygon
Image.MAX_IMAGE_PIXELS = None
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
shutil.rmtree(path)
os.makedirs(path)
def dice(a, b):
return 2 * a.intersection(b).area / (a.area + b.area)
def recall(a, b):
return a.intersection(b).area / b.area
def precision(a, b):
return a.intersection(b).area / a.area
def find_diff(dice_thred=0.5, draw_preview=True, log_score=True):
with open(file_A_path) as data_file:
data = json.load(data_file)
average_area = sum(
[Polygon(item["geometry"]["coordinates"][0]).area for item in data]
) / len(data)
area_threshold = average_area / 50
print("average area size: ", average_area)
print("size threshold: ", area_threshold)
coor_list_a = []
for item in data:
coor = item["geometry"]["coordinates"]
poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_a.extend(item["geometry"]["coordinates"])
else:
print("A ignore", poly.area)
A_x_list = [[xy[0] for xy in coor] for coor in coor_list_a]
A_y_list = [[xy[1] for xy in coor] for coor in coor_list_a]
A_id_list = [i for i in range(len(coor_list_a))]
with open(file_B_path) as data_file:
data = json.load(data_file)
coor_list_b = []
for item in data:
coor = item["geometry"]["coordinates"]
coor = [
[[xy[1], xy[0]] for xy in coor[0]]
        ]
        poly = Polygon(coor[0])
if poly.area > area_threshold:
coor_list_b.extend(coor)
else:
print("B ignore", poly.area)
B_x_list = [[xy[0] for xy in coor] for coor in coor_list_b]
B_y_list = [[xy[1] for xy in coor] for coor in coor_list_b]
center_list_new = []
for i in range(len(A_x_list)):
mean_x = (sum(A_x_list[i]) - A_x_list[i][-1]) / (len(A_x_list[i]) - 1)
mean_y = (sum(A_y_list[i]) - A_y_list[i][-1]) / (len(A_y_list[i]) - 1)
center_list_new.append((mean_x, mean_y))
center_list_old = []
for i in range(len(B_x_list)):
mean_x = (sum(B_x_list[i]) - B_x_list[i][-1]) / (len(B_x_list[i]) - 1)
mean_y = (sum(B_y_list[i]) - B_y_list[i][-1]) / (len(B_y_list[i]) - 1)
center_list_old.append((mean_x, mean_y))
new_added_list = []
new_added_f1_list = []
new_same_list = []
new_revised_list = []
f1_list = []
    position_threshold = 500
dice_threshold = dice_thred
ignore_count = 0
for i in A_id_list:
x, y = center_list_new[i]
new_p = Polygon(coor_list_a[i])
min_f1 = 0
min_j = -1
_recall, _precision = -1, -1
for j in range(len(center_list_old)):
_x, _y = center_list_old[j]
old_p = Polygon(coor_list_b[j])
            if (x - _x) ** 2 + (y - _y) ** 2 <= position_threshold ** 2:
f1 = dice(new_p, old_p)
if f1 > min_f1:
min_f1 = f1
min_j = j
_recall = recall(new_p, old_p)
_precision = precision(new_p, old_p)
if min_f1 >= 0.999:
_flag = f"Same\t{min_f1}"
new_same_list.append(i)
elif min_f1 >= dice_threshold:
_flag = f"Revised\t{min_f1}"
new_revised_list.append(i)
f1_list.append((min_f1, _recall, _precision))
else:
_flag = f"Added\t{min_f1}"
new_added_list.append(i)
new_added_f1_list.append(min_f1)
if _flag.startswith("Same") or _flag.startswith("Revised"):
if min_j != -1:
coor_list_b.pop(min_j)
center_list_old.pop(min_j)
removed_count = len(center_list_old)
print(f"A\tB\tsame\tmatch\tadded\tdeleted")
print(
f"{len(A_x_list)}\t{len(B_x_list)}\t{len(new_same_list)}\t{len(new_revised_list)}"
f"\t{len(new_added_list)}\t{removed_count}"
)
print(f"[FP: {len(new_added_list)}/{len(A_x_list)}]")
print(f"[FN: {removed_count}/{len(B_x_list)}]")
if draw_preview:
ref_image = io.imread(image_ref_path)
background = np.zeros(shape=ref_image.shape, dtype=np.uint8)
img = Image.fromarray(background, "L")
img = img.convert("RGB")
font_path = r"c:\windows\fonts\bahnschrift.ttf"
font = ImageFont.truetype(font_path, size=48)
title_font = ImageFont.truetype(font_path, size=72)
ImageDraw.Draw(img).text(
(100, 400),
text=f"DICE Threshold = {dice_thred}",
font=title_font,
fill="white",
)
ImageDraw.Draw(img).text(
(100, 480),
text=f"PREDICTION [FP: {len(new_added_list)}/{len(A_x_list)}]",
font=title_font,
fill="yellow",
)
ImageDraw.Draw(img).text(
(100, 560),
text=f"GROUND TRUTH [FN: {removed_count}/{len(B_x_list)}]",
font=title_font,
fill="red",
)
for i in new_added_list:
coor_tuple = [(xy[1], xy[0]) for xy in coor_list_a[i]]
ImageDraw.Draw(img).line(coor_tuple, fill="yellow", width=6)
f1 = new_added_f1_list[new_added_list.index(i)]
if f1 > 0:
text = "{:.3f}".format(f1) ImageDraw.Draw(img).text(
(center_list_new[i][1] - 40, center_list_new[i][0] + 60),
text,
font=font,
)
for coor_b in coor_list_b:
coor_tuple = [(xy[1], xy[0]) for xy in coor_b]
ImageDraw.Draw(img).line(coor_tuple, fill="red", width=6)
img = np.array(img).astype("uint8")
output_path = image_ref_path.replace(
".png", f'_{str(dice_thred).replace(".","_")}.png'
)
io.imsave(output_path, img)
print(f"Image saved to {output_path}")
if log_score:
txt_path = file_A_path.replace("json", "txt")
with open(txt_path, "w") as f:
for item in f1_list:
f.write(f"{item[0]},{item[1]},{item[2]}\n")
if __name__ == "__main__":
file_A_path = (
r"C:\Users\yiju\Desktop\Copy\Scripts\masks\1-tom-new-kidney\pred_00a67c839.json"
)
file_B_path = r"C:\Users\yiju\Desktop\Copy\Data\hubmap-kidney-segmentation\test\00a67c839.json"
if len(sys.argv) >= 3:
file_A_path = sys.argv[1]
file_B_path = sys.argv[2]
image_ref_path = file_A_path.replace("json", "png")
A_name = file_A_path.split("\\")[-1].split(".")[0]
B_name = file_B_path.split("\\")[-1].split(".")[0]
print("A: ", A_name)
print("B: ", B_name)
    for d in [0.5]:
        find_diff(dice_thred=d, draw_preview=True, log_score=True)
| true
| true
|
f709cf992b68de68927df65d68d163fcd1f06b58
| 3,861
|
py
|
Python
|
mmdet/models/relation_heads/imp_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 24
|
2021-10-14T03:28:28.000Z
|
2022-03-29T09:30:04.000Z
|
mmdet/models/relation_heads/imp_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-12-14T15:04:49.000Z
|
2022-02-19T09:54:42.000Z
|
mmdet/models/relation_heads/imp_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-10-31T11:23:06.000Z
|
2021-12-17T06:38:50.000Z
|
# ---------------------------------------------------------------
# imp_head.py
# Set-up time: 2020/5/21 11:22 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
from ..registry import HEADS
import torch
from .relation_head import RelationHead
from .approaches import IMPContext
from mmdet.core import bbox2roi
@HEADS.register_module
class IMPHead(RelationHead):
def __init__(self, **kwargs):
super(IMPHead, self).__init__(**kwargs)
self.context_layer = IMPContext(self.head_config, self.obj_classes, self.rel_classes)
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
"""
Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
det_result: (Result): Result containing bbox, label, mask, point, rels,
etc. According to different mode, all the contents have been
set correctly. Feel free to use it.
gt_result : (Result): The ground truth information.
is_testing:
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object
                relmaps (list[Tensor]): of shape (num_obj, num_obj).
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
refine_obj_scores, rel_scores = self.context_layer(roi_feats, union_feats, det_result)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
if self.use_bias:
obj_preds = refine_obj_scores.max(-1)[1]
obj_preds = obj_preds.split(num_objs, dim=0)
pair_preds = []
for pair_idx, obj_pred in zip(det_result.rel_pair_idxes, obj_preds):
pair_preds.append(torch.stack((obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]), dim=1))
pair_pred = torch.cat(pair_preds, dim=0)
rel_scores = rel_scores + self.freq_bias.index_with_labels(pair_pred.long())
        # training: concatenate the per-image target lists into single tensors;
        # testing: split the flat score tensors back into per-image tuples
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels, dim=-1)
det_result.target_rel_labels = torch.cat(det_result.target_rel_labels, dim=-1)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
return det_result
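# Illustration of the frequency-bias indexing above (hedged sketch, not part
# of the original model): for every relation pair the predicted subject and
# object labels are gathered and stacked into a (num_rel, 2) tensor, which
# freq_bias.index_with_labels() maps to per-predicate bias logits.
#
#   import torch
#   obj_pred = torch.tensor([3, 7, 1])           # predicted labels of 3 boxes
#   pair_idx = torch.tensor([[0, 1], [2, 0]])    # two subject-object pairs
#   pair_pred = torch.stack((obj_pred[pair_idx[:, 0]],
#                            obj_pred[pair_idx[:, 1]]), dim=1)
#   # pair_pred == tensor([[3, 7], [1, 3]])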
| 43.382022
| 108
| 0.597255
|
from ..registry import HEADS
import torch
from .relation_head import RelationHead
from .approaches import IMPContext
from mmdet.core import bbox2roi
@HEADS.register_module
class IMPHead(RelationHead):
def __init__(self, **kwargs):
super(IMPHead, self).__init__(**kwargs)
self.context_layer = IMPContext(self.head_config, self.obj_classes, self.rel_classes)
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
roi_feats, union_feats, det_result = self.frontend_features(img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
refine_obj_scores, rel_scores = self.context_layer(roi_feats, union_feats, det_result)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
if self.use_bias:
obj_preds = refine_obj_scores.max(-1)[1]
obj_preds = obj_preds.split(num_objs, dim=0)
pair_preds = []
for pair_idx, obj_pred in zip(det_result.rel_pair_idxes, obj_preds):
pair_preds.append(torch.stack((obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]), dim=1))
pair_pred = torch.cat(pair_preds, dim=0)
rel_scores = rel_scores + self.freq_bias.index_with_labels(pair_pred.long())
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels, dim=-1)
det_result.target_rel_labels = torch.cat(det_result.target_rel_labels, dim=-1)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
return det_result
| true
| true
|
f709d0352b5fa74dda5922641a0d9109bce49efe
| 423
|
py
|
Python
|
PlantEmissionController/PlantEmissionController/asgi.py
|
sania-dsouza/NRChallenge_Controller
|
d1dcb665aa7394ec49d89ecda3c49dc09b89f9d6
|
[
"MIT"
] | null | null | null |
PlantEmissionController/PlantEmissionController/asgi.py
|
sania-dsouza/NRChallenge_Controller
|
d1dcb665aa7394ec49d89ecda3c49dc09b89f9d6
|
[
"MIT"
] | null | null | null |
PlantEmissionController/PlantEmissionController/asgi.py
|
sania-dsouza/NRChallenge_Controller
|
d1dcb665aa7394ec49d89ecda3c49dc09b89f9d6
|
[
"MIT"
] | null | null | null |
"""
ASGI config for PlantEmissionController project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PlantEmissionController.settings')
application = get_asgi_application()
| 24.882353
| 83
| 0.801418
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PlantEmissionController.settings')
application = get_asgi_application()
| true
| true
|
f709d34b2d10d8875740a51747de847183d6995e
| 13,048
|
py
|
Python
|
src/models/ast_models.py
|
jvel07/ast
|
600e7cf952ec59ac9cc1bb3170d3da7578e1f384
|
[
"BSD-3-Clause"
] | null | null | null |
src/models/ast_models.py
|
jvel07/ast
|
600e7cf952ec59ac9cc1bb3170d3da7578e1f384
|
[
"BSD-3-Clause"
] | null | null | null |
src/models/ast_models.py
|
jvel07/ast
|
600e7cf952ec59ac9cc1bb3170d3da7578e1f384
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 6/10/21 5:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : yuangong@mit.edu
# @File : ast_models.py
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
import wget
os.environ['TORCH_HOME'] = '../../pretrained_models'
import timm
from timm.models.layers import to_2tuple, trunc_normal_
# override the timm package to relax the input shape constraint.
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
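# Quick shape check for the relaxed patch embedding (hypothetical sketch):
# unlike the stock timm PatchEmbed, this forward() never asserts that the
# input matches img_size, so a 1-channel 128x1024 spectrogram passes through.
#
#   import torch
#   pe = PatchEmbed(img_size=224, patch_size=16, in_chans=1, embed_dim=768)
#   out = pe(torch.randn(1, 1, 128, 1024))
#   # 16x16 kernel with stride 16 -> (128/16) * (1024/16) = 8 * 64 = 512 patches
#   assert out.shape == (1, 512, 768)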
class ASTModel(nn.Module):
"""
The AST model.
    :param label_dim: the label dimension, i.e., the number of total classes; it is 527 for AudioSet, 50 for ESC-50, and 35 for Speech Commands V2-35
    :param fstride: the stride of patch splitting on the frequency dimension; for 16*16 patches, fstride=16 means no overlap and fstride=10 means an overlap of 6
    :param tstride: the stride of patch splitting on the time dimension; for 16*16 patches, tstride=16 means no overlap and tstride=10 means an overlap of 6
    :param input_fdim: the number of frequency bins of the input spectrogram
    :param input_tdim: the number of time frames of the input spectrogram
    :param imagenet_pretrain: whether to use an ImageNet-pretrained model
    :param audioset_pretrain: whether to use a model pretrained on full AudioSet and ImageNet
    :param model_size: the model size of AST, should be in [tiny224, small224, base224, base384]; base224 and base384 are the same model, but are trained differently during ImageNet pretraining.
"""
def __init__(self, label_dim=3, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True,
audioset_pretrain=True, model_size='base384', verbose=True):
super(ASTModel, self).__init__()
assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'
if verbose == True:
print('---------------AST Model Summary---------------')
print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),
str(audioset_pretrain)))
# override timm input shape restriction
timm.models.vision_transformer.PatchEmbed = PatchEmbed
# if AudioSet pretraining is not used (but ImageNet pretraining may still apply)
if audioset_pretrain == False:
if model_size == 'tiny224':
self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'small224':
self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base224':
self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base384':
self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)
else:
raise Exception('Model size must be one of tiny224, small224, base224, base384.')
self.original_num_patches = self.v.patch_embed.num_patches
self.oringal_hw = int(self.original_num_patches ** 0.5)
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
            # automatically get the intermediate shape
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
# the linear projection layer
new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
if imagenet_pretrain == True:
new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))
new_proj.bias = self.v.patch_embed.proj.bias
self.v.patch_embed.proj = new_proj
# the positional embedding
if imagenet_pretrain == True:
# get the positional embedding from deit model, skip the first two tokens (cls token and distillation token), reshape it to original 2D shape (24*24).
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches,
self.original_embedding_dim).transpose(1,
2).reshape(
1, self.original_embedding_dim, self.oringal_hw, self.oringal_hw)
# cut (from middle) or interpolate the second dimension of the positional embedding
if t_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :, :,
int(self.oringal_hw / 2) - int(t_dim / 2): int(self.oringal_hw / 2) - int(
t_dim / 2) + t_dim]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.oringal_hw, t_dim),
mode='bilinear')
# cut (from middle) or interpolate the first dimension of the positional embedding
if f_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :,
int(self.oringal_hw / 2) - int(f_dim / 2): int(self.oringal_hw / 2) - int(
f_dim / 2) + f_dim, :]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')
# flatten the positional embedding
new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1, 2)
# concatenate the above positional embedding with the cls token and distillation token of the deit model.
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
else:
# if not use imagenet pretrained model, just randomly initialize a learnable positional embedding
# TODO can use sinusoidal positional embedding instead
new_pos_embed = nn.Parameter(
torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))
self.v.pos_embed = new_pos_embed
trunc_normal_(self.v.pos_embed, std=.02)
# now load a model that is pretrained on both ImageNet and AudioSet
elif audioset_pretrain == True:
if audioset_pretrain == True and imagenet_pretrain == False:
raise ValueError(
'currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')
if model_size != 'base384':
raise ValueError('currently only has base384 AudioSet pretrained model.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if os.path.exists('../../pretrained_models/audioset_10_10_0.4593.pth') == False:
# this model performs 0.4593 mAP on the audioset eval set
audioset_mdl_url = 'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'
wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')
sd = torch.load('../../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)
# sd = torch.load('../../pretrained_models/ast_audioset.pth', map_location=device)
audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024,
imagenet_pretrain=False, audioset_pretrain=False, model_size='base384',
verbose=False)
audio_model = torch.nn.DataParallel(audio_model)
print("***************USING=>", torch.cuda.current_device())
audio_model.load_state_dict(sd, strict=False)
self.v = audio_model.module.v
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768,
12, 101)
# if the input sequence length is larger than the original audioset (10s), then cut the positional embedding
if t_dim < 101:
new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim / 2): 50 - int(t_dim / 2) + t_dim]
# otherwise interpolate
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')
print("NEW POST EMBED:", new_pos_embed.shape)
new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)
print("NEW POST EMBED:", new_pos_embed.shape)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):
test_input = torch.randn(1, 1, input_fdim, input_tdim)
test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
test_out = test_proj(test_input)
f_dim = test_out.shape[2]
t_dim = test_out.shape[3]
return f_dim, t_dim
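    # The probe above matches the closed-form conv output size with no padding
    # (sketch, assuming the 16x16 patches used throughout this file):
    #   f_dim = (input_fdim - 16) // fstride + 1   # (128 - 16) // 10 + 1 = 12
    #   t_dim = (input_tdim - 16) // tstride + 1   # (1024 - 16) // 10 + 1 = 101
    # which agrees with the 12 * 101 = 1212 patches hard-coded in the AudioSet branch.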
@autocast()
def forward(self, x):
"""
:param x: the input spectrogram, expected shape: (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
:return: prediction
"""
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
B = x.shape[0]
x = self.v.patch_embed(x)
cls_tokens = self.v.cls_token.expand(B, -1, -1)
dist_token = self.v.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.v.pos_embed
x = self.v.pos_drop(x)
for blk in self.v.blocks:
x = blk(x)
x = self.v.norm(x)
x = (x[:, 0] + x[:, 1]) / 2
# x = self.mlp_head(x)
return x
# if __name__ == '__main__':
# input_tdim = 100
# ast_mdl = ASTModel(input_tdim=input_tdim)
# # input a batch of 10 spectrogram, each with 100 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
#
# input_tdim = 512
# ast_mdl = ASTModel(input_tdim=input_tdim, label_dim=50, audioset_pretrain=True)
# # input a batch of 10 spectrogram, each with 512 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
| 56.978166
| 191
| 0.610132
|
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
import wget
os.environ['TORCH_HOME'] = '../../pretrained_models'
import timm
from timm.models.layers import to_2tuple, trunc_normal_
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class ASTModel(nn.Module):
def __init__(self, label_dim=3, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True,
audioset_pretrain=True, model_size='base384', verbose=True):
super(ASTModel, self).__init__()
assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'
if verbose == True:
print('---------------AST Model Summary---------------')
print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),
str(audioset_pretrain)))
timm.models.vision_transformer.PatchEmbed = PatchEmbed
if audioset_pretrain == False:
if model_size == 'tiny224':
self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'small224':
self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base224':
self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base384':
self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)
else:
raise Exception('Model size must be one of tiny224, small224, base224, base384.')
self.original_num_patches = self.v.patch_embed.num_patches
self.oringal_hw = int(self.original_num_patches ** 0.5)
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
if imagenet_pretrain == True:
new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))
new_proj.bias = self.v.patch_embed.proj.bias
self.v.patch_embed.proj = new_proj
if imagenet_pretrain == True:
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches,
self.original_embedding_dim).transpose(1,
2).reshape(
1, self.original_embedding_dim, self.oringal_hw, self.oringal_hw)
if t_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :, :,
int(self.oringal_hw / 2) - int(t_dim / 2): int(self.oringal_hw / 2) - int(
t_dim / 2) + t_dim]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.oringal_hw, t_dim),
mode='bilinear')
if f_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :,
int(self.oringal_hw / 2) - int(f_dim / 2): int(self.oringal_hw / 2) - int(
f_dim / 2) + f_dim, :]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')
new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1, 2)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
else:
new_pos_embed = nn.Parameter(
torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))
self.v.pos_embed = new_pos_embed
trunc_normal_(self.v.pos_embed, std=.02)
elif audioset_pretrain == True:
if audioset_pretrain == True and imagenet_pretrain == False:
raise ValueError(
'currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')
if model_size != 'base384':
raise ValueError('currently only has base384 AudioSet pretrained model.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if os.path.exists('../../pretrained_models/audioset_10_10_0.4593.pth') == False:
audioset_mdl_url = 'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'
wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')
sd = torch.load('../../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)
audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024,
imagenet_pretrain=False, audioset_pretrain=False, model_size='base384',
verbose=False)
audio_model = torch.nn.DataParallel(audio_model)
print("***************USING=>", torch.cuda.current_device())
audio_model.load_state_dict(sd, strict=False)
self.v = audio_model.module.v
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768,
12, 101)
if t_dim < 101:
new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim / 2): 50 - int(t_dim / 2) + t_dim]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')
print("NEW POST EMBED:", new_pos_embed.shape)
new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)
print("NEW POST EMBED:", new_pos_embed.shape)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):
test_input = torch.randn(1, 1, input_fdim, input_tdim)
test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
test_out = test_proj(test_input)
f_dim = test_out.shape[2]
t_dim = test_out.shape[3]
return f_dim, t_dim
@autocast()
def forward(self, x):
x = x.unsqueeze(1)
x = x.transpose(2, 3)
B = x.shape[0]
x = self.v.patch_embed(x)
cls_tokens = self.v.cls_token.expand(B, -1, -1)
dist_token = self.v.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.v.pos_embed
x = self.v.pos_drop(x)
for blk in self.v.blocks:
x = blk(x)
x = self.v.norm(x)
x = (x[:, 0] + x[:, 1]) / 2
return x
| true
| true
|
f709d36bd178f40879376ac199984275f9458cc3
| 1,113
|
py
|
Python
|
bin/create_trace_graphviz.py
|
AlexJew/CityEnergyAnalyst
|
6eb372c79e5100a2d0abce78561ae368fb409cd1
|
[
"MIT"
] | 1
|
2018-08-16T14:34:23.000Z
|
2018-08-16T14:34:23.000Z
|
bin/create_trace_graphviz.py
|
AlexJew/CityEnergyAnalyst
|
6eb372c79e5100a2d0abce78561ae368fb409cd1
|
[
"MIT"
] | null | null | null |
bin/create_trace_graphviz.py
|
AlexJew/CityEnergyAnalyst
|
6eb372c79e5100a2d0abce78561ae368fb409cd1
|
[
"MIT"
] | null | null | null |
"""
Read in the output from the trace-inputlocator script and create a GraphViz file.
Pass as input the path to the yaml output of the trace-inputlocator script via config file.
The output is written to the trace-inputlocator location.
WHY? Because the trace-inputlocator script only keeps the GraphViz output of its last call. This
version re-creates the trace data from the merged yaml file (the yaml output is merged with any
pre-existing data in the output file).
"""
from __future__ import print_function
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
        yaml_data = yaml.load(f, Loader=yaml.SafeLoader)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
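# The yaml file is assumed to follow the shape written by trace_inputlocator,
# roughly (hypothetical example; script and locator names are illustrative):
#
#   demand:
#     input:
#     - [get_weather, inputs/weather.epw]
#     output:
#     - [get_demand_results, outputs/demand.csv]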
if __name__ == '__main__':
main(cea.config.Configuration())
| 35.903226
| 118
| 0.745732
|
from __future__ import print_function
import yaml
import cea.config
from cea.tests.trace_inputlocator import create_graphviz_output
def main(config):
with open(config.trace_inputlocator.yaml_output_file, 'r') as f:
        yaml_data = yaml.load(f, Loader=yaml.SafeLoader)
trace_data = []
for script in yaml_data.keys():
for direction in ('input', 'output'):
for locator, file in yaml_data[script][direction]:
trace_data.append((direction, script, locator, file))
create_graphviz_output(trace_data, config.trace_inputlocator.graphviz_output_file)
if __name__ == '__main__':
main(cea.config.Configuration())
| true
| true
|
f709d40f1b6482aa8186bd0b9c536a20d11a426e
| 11,220
|
py
|
Python
|
rest_api/build/lib/rest_api/workflow/main.py
|
hidura/sawtooth-blockmed
|
d0e047972557315489de1b308a84f477da6f993d
|
[
"Apache-2.0"
] | 3
|
2020-06-03T23:09:27.000Z
|
2021-05-15T22:21:21.000Z
|
rest_api/rest_api/workflow/main.py
|
hidura/sawtooth-blockmed
|
d0e047972557315489de1b308a84f477da6f993d
|
[
"Apache-2.0"
] | 6
|
2020-07-21T00:03:29.000Z
|
2021-09-28T03:30:02.000Z
|
rest_api/rest_api/workflow/main.py
|
hidura/sawtooth-blockmed
|
d0e047972557315489de1b308a84f477da6f993d
|
[
"Apache-2.0"
] | 1
|
2020-06-26T03:50:50.000Z
|
2020-06-26T03:50:50.000Z
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
import asyncio
import logging
import os
from signal import signal, SIGINT
import sys
# import rethinkdb as r
from sanic import Sanic
from sawtooth_signing import create_context
from sawtooth_signing import ParseError
# from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_signing import CryptoFactory
# from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from zmq.asyncio import ZMQEventLoop
from rest_api.workflow.general import get_keyfile, get_signer_from_file
from rest_api.workflow.doctors import DOCTORS_BP
from rest_api.workflow.patients import PATIENTS_BP
from rest_api.workflow.clients import CLIENTS_BP
from rest_api.workflow.evaluation import EVALUATION_BP
from rest_api.workflow.Consent import CONSENT_BP
from sawtooth_rest_api.messaging import Connection
# from api.authorization import AUTH_BP
# from api.errors import ERRORS_BP
# from api.holdings import HOLDINGS_BP
# from api.offers import OFFERS_BP
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'HOST': 'localhost',
'PORT': 8000,
'TIMEOUT': 500,
'VALIDATOR_URL': 'tcp://localhost:4004',
# 'DB_HOST': 'localhost',
# 'DB_PORT': 28015,
# 'DB_NAME': 'marketplace',
'DEBUG': True,
'KEEP_ALIVE': False,
'SECRET_KEY': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890',
'AES_KEY': 'ffffffffffffffffffffffffffffffff',
'BATCHER_PRIVATE_KEY': '1111111111111111111111111111111111111111111111111111111111111111',
'BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_LAB': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE': None
}
async def open_connections(appl):
# LOGGER.warning('opening database connection')
# r.set_loop_type('asyncio')
# app.config.DB_CONN = await r.connect(
# host=app.config.DB_HOST,
# port=app.config.DB_PORT,
# db=app.config.DB_NAME)
appl.config.VAL_CONN = Connection(appl.config.VALIDATOR_URL)
LOGGER.warning('opening validator connection: ' + str(appl.config.VALIDATOR_URL))
appl.config.VAL_CONN.open()
def close_connections(appl):
# LOGGER.warning('closing database connection')
# app.config.DB_CONN.close()
LOGGER.warning('closing validator connection')
appl.config.VAL_CONN.close()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--host',
help='The host for the api to run on.')
parser.add_argument('--port',
help='The port for the api to run on.')
parser.add_argument('--timeout',
help='Seconds to wait for a validator response')
parser.add_argument('--validator',
help='The url to connect to a running validator')
# parser.add_argument('--db-host',
# help='The host for the state database')
# parser.add_argument('--db-port',
# help='The port for the state database')
# parser.add_argument('--db-name',
# help='The name of the database')
parser.add_argument('--debug',
help='Option to run Sanic in debug mode')
parser.add_argument('--secret_key',
help='The API secret key')
parser.add_argument('--aes-key',
help='The AES key used for private key encryption')
parser.add_argument('--batcher-private-key',
help='The sawtooth key used for transaction signing')
parser.add_argument('--batcher-private-key-file-name-clinic',
help='The sawtooth key used for batch signing having clinic role')
parser.add_argument('--batcher-private-key-file-name-doctor',
help='The sawtooth key used for batch signing having doctor role')
parser.add_argument('--batcher-private-key-file-name-patient',
help='The sawtooth key used for batch signing having patient role')
parser.add_argument('--batcher-private-key-file-name-lab',
help='The sawtooth key used for batch signing having lab role')
parser.add_argument('--batcher-private-key-file-name-insurance',
help='The sawtooth key used for batch signing having insurance role')
return parser.parse_args(args)
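# Example invocation (hypothetical values; any flag left out falls back to
# config.py and then to DEFAULT_CONFIG above):
#
#   python main.py --host 0.0.0.0 --port 8000 \
#       --validator tcp://validator:4004 \
#       --batcher-private-key-file-name-clinic clinic.priv \
#       --batcher-private-key-file-name-doctor doctor.priv \
#       --batcher-private-key-file-name-patient patient.priv \
#       --batcher-private-key-file-name-lab lab.priv \
#       --batcher-private-key-file-name-insurance insurance.priv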
def load_config(appl): # pylint: disable=too-many-branches
appl.config.update(DEFAULT_CONFIG)
config_file_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'config.py')
try:
appl.config.from_pyfile(config_file_path)
except FileNotFoundError:
LOGGER.warning("No config file provided")
# CLI Options will override config file options
opts = parse_args(sys.argv[1:])
if opts.host is not None:
appl.config.HOST = opts.host
if opts.port is not None:
appl.config.PORT = opts.port
if opts.timeout is not None:
appl.config.TIMEOUT = opts.timeout
if opts.validator is not None:
appl.config.VALIDATOR_URL = opts.validator
# if opts.db_host is not None:
# app.config.DB_HOST = opts.db_host
# if opts.db_port is not None:
# app.config.DB_PORT = opts.db_port
# if opts.db_name is not None:
# app.config.DB_NAME = opts.db_name
if opts.debug is not None:
appl.config.DEBUG = opts.debug
if opts.secret_key is not None:
appl.config.SECRET_KEY = opts.secret_key
if appl.config.SECRET_KEY is None:
LOGGER.exception("API secret key was not provided")
sys.exit(1)
if opts.aes_key is not None:
appl.config.AES_KEY = opts.aes_key
if appl.config.AES_KEY is None:
LOGGER.exception("AES key was not provided")
sys.exit(1)
if opts.batcher_private_key is not None:
appl.config.BATCHER_PRIVATE_KEY = opts.batcher_private_key
if appl.config.BATCHER_PRIVATE_KEY is None:
LOGGER.exception("Batcher private key was not provided")
sys.exit(1)
if opts.batcher_private_key_file_name_clinic is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC = opts.batcher_private_key_file_name_clinic
if opts.batcher_private_key_file_name_doctor is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR = opts.batcher_private_key_file_name_doctor
if opts.batcher_private_key_file_name_patient is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT = opts.batcher_private_key_file_name_patient
if opts.batcher_private_key_file_name_lab is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB = opts.batcher_private_key_file_name_lab
if opts.batcher_private_key_file_name_insurance is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE = opts.batcher_private_key_file_name_insurance
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC is None:
LOGGER.exception("Batcher private key file name for Clinic entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR is None:
LOGGER.exception("Batcher private key file name for Doctor entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT is None:
LOGGER.exception("Batcher private key file name for Patient entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB is None:
LOGGER.exception("Batcher private key file name for Lab entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE is None:
LOGGER.exception("Batcher private key file name for Insurance entity was not provided")
sys.exit(1)
try:
private_key_file_name_clinic = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC)
clinic_private_key = get_signer_from_file(private_key_file_name_clinic)
private_key_file_name_doctor = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR)
doctor_private_key = get_signer_from_file(private_key_file_name_doctor)
private_key_file_name_patient = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT)
patient_private_key = get_signer_from_file(private_key_file_name_patient)
private_key_file_name_lab = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB)
lab_private_key = get_signer_from_file(private_key_file_name_lab)
private_key_file_name_insurance = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE)
insurance_private_key = get_signer_from_file(private_key_file_name_insurance)
# private_key = Secp256k1PrivateKey.from_hex(
# app.config.BATCHER_PRIVATE_KEY)
except ParseError as err:
LOGGER.exception('Unable to load private key: %s', str(err))
sys.exit(1)
appl.config.CONTEXT = create_context('secp256k1')
appl.config.SIGNER_CLINIC = CryptoFactory(
appl.config.CONTEXT).new_signer(clinic_private_key)
appl.config.SIGNER_DOCTOR = CryptoFactory(
appl.config.CONTEXT).new_signer(doctor_private_key)
appl.config.SIGNER_PATIENT = CryptoFactory(
appl.config.CONTEXT).new_signer(patient_private_key)
appl.config.SIGNER_LAB = CryptoFactory(
appl.config.CONTEXT).new_signer(lab_private_key)
appl.config.SIGNER_INSURANCE = CryptoFactory(
appl.config.CONTEXT).new_signer(insurance_private_key)
app = Sanic(__name__)
app.config['CORS_AUTOMATIC_OPTIONS'] = True
def main():
LOGGER.info('Starting Clinic Rest API server...')
# CORS(app)
app.blueprint(DOCTORS_BP)
app.blueprint(PATIENTS_BP)
app.blueprint(CLIENTS_BP)
app.blueprint(CONSENT_BP)
app.blueprint(EVALUATION_BP)
load_config(app)
zmq = ZMQEventLoop()
asyncio.set_event_loop(zmq)
server = app.create_server(
host=app.config.HOST, port=app.config.PORT, debug=app.config.DEBUG, return_asyncio_server=True)
loop = asyncio.get_event_loop()
asyncio.ensure_future(open_connections(app))
asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.close())
try:
LOGGER.info('Clinic Rest API server starting')
loop.run_forever()
except KeyboardInterrupt:
LOGGER.info('Clinic Rest API started interrupted')
close_connections(app)
loop.stop()
if __name__ == "__main__":
main()
| 41.25
| 106
| 0.712923
|
import argparse
import asyncio
import logging
import os
from signal import signal, SIGINT
import sys
from sanic import Sanic
from sawtooth_signing import create_context
from sawtooth_signing import ParseError
from sawtooth_signing import CryptoFactory
from zmq.asyncio import ZMQEventLoop
from rest_api.workflow.general import get_keyfile, get_signer_from_file
from rest_api.workflow.doctors import DOCTORS_BP
from rest_api.workflow.patients import PATIENTS_BP
from rest_api.workflow.clients import CLIENTS_BP
from rest_api.workflow.evaluation import EVALUATION_BP
from rest_api.workflow.Consent import CONSENT_BP
from sawtooth_rest_api.messaging import Connection
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'HOST': 'localhost',
'PORT': 8000,
'TIMEOUT': 500,
'VALIDATOR_URL': 'tcp://localhost:4004',
'DEBUG': True,
'KEEP_ALIVE': False,
'SECRET_KEY': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890',
'AES_KEY': 'ffffffffffffffffffffffffffffffff',
'BATCHER_PRIVATE_KEY': '1111111111111111111111111111111111111111111111111111111111111111',
'BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_LAB': None,
'BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE': None
}
async def open_connections(appl):
appl.config.VAL_CONN = Connection(appl.config.VALIDATOR_URL)
LOGGER.warning('opening validator connection: ' + str(appl.config.VALIDATOR_URL))
appl.config.VAL_CONN.open()
def close_connections(appl):
LOGGER.warning('closing validator connection')
appl.config.VAL_CONN.close()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--host',
help='The host for the api to run on.')
parser.add_argument('--port',
help='The port for the api to run on.')
parser.add_argument('--timeout',
help='Seconds to wait for a validator response')
parser.add_argument('--validator',
help='The url to connect to a running validator')
parser.add_argument('--debug',
help='Option to run Sanic in debug mode')
parser.add_argument('--secret_key',
help='The API secret key')
parser.add_argument('--aes-key',
help='The AES key used for private key encryption')
parser.add_argument('--batcher-private-key',
help='The sawtooth key used for transaction signing')
parser.add_argument('--batcher-private-key-file-name-clinic',
help='The sawtooth key used for batch signing having clinic role')
parser.add_argument('--batcher-private-key-file-name-doctor',
help='The sawtooth key used for batch signing having doctor role')
parser.add_argument('--batcher-private-key-file-name-patient',
help='The sawtooth key used for batch signing having patient role')
parser.add_argument('--batcher-private-key-file-name-lab',
help='The sawtooth key used for batch signing having lab role')
parser.add_argument('--batcher-private-key-file-name-insurance',
help='The sawtooth key used for batch signing having insurance role')
return parser.parse_args(args)
def load_config(appl): appl.config.update(DEFAULT_CONFIG)
config_file_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'config.py')
try:
appl.config.from_pyfile(config_file_path)
except FileNotFoundError:
LOGGER.warning("No config file provided")
opts = parse_args(sys.argv[1:])
if opts.host is not None:
appl.config.HOST = opts.host
if opts.port is not None:
appl.config.PORT = opts.port
if opts.timeout is not None:
appl.config.TIMEOUT = opts.timeout
if opts.validator is not None:
appl.config.VALIDATOR_URL = opts.validator
if opts.debug is not None:
appl.config.DEBUG = opts.debug
if opts.secret_key is not None:
appl.config.SECRET_KEY = opts.secret_key
if appl.config.SECRET_KEY is None:
LOGGER.exception("API secret key was not provided")
sys.exit(1)
if opts.aes_key is not None:
appl.config.AES_KEY = opts.aes_key
if appl.config.AES_KEY is None:
LOGGER.exception("AES key was not provided")
sys.exit(1)
if opts.batcher_private_key is not None:
appl.config.BATCHER_PRIVATE_KEY = opts.batcher_private_key
if appl.config.BATCHER_PRIVATE_KEY is None:
LOGGER.exception("Batcher private key was not provided")
sys.exit(1)
if opts.batcher_private_key_file_name_clinic is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC = opts.batcher_private_key_file_name_clinic
if opts.batcher_private_key_file_name_doctor is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR = opts.batcher_private_key_file_name_doctor
if opts.batcher_private_key_file_name_patient is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT = opts.batcher_private_key_file_name_patient
if opts.batcher_private_key_file_name_lab is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB = opts.batcher_private_key_file_name_lab
if opts.batcher_private_key_file_name_insurance is not None:
appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE = opts.batcher_private_key_file_name_insurance
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC is None:
LOGGER.exception("Batcher private key file name for Clinic entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR is None:
LOGGER.exception("Batcher private key file name for Doctor entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT is None:
LOGGER.exception("Batcher private key file name for Patient entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB is None:
LOGGER.exception("Batcher private key file name for Lab entity was not provided")
sys.exit(1)
if appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE is None:
LOGGER.exception("Batcher private key file name for Insurance entity was not provided")
sys.exit(1)
try:
private_key_file_name_clinic = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_CLINIC)
clinic_private_key = get_signer_from_file(private_key_file_name_clinic)
private_key_file_name_doctor = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_DOCTOR)
doctor_private_key = get_signer_from_file(private_key_file_name_doctor)
private_key_file_name_patient = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_PATIENT)
patient_private_key = get_signer_from_file(private_key_file_name_patient)
private_key_file_name_lab = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_LAB)
lab_private_key = get_signer_from_file(private_key_file_name_lab)
private_key_file_name_insurance = get_keyfile(appl.config.BATCHER_PRIVATE_KEY_FILE_NAME_INSURANCE)
insurance_private_key = get_signer_from_file(private_key_file_name_insurance)
except ParseError as err:
LOGGER.exception('Unable to load private key: %s', str(err))
sys.exit(1)
appl.config.CONTEXT = create_context('secp256k1')
appl.config.SIGNER_CLINIC = CryptoFactory(
appl.config.CONTEXT).new_signer(clinic_private_key)
appl.config.SIGNER_DOCTOR = CryptoFactory(
appl.config.CONTEXT).new_signer(doctor_private_key)
appl.config.SIGNER_PATIENT = CryptoFactory(
appl.config.CONTEXT).new_signer(patient_private_key)
appl.config.SIGNER_LAB = CryptoFactory(
appl.config.CONTEXT).new_signer(lab_private_key)
appl.config.SIGNER_INSURANCE = CryptoFactory(
appl.config.CONTEXT).new_signer(insurance_private_key)
app = Sanic(__name__)
app.config['CORS_AUTOMATIC_OPTIONS'] = True
def main():
LOGGER.info('Starting Clinic Rest API server...')
app.blueprint(DOCTORS_BP)
app.blueprint(PATIENTS_BP)
app.blueprint(CLIENTS_BP)
app.blueprint(CONSENT_BP)
app.blueprint(EVALUATION_BP)
load_config(app)
zmq = ZMQEventLoop()
asyncio.set_event_loop(zmq)
server = app.create_server(
host=app.config.HOST, port=app.config.PORT, debug=app.config.DEBUG, return_asyncio_server=True)
loop = asyncio.get_event_loop()
asyncio.ensure_future(open_connections(app))
asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.close())
try:
LOGGER.info('Clinic Rest API server starting')
loop.run_forever()
except KeyboardInterrupt:
LOGGER.info('Clinic Rest API started interrupted')
close_connections(app)
loop.stop()
if __name__ == "__main__":
main()
| true
| true
|
f709d444e910f6f32e57c5407aa2e7a31cbc3d3d
| 6,676
|
py
|
Python
|
util/security/test/test_jwtutil.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
util/security/test/test_jwtutil.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
util/security/test/test_jwtutil.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import time
import pytest
import jwt
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from authlib.jose import jwk
from util.security.jwtutil import (
decode,
exp_max_s_option,
jwk_dict_to_public_key,
InvalidTokenError,
InvalidAlgorithmError,
)
@pytest.fixture(scope="session")
def private_key():
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
@pytest.fixture(scope="session")
def private_key_pem(private_key):
return private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
@pytest.fixture(scope="session")
def public_key(private_key):
return private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
def _token_data(audience, subject, iss, iat=None, exp=None, nbf=None):
return {
"iss": iss,
"aud": audience,
"nbf": nbf() if nbf is not None else int(time.time()),
"iat": iat() if iat is not None else int(time.time()),
"exp": exp() if exp is not None else int(time.time() + 3600),
"sub": subject,
}
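# The time-dependent claims are passed as callables so each test case can
# shift them relative to "now", e.g. (hypothetical usage):
#
#   claims = _token_data("someaudience", "subject", "someissuer",
#                        exp=lambda: time.time() - 10)  # expired 10s ago
#   # ...which still decodes below because the tests pass leeway=60.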
@pytest.mark.parametrize(
"aud, iss, nbf, iat, exp, expected_exception",
[
pytest.param(
"invalidaudience",
"someissuer",
None,
None,
None,
"Invalid audience",
id="invalid audience",
),
pytest.param(
"someaudience", "invalidissuer", None, None, None, "Invalid issuer", id="invalid issuer"
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 120,
None,
None,
"The token is not yet valid",
id="invalid not before",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 120,
None,
"Issued At claim",
id="issued at in future",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 100,
"Signature has expired",
id="already expired",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() + 10000,
"Token was signed for more than",
id="expiration too far in future",
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 10,
None,
None,
None,
id="not before in future by within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 10,
None,
None,
id="issued at in future but within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 10,
None,
id="expiration in past but within leeway",
),
],
)
def test_decode_jwt_validation(
aud, iss, nbf, iat, exp, expected_exception, private_key_pem, public_key
):
token = jwt.encode(_token_data(aud, "subject", iss, iat, exp, nbf), private_key_pem, "RS256")
if expected_exception is not None:
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match(expected_exception)
else:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
def test_decode_jwt_invalid_key(private_key_pem):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Try to decode with a different public key.
another_public_key = (
rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
another_public_key,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("Signature verification failed")
def test_decode_jwt_invalid_algorithm(private_key_pem, public_key):
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Attempt to decode but only with a different algorithm than that used.
with pytest.raises(InvalidAlgorithmError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["ES256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("are not whitelisted")
def test_jwk_dict_to_public_key(private_key, private_key_pem):
public_key = private_key.public_key()
key_dict = jwk.dumps(
public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
converted = jwk_dict_to_public_key(key_dict)
# Encode with the test private key.
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
# Decode with the converted key.
max_exp = exp_max_s_option(3600)
decode(
token,
converted,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
| 27.933054
| 100
| 0.560815
|
import time
import pytest
import jwt
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from authlib.jose import jwk
from util.security.jwtutil import (
decode,
exp_max_s_option,
jwk_dict_to_public_key,
InvalidTokenError,
InvalidAlgorithmError,
)
@pytest.fixture(scope="session")
def private_key():
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
@pytest.fixture(scope="session")
def private_key_pem(private_key):
return private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
@pytest.fixture(scope="session")
def public_key(private_key):
return private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
def _token_data(audience, subject, iss, iat=None, exp=None, nbf=None):
return {
"iss": iss,
"aud": audience,
"nbf": nbf() if nbf is not None else int(time.time()),
"iat": iat() if iat is not None else int(time.time()),
"exp": exp() if exp is not None else int(time.time() + 3600),
"sub": subject,
}
@pytest.mark.parametrize(
"aud, iss, nbf, iat, exp, expected_exception",
[
pytest.param(
"invalidaudience",
"someissuer",
None,
None,
None,
"Invalid audience",
id="invalid audience",
),
pytest.param(
"someaudience", "invalidissuer", None, None, None, "Invalid issuer", id="invalid issuer"
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 120,
None,
None,
"The token is not yet valid",
id="invalid not before",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 120,
None,
"Issued At claim",
id="issued at in future",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 100,
"Signature has expired",
id="already expired",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() + 10000,
"Token was signed for more than",
id="expiration too far in future",
),
pytest.param(
"someaudience",
"someissuer",
lambda: time.time() + 10,
None,
None,
None,
id="not before in future by within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
lambda: time.time() + 10,
None,
None,
id="issued at in future but within leeway",
),
pytest.param(
"someaudience",
"someissuer",
None,
None,
lambda: time.time() - 10,
None,
id="expiration in past but within leeway",
),
],
)
def test_decode_jwt_validation(
aud, iss, nbf, iat, exp, expected_exception, private_key_pem, public_key
):
token = jwt.encode(_token_data(aud, "subject", iss, iat, exp, nbf), private_key_pem, "RS256")
if expected_exception is not None:
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match(expected_exception)
else:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["RS256"],
audience="someaudience",
issuer="someissuer",
options=max_exp,
leeway=60,
)
def test_decode_jwt_invalid_key(private_key_pem):
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
another_public_key = (
rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
with pytest.raises(InvalidTokenError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
another_public_key,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("Signature verification failed")
def test_decode_jwt_invalid_algorithm(private_key_pem, public_key):
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
with pytest.raises(InvalidAlgorithmError) as ite:
max_exp = exp_max_s_option(3600)
decode(
token,
public_key,
algorithms=["ES256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
assert ite.match("are not whitelisted")
def test_jwk_dict_to_public_key(private_key, private_key_pem):
public_key = private_key.public_key()
key_dict = jwk.dumps(
public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
)
converted = jwk_dict_to_public_key(key_dict)
token = jwt.encode(_token_data("aud", "subject", "someissuer"), private_key_pem, "RS256")
max_exp = exp_max_s_option(3600)
decode(
token,
converted,
algorithms=["RS256"],
audience="aud",
issuer="someissuer",
options=max_exp,
leeway=60,
)
| true
| true
|
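The suite above drives an internal decode wrapper, but the claim checks themselves (audience, issuer, leeway) come straight from PyJWT. A minimal self-contained sketch of the same validation flow, using HS256 with a throwaway shared secret instead of the RSA fixtures so it runs without key generation; the secret and claims are illustrative, and exp_max_s_option is specific to the wrapper, so it is omitted here:

import time
import jwt  # PyJWT

secret = "not-a-real-secret"  # illustrative only
payload = {
    "iss": "someissuer",
    "aud": "someaudience",
    "iat": int(time.time()),
    "exp": int(time.time() + 3600),
    "sub": "subject",
}
token = jwt.encode(payload, secret, algorithm="HS256")

# Same knobs the tests pass through the wrapper: audience, issuer,
# and a 60-second leeway for clock skew on iat/nbf/exp.
claims = jwt.decode(
    token,
    secret,
    algorithms=["HS256"],
    audience="someaudience",
    issuer="someissuer",
    leeway=60,
)
assert claims["sub"] == "subject"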
f709d502b186aa12fa05d2db0b3fe245571830c7
| 4,145
|
py
|
Python
|
mas_cache/models.py
|
blochberger/mas-cache
|
f7f507365220cd6a3628de1266a510a750121f0c
|
[
"0BSD"
] | 1
|
2020-04-24T09:02:53.000Z
|
2020-04-24T09:02:53.000Z
|
mas_cache/models.py
|
blochberger/mas-cache
|
f7f507365220cd6a3628de1266a510a750121f0c
|
[
"0BSD"
] | null | null | null |
mas_cache/models.py
|
blochberger/mas-cache
|
f7f507365220cd6a3628de1266a510a750121f0c
|
[
"0BSD"
] | null | null | null |
from typing import Any, Dict, Optional
from django.contrib.postgres.fields import JSONField
from django.core.validators import RegexValidator
from django.db import models
from django.utils.timezone import datetime
from django.utils.translation import gettext_lazy as _
from core.fields import IntegerChoicesField
# Validators
def CountryCodeValidator() -> RegexValidator:
return RegexValidator(r'^[a-z]{2}$')
# Enums
CHART_TYPE_API = ['top-free', 'top-paid']
class ChartType(models.IntegerChoices):
FREE = 0, _("Top Free Apps")
PAID = 1, _("Top Paid Apps")
@classmethod
def from_api(cls, value: str) -> 'ChartType':
if value not in CHART_TYPE_API:
raise ValueError(f"Unknown chart type: {value}")
return cls(CHART_TYPE_API.index(value))
def to_api(self) -> str:
return CHART_TYPE_API[int(self)]
# Models
class Application(models.Model):
itunes_id = models.PositiveIntegerField(primary_key=True)
@property
def latest_metadata(self) -> Optional['Metadata']:
metadata = Metadata.objects.filter(
application=self,
data__isnull=False,
).order_by('-timestamp')
if not metadata.exists():
return None
return metadata.first()
@property
def is_known(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return 'attributes' in metadata.data
@property
def is_bundle(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return metadata.data.get('type', None) == 'app-bundles'
@property
def timestamp(self) -> Optional[datetime]:
metadata = self.latest_metadata
if metadata is None:
return None
return metadata.timestamp
@property
def attributes(self) -> Dict[str, Any]:
metadata = self.latest_metadata
if metadata is None:
return {}
return metadata.data.get('attributes', {})
def platform_attributes(self, platform: str = 'osx') -> Dict[str, Any]:
return self.attributes.get('platformAttributes', {}).get(platform, {})
@property
def name(self) -> Optional[str]:
return self.attributes.get('name', None)
@property
def bundle_identifier(self) -> Optional[str]:
return self.platform_attributes().get('bundleId', None)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class Genre(models.Model):
itunes_id = models.PositiveSmallIntegerField(primary_key=True)
name = models.CharField(max_length=255, blank=True, null=True, default=None)
parent = models.ForeignKey(
'Genre',
on_delete=models.CASCADE,
related_name='children',
blank=True,
null=True,
default=None,
)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class AppStore(models.Model):
country = models.CharField(
max_length=2,
primary_key=True,
validators=[CountryCodeValidator()],
)
applications = models.ManyToManyField(
Application,
related_name='stores',
through='Metadata',
through_fields=('store', 'application'),
)
def __str__(self) -> str:
return f"{self.country}"
class Metadata(models.Model):
application = models.ForeignKey(Application, on_delete=models.CASCADE)
store = models.ForeignKey(AppStore, on_delete=models.CASCADE)
source = models.URLField(max_length=4096)
timestamp = models.DateTimeField()
data = JSONField()
class Meta:
unique_together = (('application', 'store', 'source', 'timestamp'),)
class Chart(models.Model):
genre = models.ForeignKey(
Genre,
on_delete=models.CASCADE,
related_name='charts',
)
store = models.ForeignKey(
AppStore,
on_delete=models.CASCADE,
related_name='charts',
)
chart_type = IntegerChoicesField(ChartType)
timestamp = models.DateTimeField()
class Meta:
unique_together = (('genre', 'store', 'chart_type', 'timestamp'),)
class ChartEntry(models.Model):
chart = models.ForeignKey(
Chart,
on_delete=models.CASCADE,
related_name='entries',
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
position = models.PositiveSmallIntegerField()
class Meta:
unique_together = (
('chart', 'application'),
('chart', 'position'),
('chart', 'application', 'position'),
)
| 23.027778
| 77
| 0.7269
|
from typing import Any, Dict, Optional
from django.contrib.postgres.fields import JSONField
from django.core.validators import RegexValidator
from django.db import models
from django.utils.timezone import datetime
from django.utils.translation import gettext_lazy as _
from core.fields import IntegerChoicesField
def CountryCodeValidator() -> RegexValidator:
return RegexValidator(r'^[a-z]{2}$')
CHART_TYPE_API = ['top-free', 'top-paid']
class ChartType(models.IntegerChoices):
FREE = 0, _("Top Free Apps")
PAID = 1, _("Top Paid Apps")
@classmethod
def from_api(cls, value: str) -> 'ChartType':
if value not in CHART_TYPE_API:
raise ValueError(f"Unknown chart type: {value}")
return cls(CHART_TYPE_API.index(value))
def to_api(self) -> str:
return CHART_TYPE_API[int(self)]
class Application(models.Model):
itunes_id = models.PositiveIntegerField(primary_key=True)
@property
def latest_metadata(self) -> Optional['Metadata']:
metadata = Metadata.objects.filter(
application=self,
data__isnull=False,
).order_by('-timestamp')
if not metadata.exists():
return None
return metadata.first()
@property
def is_known(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return 'attributes' in metadata.data
@property
def is_bundle(self) -> bool:
metadata = self.latest_metadata
if metadata is None:
return False
return metadata.data.get('type', None) == 'app-bundles'
@property
def timestamp(self) -> Optional[datetime]:
metadata = self.latest_metadata
if metadata is None:
return None
return metadata.timestamp
@property
def attributes(self) -> Dict[str, Any]:
metadata = self.latest_metadata
if metadata is None:
return {}
return metadata.data.get('attributes', {})
def platform_attributes(self, platform: str = 'osx') -> Dict[str, Any]:
return self.attributes.get('platformAttributes', {}).get(platform, {})
@property
def name(self) -> Optional[str]:
return self.attributes.get('name', None)
@property
def bundle_identifier(self) -> Optional[str]:
return self.platform_attributes().get('bundleId', None)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class Genre(models.Model):
itunes_id = models.PositiveSmallIntegerField(primary_key=True)
name = models.CharField(max_length=255, blank=True, null=True, default=None)
parent = models.ForeignKey(
'Genre',
on_delete=models.CASCADE,
related_name='children',
blank=True,
null=True,
default=None,
)
def __str__(self) -> str:
if self.name is None:
return str(self.itunes_id)
return self.name
class AppStore(models.Model):
country = models.CharField(
max_length=2,
primary_key=True,
validators=[CountryCodeValidator()],
)
applications = models.ManyToManyField(
Application,
related_name='stores',
through='Metadata',
through_fields=('store', 'application'),
)
def __str__(self) -> str:
return f"{self.country}"
class Metadata(models.Model):
application = models.ForeignKey(Application, on_delete=models.CASCADE)
store = models.ForeignKey(AppStore, on_delete=models.CASCADE)
source = models.URLField(max_length=4096)
timestamp = models.DateTimeField()
data = JSONField()
class Meta:
unique_together = (('application', 'store', 'source', 'timestamp'),)
class Chart(models.Model):
genre = models.ForeignKey(
Genre,
on_delete=models.CASCADE,
related_name='charts',
)
store = models.ForeignKey(
AppStore,
on_delete=models.CASCADE,
related_name='charts',
)
chart_type = IntegerChoicesField(ChartType)
timestamp = models.DateTimeField()
class Meta:
unique_together = (('genre', 'store', 'chart_type', 'timestamp'),)
class ChartEntry(models.Model):
chart = models.ForeignKey(
Chart,
on_delete=models.CASCADE,
related_name='entries',
)
application = models.ForeignKey(Application, on_delete=models.CASCADE)
position = models.PositiveSmallIntegerField()
class Meta:
unique_together = (
('chart', 'application'),
('chart', 'position'),
('chart', 'application', 'position'),
)
| true
| true
|
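ChartType above maps the store API's slug strings onto integer choices purely by list position. The same round trip can be sketched with a stdlib IntEnum, so it runs without a configured Django project; the names mirror the model code above:

from enum import IntEnum

CHART_TYPE_API = ['top-free', 'top-paid']

class ChartType(IntEnum):
    FREE = 0
    PAID = 1

    @classmethod
    def from_api(cls, value: str) -> 'ChartType':
        # Position in the slug list doubles as the enum value.
        if value not in CHART_TYPE_API:
            raise ValueError(f"Unknown chart type: {value}")
        return cls(CHART_TYPE_API.index(value))

    def to_api(self) -> str:
        return CHART_TYPE_API[int(self)]

assert ChartType.from_api('top-paid') is ChartType.PAID
assert ChartType.PAID.to_api() == 'top-paid'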
f709d58d517711bebc218d0488e153bc0a08ffde
| 443
|
py
|
Python
|
switches/migrations/0002_auto_20161227_1515.py
|
schatten/django-migration-workflow
|
91e6def9cf54e36c3b9cba6d7c28b63f945ca954
|
[
"MIT"
] | null | null | null |
switches/migrations/0002_auto_20161227_1515.py
|
schatten/django-migration-workflow
|
91e6def9cf54e36c3b9cba6d7c28b63f945ca954
|
[
"MIT"
] | null | null | null |
switches/migrations/0002_auto_20161227_1515.py
|
schatten/django-migration-workflow
|
91e6def9cf54e36c3b9cba6d7c28b63f945ca954
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-27 15:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('switches', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='farfaraway',
name='last',
field=models.IntegerField(default=11),
),
]
| 21.095238
| 50
| 0.609481
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('switches', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='farfaraway',
name='last',
field=models.IntegerField(default=11),
),
]
| true
| true
|
f709d6c3320473f055bd3d81082bcccaa89dcb9b
| 5,733
|
py
|
Python
|
ticketer/ticketer/middlewares.py
|
chiselko6/ticketer
|
9d9af440e75452478ba99cba659fca4d1ef4c12b
|
[
"MIT"
] | null | null | null |
ticketer/ticketer/middlewares.py
|
chiselko6/ticketer
|
9d9af440e75452478ba99cba659fca4d1ef4c12b
|
[
"MIT"
] | null | null | null |
ticketer/ticketer/middlewares.py
|
chiselko6/ticketer
|
9d9af440e75452478ba99cba659fca4d1ef4c12b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from typing import Iterable
from scrapy import signals
from .items import TransportInfo, SeatInfo
from .settings import MOCKED_DATA_PATH
class TicketerSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TicketerDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class MockedSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
with open(MOCKED_DATA_PATH, 'wb') as fout:
fout.write(response.body)
for i in result:
yield i
class TransportScheduleSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
required_min_seats = int(spider.settings['MIN_SEATS'])
required_transport_num = spider.settings['NUM']
required_seat_type = spider.settings['SEAT_TYPE']
def eligible_transport(transport: TransportInfo) -> bool:
return required_transport_num is None or required_transport_num == transport['id']
def eligible_seat(seat: SeatInfo) -> bool:
return required_seat_type is None or seat['type'] == required_seat_type
def eligible_seats(seats: Iterable[SeatInfo]) -> Iterable[SeatInfo]:
return filter(eligible_seat, seats)
def available_seat(seat: SeatInfo) -> bool:
remaining = seat['remaining']
if remaining is None:
return False
return int(remaining) >= required_min_seats
found_any = False
for transport in result:
found = False
if eligible_transport(transport):
seats = eligible_seats(transport['seats'])
for seat in seats:
if available_seat(seat):
found = True
found_any = found_any or found
if found:
yield transport
if not found_any:
yield response.request
class MockedDownloaderMiddleware(object):
def process_request(self, request, spider):
from scrapy.http import HtmlResponse
with open(MOCKED_DATA_PATH, 'rb') as fin:
body = fin.read()
response = HtmlResponse(url=request.url,
body=body)
return response
| 33.331395
| 94
| 0.652189
|
from typing import Iterable
from scrapy import signals
from .items import TransportInfo, SeatInfo
from .settings import MOCKED_DATA_PATH
class TicketerSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TicketerDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class MockedSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
with open(MOCKED_DATA_PATH, 'wb') as fout:
fout.write(response.body)
for i in result:
yield i
class TransportScheduleSpiderMiddleware(object):
def process_spider_output(self, response, result: Iterable[TransportInfo], spider):
required_min_seats = int(spider.settings['MIN_SEATS'])
required_transport_num = spider.settings['NUM']
required_seat_type = spider.settings['SEAT_TYPE']
def eligible_transport(transport: TransportInfo) -> bool:
return required_transport_num is None or required_transport_num == transport['id']
def eligible_seat(seat: SeatInfo) -> bool:
return required_seat_type is None or seat['type'] == required_seat_type
def eligible_seats(seats: Iterable[SeatInfo]) -> Iterable[SeatInfo]:
return filter(eligible_seat, seats)
def available_seat(seat: SeatInfo) -> bool:
remaining = seat['remaining']
if remaining is None:
return False
return int(remaining) >= required_min_seats
found_any = False
for transport in result:
found = False
if eligible_transport(transport):
seats = eligible_seats(transport['seats'])
for seat in seats:
if available_seat(seat):
found = True
found_any = found_any or found
if found:
yield transport
if not found_any:
yield response.request
class MockedDownloaderMiddleware(object):
def process_request(self, request, spider):
from scrapy.http import HtmlResponse
with open(MOCKED_DATA_PATH, 'rb') as fin:
body = fin.read()
response = HtmlResponse(url=request.url,
body=body)
return response
| true
| true
|
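TransportScheduleSpiderMiddleware only yields a transport when some eligible seat has enough remaining capacity, and re-queues the request otherwise. The predicate logic can be exercised standalone with plain dicts standing in for the scraped items; the sample data below is made up:

def has_available_seat(transport, min_seats, seat_type=None):
    """True if any seat of the requested type has >= min_seats remaining."""
    for seat in transport['seats']:
        if seat_type is not None and seat['type'] != seat_type:
            continue
        remaining = seat['remaining']
        if remaining is not None and int(remaining) >= min_seats:
            return True
    return False

transport = {
    'id': '702B',  # hypothetical train number
    'seats': [
        {'type': 'coupe', 'remaining': '0'},
        {'type': 'platzkart', 'remaining': '12'},
    ],
}
assert has_available_seat(transport, min_seats=2)
assert not has_available_seat(transport, min_seats=2, seat_type='coupe')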
f709d75432845ad140645213ccef7f5b9231db73
| 1,026
|
py
|
Python
|
marvelous/dates.py
|
rkuykendall/marvelous
|
7c362643c7327b8fffab941e04fe43dcc7e2d2f9
|
[
"MIT"
] | 14
|
2016-07-13T19:48:35.000Z
|
2021-11-01T01:47:04.000Z
|
marvelous/dates.py
|
rkuykendall/marvelous
|
7c362643c7327b8fffab941e04fe43dcc7e2d2f9
|
[
"MIT"
] | 19
|
2016-10-31T21:55:18.000Z
|
2021-10-31T21:50:37.000Z
|
marvelous/dates.py
|
rkuykendall/marvelous
|
7c362643c7327b8fffab941e04fe43dcc7e2d2f9
|
[
"MIT"
] | 6
|
2016-07-21T19:09:33.000Z
|
2021-05-16T12:36:47.000Z
|
from marshmallow import INCLUDE, Schema, fields, post_load, pre_load
class Dates:
def __init__(self, on_sale=None, foc=None, unlimited=None, **kwargs):
self.on_sale = on_sale
self.foc = foc
self.unlimited = unlimited
self.unknown = kwargs
class DatesSchema(Schema):
onsaleDate = fields.DateTime(attribute="on_sale")
focDate = fields.DateTime(attribute="foc")
unlimitedDate = fields.DateTime(attribute="unlimited")
class Meta:
unknown = INCLUDE
@pre_load
def process_input(self, data, **kwargs):
new_data = {}
for d in data:
# Marvel comic 4373, and maybe others, returns a focDate of
# "-0001-11-30T00:00:00-0500". The best way to handle this is
# probably just to ignore it, since I don't know how to fix it.
if d["date"][0] != "-":
new_data[d["type"]] = d["date"]
return new_data
@post_load
def make(self, data, **kwargs):
return Dates(**data)
| 29.314286
| 75
| 0.609162
|
from marshmallow import INCLUDE, Schema, fields, post_load, pre_load
class Dates:
def __init__(self, on_sale=None, foc=None, unlimited=None, **kwargs):
self.on_sale = on_sale
self.foc = foc
self.unlimited = unlimited
self.unknown = kwargs
class DatesSchema(Schema):
onsaleDate = fields.DateTime(attribute="on_sale")
focDate = fields.DateTime(attribute="foc")
unlimitedDate = fields.DateTime(attribute="unlimited")
class Meta:
unknown = INCLUDE
@pre_load
def process_input(self, data, **kwargs):
new_data = {}
for d in data:
if d["date"][0] != "-":
new_data[d["type"]] = d["date"]
return new_data
@post_load
def make(self, data, **kwargs):
return Dates(**data)
| true
| true
|
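The pre_load hook above reshapes Marvel's list of {"type", "date"} entries into the flat mapping the schema fields expect, silently dropping the malformed negative-year dates. The same transformation in isolation; the sample payload is illustrative:

def reshape_dates(data):
    """Turn Marvel's date list into {type: date}, skipping bad dates."""
    new_data = {}
    for d in data:
        if d["date"][0] != "-":  # ignore e.g. "-0001-11-30T00:00:00-0500"
            new_data[d["type"]] = d["date"]
    return new_data

payload = [
    {"type": "onsaleDate", "date": "2019-08-07T00:00:00-0400"},
    {"type": "focDate", "date": "-0001-11-30T00:00:00-0500"},
]
assert reshape_dates(payload) == {"onsaleDate": "2019-08-07T00:00:00-0400"}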
f709d768543bcd6310dd9e2b12e5531a81c54d69
| 15
|
py
|
Python
|
text_store/filters.py
|
digirati-co-uk/drf-text-store
|
ca29cfc4522d0f718e0419f956705b845e77dae4
|
[
"MIT"
] | null | null | null |
text_store/filters.py
|
digirati-co-uk/drf-text-store
|
ca29cfc4522d0f718e0419f956705b845e77dae4
|
[
"MIT"
] | null | null | null |
text_store/filters.py
|
digirati-co-uk/drf-text-store
|
ca29cfc4522d0f718e0419f956705b845e77dae4
|
[
"MIT"
] | null | null | null |
# Placeholder
| 7.5
| 14
| 0.733333
| true
| true
|
|
f709d7a5951382a166dd35184fa8a0d66be3aabe
| 280
|
py
|
Python
|
opm/external/sentiment.py
|
Open-Prose-Metrics/open_prose_metrics_app-core
|
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
|
[
"MIT"
] | null | null | null |
opm/external/sentiment.py
|
Open-Prose-Metrics/open_prose_metrics_app-core
|
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
|
[
"MIT"
] | 4
|
2021-04-30T21:38:10.000Z
|
2022-01-13T03:32:33.000Z
|
opm/external/sentiment.py
|
Open-Prose-Metrics/open_prose_metrics_app-core
|
9df65edfe9ee9af0a0731c3f2e21ea25bced250c
|
[
"MIT"
] | 1
|
2021-03-21T14:08:28.000Z
|
2021-03-21T14:08:28.000Z
|
#external tool: text-processing.com sentiment API
import requests
import json
def sentiment_result(text):
URL = 'http://text-processing.com/api/sentiment/'
raw_text = text
r = requests.post(URL, data = {'text':raw_text})
sentiment = json.loads(r.text).get('label')
return sentiment
| 23.333333
| 53
| 0.692857
|
import requests
import json
def sentiment_result(text):
URL = 'http://text-processing.com/api/sentiment/'
raw_text = text
r = requests.post(URL, data = {'text':raw_text})
sentiment = json.loads(r.text).get('label')
return sentiment
| true
| true
|
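sentiment_result is a form-encoded POST whose JSON response carries a label field, so it can be exercised offline by stubbing requests.post with unittest.mock; a sketch, assuming the function above is in scope:

import json
from unittest import mock

fake = mock.Mock(text=json.dumps({'label': 'pos'}))
with mock.patch('requests.post', return_value=fake) as post:
    # No network traffic: the patched requests.post returns the fake.
    assert sentiment_result('great service, would book again') == 'pos'
    post.assert_called_once_with(
        'http://text-processing.com/api/sentiment/',
        data={'text': 'great service, would book again'},
    )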
f709d7d9a6c3f3c9af884ebe9c62a536e3dfa929
| 597
|
py
|
Python
|
verification_rules/common/__init__.py
|
adrianmkng/watchmen
|
4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa
|
[
"Apache-2.0"
] | 190
|
2017-12-13T05:01:42.000Z
|
2021-11-15T23:35:54.000Z
|
verification_rules/common/__init__.py
|
adrianmkng/watchmen
|
4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa
|
[
"Apache-2.0"
] | 2
|
2018-08-31T04:53:03.000Z
|
2018-11-14T00:13:49.000Z
|
verification_rules/common/__init__.py
|
adrianmkng/watchmen
|
4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa
|
[
"Apache-2.0"
] | 22
|
2017-12-13T04:36:46.000Z
|
2021-07-29T07:37:41.000Z
|
# Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 39.8
| 74
| 0.768844
| true
| true
|
|
f709d89cfc1fad043efd50cf5039f023c148e8cd
| 419
|
py
|
Python
|
eventmanager/categories/migrations/0002_category_category_image.py
|
ralcabes/EventManager
|
dd73b2387228b4508d27974a8eeb222d5ffeecdc
|
[
"MIT"
] | 4
|
2019-01-06T16:58:20.000Z
|
2019-04-08T10:20:46.000Z
|
eventmanager/categories/migrations/0002_category_category_image.py
|
ralcabes/EventManager
|
dd73b2387228b4508d27974a8eeb222d5ffeecdc
|
[
"MIT"
] | 297
|
2018-11-14T13:59:19.000Z
|
2022-03-11T23:33:28.000Z
|
eventmanager/categories/migrations/0002_category_category_image.py
|
ralcabes/EventManager
|
dd73b2387228b4508d27974a8eeb222d5ffeecdc
|
[
"MIT"
] | 1
|
2019-04-22T15:17:32.000Z
|
2019-04-22T15:17:32.000Z
|
# Generated by Django 2.1.5 on 2019-02-01 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='category_image',
field=models.ImageField(blank=True, null=True, upload_to='categories'),
),
]
| 22.052632
| 83
| 0.610979
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='category_image',
field=models.ImageField(blank=True, null=True, upload_to='categories'),
),
]
| true
| true
|
f709d8e6fdd3da9cbade87b9b8ef4c5d23380806
| 1,121
|
py
|
Python
|
payment/urls.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | null | null | null |
payment/urls.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | null | null | null |
payment/urls.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | 1
|
2021-07-01T03:05:21.000Z
|
2021-07-01T03:05:21.000Z
|
"""singosgu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path(r'freight/', views.TransportationFeeListViewSet.as_view({"get": "list", "post": "create"}), name="transportationfee"),
re_path(r'^freight/(?P<pk>\d+)/$', views.TransportationFeeListViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="transportationfee_1")
]
| 36.16129
| 123
| 0.706512
|
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path(r'freight/', views.TransportationFeeListViewSet.as_view({"get": "list", "post": "create"}), name="transportationfee"),
re_path(r'^freight/(?P<pk>\d+)/$', views.TransportationFeeListViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="transportationfee_1")
]
| true
| true
|
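The two urlpatterns above hand-map the list/create and detail actions onto the viewset. With Django REST Framework the same wiring is more commonly expressed through a router; a sketch of the equivalent, assuming the viewset keeps the standard action names:

from rest_framework.routers import DefaultRouter

from . import views

router = DefaultRouter()
router.register(r'freight', views.TransportationFeeListViewSet,
                basename='transportationfee')
urlpatterns = router.urls  # generates the list/detail routes shown above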
f709d94bfb6b20dabb81896e00c628fa1b92786d
| 4,040
|
py
|
Python
|
pyfesom/fesom2gdal.py
|
pgierz/pyfesom
|
6bff7c9a10a080ccb0e089cde0cd0005cd13644d
|
[
"MIT"
] | null | null | null |
pyfesom/fesom2gdal.py
|
pgierz/pyfesom
|
6bff7c9a10a080ccb0e089cde0cd0005cd13644d
|
[
"MIT"
] | null | null | null |
pyfesom/fesom2gdal.py
|
pgierz/pyfesom
|
6bff7c9a10a080ccb0e089cde0cd0005cd13644d
|
[
"MIT"
] | null | null | null |
# Save the geometry (triangles, verticies) of a FESOM grid to a gdal dataset
# Author: R. Rietbroek
# Date 17 May 2019
# Currently this save the surface nodes only
# Improvements are possible
# * Optionally store the 3D surfaces
# * add info on e.g. bathymetry the nodes
# *
import osgeo.ogr as ogr
import osgeo.osr as osr
def fesom2gdal(mesh,outputname,gdaldriver='GPKG'):
"""Export a FESOM surface mesh to a GIS shapefile
input: mesh a FESOM mesh loaded with fesom_mesh(meshpath, get3d=True)
outputname: the name of the output dataset
gdaldriver: the driver to use to write the output (defaults to geopackage, but could be anything the gdal library supports including POSTGIS)
returns: nothing"""
driver = ogr.GetDriverByName(gdaldriver)
data_source = driver.CreateDataSource(outputname)
# create the spatial reference, WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
# create the layer containing the vertices
vertlayer = data_source.CreateLayer("vert", srs, ogr.wkbPoint)
field_onbound = ogr.FieldDefn("onboundary", ogr.OFTInteger)
vertlayer.CreateField(field_onbound)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
vertlayer.CreateField(field_topo)
#also store the indices of the 3Delems for the corresponding z-levels (to look up 3d indices)
# NOTE: ESRI Shapefiles do not support lists as attributes, so this field will not be registered when exporting to a Shapefile
field_zindx=ogr.FieldDefn("nodeid",ogr.OFTIntegerList)
vertlayer.CreateField(field_zindx)
# add vertices
for id,(lon,lat,onbnd) in enumerate(zip(mesh.x2,mesh.y2,mesh.ind2d)):
feature = ogr.Feature(vertlayer.GetLayerDefn())
# Note: we need a conversion to int so the value get's accepted by the gdal library
feature.SetField("onboundary", int(onbnd))
feature.SetField('topo',mesh.topo[id])
# note: we subtract 1 here to be consistent with the zero-indexing used in nodeid
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(x-1) for x in mesh.n32[id,:] if x >=0])
#create a point geometry
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lon,lat)
feature.SetGeometry(point)
vertlayer.CreateFeature(feature)
# Dereference the feature in order to appropriately call the destructor
feature = None
# create the layer containing the triangles
# NOTE: the dedicated triangle type is not visible in Qgis
# surftype=ogr.wkbTriangle
surftype=ogr.wkbPolygon
tinlayer = data_source.CreateLayer("tin", srs, surftype)
# Add the fields we're interested in (nodeid's of the vertices, the area and a mean topography)
field_nodeid = ogr.FieldDefn("nodeid", ogr.OFTIntegerList)
tinlayer.CreateField(field_nodeid)
field_area = ogr.FieldDefn("area", ogr.OFTReal)
tinlayer.CreateField(field_area)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
tinlayer.CreateField(field_topo)
# exclude cyclic elements
elem2=mesh.elem[mesh.no_cyclic_elem,:]
#loop over triangular elements
for i1,i2,i3 in elem2:
feature = ogr.Feature(tinlayer.GetLayerDefn())
ring=ogr.Geometry(ogr.wkbLinearRing)
tri=ogr.Geometry(surftype)
ring.AddPoint(mesh.x2[i1],mesh.y2[i1])
ring.AddPoint(mesh.x2[i2],mesh.y2[i2])
ring.AddPoint(mesh.x2[i3],mesh.y2[i3])
tri.AddGeometry(ring)
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(i1),int(i2),int(i3)])
# TODO convert to square km (which projection is used for FESOM??)
feature.SetField("area", tri.Area())
#currently just set topo to the mean of the topo of the vertices
feature.SetField("topo", (mesh.topo[i1]+mesh.topo[i2]+mesh.topo[i3])/3.0)
feature.SetGeometry(tri)
tinlayer.CreateFeature(feature)
feature=None
# Save and close the data source
data_source = None
| 38.113208
| 149
| 0.690347
|
import osgeo.ogr as ogr
import osgeo.osr as osr
def fesom2gdal(mesh,outputname,gdaldriver='GPKG'):
driver = ogr.GetDriverByName(gdaldriver)
data_source = driver.CreateDataSource(outputname)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
vertlayer = data_source.CreateLayer("vert", srs, ogr.wkbPoint)
field_onbound = ogr.FieldDefn("onboundary", ogr.OFTInteger)
vertlayer.CreateField(field_onbound)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
vertlayer.CreateField(field_topo)
field_zindx=ogr.FieldDefn("nodeid",ogr.OFTIntegerList)
vertlayer.CreateField(field_zindx)
for id,(lon,lat,onbnd) in enumerate(zip(mesh.x2,mesh.y2,mesh.ind2d)):
feature = ogr.Feature(vertlayer.GetLayerDefn())
feature.SetField("onboundary", int(onbnd))
feature.SetField('topo',mesh.topo[id])
# note: we subtract 1 here to be consistent with the zero-indexing used in nodeid
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(x-1) for x in mesh.n32[id,:] if x >=0])
#create a point geometry
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lon,lat)
feature.SetGeometry(point)
vertlayer.CreateFeature(feature)
# Dereference the feature in order to appropriately call the destructor
feature = None
# create the layer containing the triangles
# NOTE: the dedicated triangle type is not visible in Qgis
# surftype=ogr.wkbTriangle
surftype=ogr.wkbPolygon
tinlayer = data_source.CreateLayer("tin", srs, surftype)
# Add the fields we're interested in (nodeid's of the vertices, the area and a mean topography)
field_nodeid = ogr.FieldDefn("nodeid", ogr.OFTIntegerList)
tinlayer.CreateField(field_nodeid)
field_area = ogr.FieldDefn("area", ogr.OFTReal)
tinlayer.CreateField(field_area)
field_topo = ogr.FieldDefn("topo", ogr.OFTReal)
tinlayer.CreateField(field_topo)
# exclude cyclic elements
elem2=mesh.elem[mesh.no_cyclic_elem,:]
#loop over triangular elements
for i1,i2,i3 in elem2:
feature = ogr.Feature(tinlayer.GetLayerDefn())
ring=ogr.Geometry(ogr.wkbLinearRing)
tri=ogr.Geometry(surftype)
ring.AddPoint(mesh.x2[i1],mesh.y2[i1])
ring.AddPoint(mesh.x2[i2],mesh.y2[i2])
ring.AddPoint(mesh.x2[i3],mesh.y2[i3])
tri.AddGeometry(ring)
idxfield=feature.GetFieldIndex("nodeid")
feature.SetFieldIntegerList(idxfield,[int(i1),int(i2),int(i3)])
# TODO convert to square km (which projection is used for FESOM??)
feature.SetField("area", tri.Area())
#currently just set topo to the mean of the topo of the vertices
feature.SetField("topo", (mesh.topo[i1]+mesh.topo[i2]+mesh.topo[i3])/3.0)
feature.SetGeometry(tri)
tinlayer.CreateFeature(feature)
feature=None
# Save and close the data source
data_source = None
| true
| true
|
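Per the docstring, the mesh argument comes from pyfesom's loader; a hedged usage sketch, where the mesh path is a placeholder and fesom_mesh is the loader named in the docstring:

from pyfesom import fesom_mesh
from pyfesom.fesom2gdal import fesom2gdal

mesh = fesom_mesh('/path/to/mesh', get3d=True)  # placeholder mesh directory
fesom2gdal(mesh, 'fesom_surface.gpkg')          # default GeoPackage output
# fesom2gdal(mesh, 'PG:dbname=fesom', gdaldriver='PostgreSQL')  # or write to POSTGIS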
f709d99e36430fd0fbf77d273f9236d04f2c7a44
| 4,175
|
py
|
Python
|
settings.py
|
proffalken/edison
|
5bfa941f8876cb8698cd8009c4514bc03d24c109
|
[
"BSD-3-Clause"
] | 3
|
2015-11-05T07:29:00.000Z
|
2021-06-17T23:44:17.000Z
|
settings.py
|
proffalken/edison
|
5bfa941f8876cb8698cd8009c4514bc03d24c109
|
[
"BSD-3-Clause"
] | 1
|
2016-05-04T10:54:48.000Z
|
2016-05-04T10:54:56.000Z
|
settings.py
|
proffalken/edison
|
5bfa941f8876cb8698cd8009c4514bc03d24c109
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
# Django settings for Edison project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# Django Debug Toolbar settings
INTERNAL_IPS = ('127.0.0.1','192.168.1.56','192.168.3.57')
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS':False,
'HIDE_DJANGO_SQL': False,
}
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'edison' # Or path to database file if using sqlite3.
DATABASE_USER = 'edison' # Not used with sqlite3.
DATABASE_PASSWORD = 'edison' # Not used with sqlite3.
DATABASE_HOST = 'localhost' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '3306' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = "/var/djangosites/edison/media"
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://edison/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = 'http://edison/admin_m/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&(nwanlz8mdftiy06qrjkqh_i428x90u&ajb%lipbc(wk79gb*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
# 'django.middleware.csrf.CsrfMiddleware',
#'django.middleware.csrf.CsrfResponseMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/var/djangosites/edison/templates",
)
# set oauth callback address
OAUTH_CALLBACK_VIEW="api.views.request_token_ready"
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.admindocs',
'cmdb',
'piston',
'changemanagement',
'orchestra',
'auditorium',
)
| 35.683761
| 114
| 0.734371
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
INTERNAL_IPS = ('127.0.0.1','192.168.1.56','192.168.3.57')
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS':False,
'HIDE_DJANGO_SQL': False,
}
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql'
DATABASE_NAME = 'edison'
DATABASE_USER = 'edison'
DATABASE_PASSWORD = 'edison'
DATABASE_HOST = 'localhost'
DATABASE_PORT = '3306'
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = "/var/djangosites/edison/media"
MEDIA_URL = 'http://edison/media/'
ADMIN_MEDIA_PREFIX = 'http://edison/admin_m/'
SECRET_KEY = '&(nwanlz8mdftiy06qrjkqh_i428x90u&ajb%lipbc(wk79gb*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
# 'django.middleware.csrf.CsrfMiddleware',
#'django.middleware.csrf.CsrfResponseMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/var/djangosites/edison/templates",
)
OAUTH_CALLBACK_VIEW="api.views.request_token_ready"
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.admindocs',
'cmdb',
'piston',
'changemanagement',
'orchestra',
'auditorium',
)
| true
| true
|
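The flat DATABASE_* settings above are the old style that Django removed in 1.4; on any modern Django they map onto a single DATABASES dict. The equivalent of the values above would be roughly:

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'edison',
        'USER': 'edison',
        'PASSWORD': 'edison',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}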
f709d9ab7f9abe834c26068d509b0d3e3b154b68
| 9,223
|
py
|
Python
|
tests/test_morse.py
|
amandalynnes/morse-code-bits
|
88d1f66404824c0d882c59811324996d4b2baf6b
|
[
"MIT"
] | null | null | null |
tests/test_morse.py
|
amandalynnes/morse-code-bits
|
88d1f66404824c0d882c59811324996d4b2baf6b
|
[
"MIT"
] | null | null | null |
tests/test_morse.py
|
amandalynnes/morse-code-bits
|
88d1f66404824c0d882c59811324996d4b2baf6b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Unit tests for Morse Code: Bits
Students should not modify this file.
"""
__author__ = 'madarp'
import sys
import unittest
import importlib
import subprocess
# suppress __pycache__ and .pyc files
sys.dont_write_bytecode = True
# Kenzie devs: change this to 'soln.morse' to test solution
PKG_NAME = 'morse'
# some handy morse strings
# HEY JUDE
morse_hey_jude = '.... . -.--   .--- ..- -.. .'
# THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.
morse_quick_fox = '- .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .   .-.. .- --.. -.--   -.. --- --. .-.-.-'
class TestDecodeMorse(unittest.TestCase):
"""Only tests the decode_morse() function"""
@classmethod
def setUpClass(cls):
"""Performs module import and suite setup at test-runtime"""
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_hey_jude(self):
"""Check basic HEY JUDE"""
actual = self.module.decode_morse(morse_hey_jude)
self.assertEqual(actual, 'HEY JUDE')
def test_basic_letters(self):
"""Check Basic Morse decoding"""
self.assertEqual(self.module.decode_morse('.-'), 'A')
self.assertEqual(self.module.decode_morse('.'), 'E')
self.assertEqual(self.module.decode_morse('..'), 'I')
self.assertEqual(self.module.decode_morse('. .'), 'EE')
self.assertEqual(self.module.decode_morse('.   .'), 'E E')
self.assertEqual(self.module.decode_morse('...---...'), 'SOS')
self.assertEqual(self.module.decode_morse('... --- ...'), 'SOS')
self.assertEqual(self.module.decode_morse('...   ---   ...'), 'S O S')
def test_extra_spaces(self):
"""Check handling of spaces"""
self.assertEqual(self.module.decode_morse(' . '), 'E')
self.assertEqual(self.module.decode_morse(' .   . '), 'E E')
def test_complex(self):
"""Check long message decoding"""
morse = '   ...---... -.-.--   - .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .' '   .-.. .- --.. -.--   -.. --- --. .-.-.-   '
actual = self.module.decode_morse(morse)
self.assertEqual(actual, 'SOS! THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.')
def test_flake8(self):
"""Checking for PEP8/flake8 compliance"""
result = subprocess.run(['flake8', self.module.__file__])
self.assertEqual(result.returncode, 0)
def test_author_string(self):
"""Checking for __author__ string"""
self.assertNotEqual(self.module.__author__, '???')
class TestDecodeBits(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Performs module import and suite setup at test-runtime"""
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_et_phone_home(self):
"""Check if ET PHONE HOME can be transcoded to Morse"""
bits = '11000000111111000000000000001100111111001111110011000000110011001100110000001111110011111100111111000000111111001100000011000000000000001100110011001100000011111100111111001111110000001111110011111100000011'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, '. -   .--. .... --- -. .   .... --- -- .')
def test_hey_jude_2x(self):
"""Check if HEY JUDE can be transcoded to Morse"""
bits = '1100110011001100000011000000111111001100111111001111110000000000000011001111110011111100111111000000110011001111110000001111110011001100000011'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_hey_jude_6x(self):
bits = '111111000000111111000000111111000000111111000000000000000000111111000000000000000000111111111111111111000000111111000000111111111111111111000000111111111111111111000000000000000000000000000000000000000000111111000000111111111111111111000000111111111111111111000000111111111111111111000000000000000000111111000000111111000000111111111111111111000000000000000000111111111111111111000000111111000000111111000000000000000000111111'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_basic_bits(self):
"""Check short letters transcoding to Morse"""
self.assertEqual(self.module.decode_bits('1'), '.') # E
self.assertEqual(self.module.decode_bits('101'), '..') # I
self.assertEqual(self.module.decode_bits('10001'), '. .') # E E
self.assertEqual(self.module.decode_bits('10111'), '.-') # A
self.assertEqual(self.module.decode_bits('1110111'), '--') # M
def test_multiple_bits_per_dot(self):
"""Multiple bits per dot handling"""
self.assertEqual(self.module.decode_bits('111'), '.') # E
self.assertEqual(self.module.decode_bits('1111111'), '.') # E
self.assertEqual(self.module.decode_bits('110011'), '..') # I
self.assertEqual(self.module.decode_bits('111000111'), '..') # I
self.assertEqual(self.module.decode_bits('111110000011111'), '..') # I
self.assertEqual(self.module.decode_bits('111000000000111'), '. .') # EE
self.assertEqual(self.module.decode_bits('11111100111111'), '--') # M
self.assertEqual(self.module.decode_bits('111000111000111'), '...') # S
def test_extra_zeroes(self):
"""Check handling of leading and trailing zeros"""
self.assertEqual(self.module.decode_bits('01110'), '.')
self.assertEqual(self.module.decode_bits('000000011100000'), '.')
def test_long_message_1x(self):
"""Check long message at 1x time unit"""
bits = (
'0001110001010101000100000001110111010111000101011100010100011101'
'0111010001110101110000000111010101000101110100011101110111000101'
'1101110001110100000001010111010001110111011100011101010111000000'
'01011101110111000101011100011101110001011101110100010101000000011'
'10111011100010101011100010001011101000000011100010101010001000000'
'01011101010001011100011101110101000111010111011100000001110101000'
'11101110111000111011101000101110101110101110'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
def test_long_message_5x(self):
bits = (
'1111111111111110000000000000001111100000111110000011111000001111'
'1000000000000000111110000000000000000000000000000000000011111111'
'1111111000001111111111111110000011111000001111111111111110000000'
'0000000011111000001111100000111111111111111000000000000000111110'
'0000111110000000000000001111111111111110000011111000001111111111'
'1111100000111110000000000000001111111111111110000011111000001111'
'1111111111100000000000000000000000000000000000111111111111111000'
'00111110000011111000001111100000000000000011111000001111111111111'
'11000001111100000000000000011111111111111100000111111111111111000'
'00111111111111111000000000000000111110000011111111111111100000111'
'11111111111100000000000000011111111111111100000111110000000000000'
'00000000000000000000001111100000111110000011111111111111100000111'
'11000000000000000111111111111111000001111111111111110000011111111'
'111111100000000000000011111111111111100000111110000011111000001111'
'11111111111000000000000000000000000000000000001111100000111111111'
'11111100000111111111111111000001111111111111110000000000000001111'
'10000011111000001111111111111110000000000000001111111111111110000'
'011111111111111100000000000000011111000001111111111111110000011111'
'111111111100000111110000000000000001111100000111110000011111000000'
'000000000000000000000000000001111111111111110000011111111111111100'
'000111111111111111000000000000000111110000011111000001111100000111'
'111111111111000000000000000111110000000000000001111100000111111111'
'111111000001111100000000000000000000000000000000000111111111111111'
'0000000000000001111100000111110000011111000001111100000000000000011111000000000000000000000000000000000001111100000111111111111111000001111100000111110000000000000001111100000111111111111111000000000000000111111111111111000001111111111111110000011111000001111100000000000000011111111111111100000111110000011111111111111100000111111111111111000000000000000000000000000000000001111111111111110000011111000001111100000000000000011111111111111100000111111111111111000001111111111111110000000000000001111111111111110000011111111111111100000111110000000000000001111100000111111111111111000001111100000111111111111111000001111100000111111111111111'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 54.252941
| 654
| 0.70682
|
__author__ = 'madarp'
import sys
import unittest
import importlib
import subprocess
sys.dont_write_bytecode = True
PKG_NAME = 'morse'
morse_hey_jude = '.... . -.--   .--- ..- -.. .'
morse_quick_fox = '- .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .   .-.. .- --.. -.--   -.. --- --. .-.-.-'
class TestDecodeMorse(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_hey_jude(self):
actual = self.module.decode_morse(morse_hey_jude)
self.assertEqual(actual, 'HEY JUDE')
def test_basic_letters(self):
self.assertEqual(self.module.decode_morse('.-'), 'A')
self.assertEqual(self.module.decode_morse('.'), 'E')
self.assertEqual(self.module.decode_morse('..'), 'I')
self.assertEqual(self.module.decode_morse('. .'), 'EE')
self.assertEqual(self.module.decode_morse('.   .'), 'E E')
self.assertEqual(self.module.decode_morse('...---...'), 'SOS')
self.assertEqual(self.module.decode_morse('... --- ...'), 'SOS')
self.assertEqual(self.module.decode_morse('...   ---   ...'), 'S O S')
def test_extra_spaces(self):
self.assertEqual(self.module.decode_morse(' . '), 'E')
self.assertEqual(self.module.decode_morse(' .   . '), 'E E')
def test_complex(self):
morse = '   ...---... -.-.--   - .... .   --.- ..- .. -.-. -.-   -... .-. --- .-- -.   ..-. --- -..-   .--- ..- -- .--. ...   --- ...- . .-.   - .... .' '   .-.. .- --.. -.--   -.. --- --. .-.-.-   '
actual = self.module.decode_morse(morse)
self.assertEqual(actual, 'SOS! THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG.')
def test_flake8(self):
result = subprocess.run(['flake8', self.module.__file__])
self.assertEqual(result.returncode, 0)
def test_author_string(self):
self.assertNotEqual(self.module.__author__, '???')
class TestDecodeBits(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.assertGreaterEqual(cls, sys.version_info[0], 3)
cls.module = importlib.import_module(PKG_NAME)
def test_et_phone_home(self):
bits = '11000000111111000000000000001100111111001111110011000000110011001100110000001111110011111100111111000000111111001100000011000000000000001100110011001100000011111100111111001111110000001111110011111100000011'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, '. -   .--. .... --- -. .   .... --- -- .')
def test_hey_jude_2x(self):
bits = '1100110011001100000011000000111111001100111111001111110000000000000011001111110011111100111111000000110011001111110000001111110011001100000011'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_hey_jude_6x(self):
bits = '111111000000111111000000111111000000111111000000000000000000111111000000000000000000111111111111111111000000111111000000111111111111111111000000111111111111111111000000000000000000000000000000000000000000111111000000111111111111111111000000111111111111111111000000111111111111111111000000000000000000111111000000111111000000111111111111111111000000000000000000111111111111111111000000111111000000111111000000000000000000111111'
morse = self.module.decode_bits(bits)
self.assertEqual(morse, morse_hey_jude)
def test_basic_bits(self):
self.assertEqual(self.module.decode_bits('1'), '.')
self.assertEqual(self.module.decode_bits('101'), '..')
self.assertEqual(self.module.decode_bits('10001'), '. .')
self.assertEqual(self.module.decode_bits('10111'), '.-')
self.assertEqual(self.module.decode_bits('1110111'), '--')
def test_multiple_bits_per_dot(self):
self.assertEqual(self.module.decode_bits('111'), '.')
self.assertEqual(self.module.decode_bits('1111111'), '.')
self.assertEqual(self.module.decode_bits('110011'), '..')
self.assertEqual(self.module.decode_bits('111000111'), '..')
self.assertEqual(self.module.decode_bits('111110000011111'), '..')
self.assertEqual(self.module.decode_bits('111000000000111'), '. .')
self.assertEqual(self.module.decode_bits('11111100111111'), '--')
self.assertEqual(self.module.decode_bits('111000111000111'), '...')
def test_extra_zeroes(self):
self.assertEqual(self.module.decode_bits('01110'), '.')
self.assertEqual(self.module.decode_bits('000000011100000'), '.')
def test_long_message_1x(self):
bits = (
'0001110001010101000100000001110111010111000101011100010100011101'
'0111010001110101110000000111010101000101110100011101110111000101'
'1101110001110100000001010111010001110111011100011101010111000000'
'01011101110111000101011100011101110001011101110100010101000000011'
'10111011100010101011100010001011101000000011100010101010001000000'
'01011101010001011100011101110101000111010111011100000001110101000'
'11101110111000111011101000101110101110101110'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
def test_long_message_5x(self):
bits = (
'1111111111111110000000000000001111100000111110000011111000001111'
'1000000000000000111110000000000000000000000000000000000011111111'
'1111111000001111111111111110000011111000001111111111111110000000'
'0000000011111000001111100000111111111111111000000000000000111110'
'0000111110000000000000001111111111111110000011111000001111111111'
'1111100000111110000000000000001111111111111110000011111000001111'
'1111111111100000000000000000000000000000000000111111111111111000'
'00111110000011111000001111100000000000000011111000001111111111111'
'11000001111100000000000000011111111111111100000111111111111111000'
'00111111111111111000000000000000111110000011111111111111100000111'
'11111111111100000000000000011111111111111100000111110000000000000'
'00000000000000000000001111100000111110000011111111111111100000111'
'11000000000000000111111111111111000001111111111111110000011111111'
'111111100000000000000011111111111111100000111110000011111000001111'
'11111111111000000000000000000000000000000000001111100000111111111'
'11111100000111111111111111000001111111111111110000000000000001111'
'10000011111000001111111111111110000000000000001111111111111110000'
'011111111111111100000000000000011111000001111111111111110000011111'
'111111111100000111110000000000000001111100000111110000011111000000'
'000000000000000000000000000001111111111111110000011111111111111100'
'000111111111111111000000000000000111110000011111000001111100000111'
'111111111111000000000000000111110000000000000001111100000111111111'
'111111000001111100000000000000000000000000000000000111111111111111'
'0000000000000001111100000111110000011111000001111100000000000000011111000000000000000000000000000000000001111100000111111111111111000001111100000111110000000000000001111100000111111111111111000000000000000111111111111111000001111111111111110000011111000001111100000000000000011111111111111100000111110000011111111111111100000111111111111111000000000000000000000000000000000001111111111111110000011111000001111100000000000000011111111111111100000111111111111111000001111111111111110000000000000001111111111111110000011111111111111100000111110000000000000001111100000111111111111111000001111100000111111111111111000001111100000111111111111111'
)
actual = self.module.decode_bits(bits)
self.assertEqual(actual, morse_quick_fox)
if __name__ == '__main__':
unittest.main(verbosity=2)
| true
| true
|
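The bits tests pin down the expected behaviour: strip zero padding, infer the time unit as the shortest run, then map runs of ones to dots and dashes and runs of zeros to letter and word gaps. One way to satisfy them, as a sketch rather than the graded solution (the function name simply matches the module under test):

import re

def decode_bits(bits):
    bits = bits.strip('0')                      # drop leading/trailing padding
    runs = re.findall(r'1+|0+', bits)
    unit = min(len(r) for r in runs)            # shortest run = one time unit
    out = []
    for r in runs:
        n = len(r) // unit
        if r[0] == '1':
            out.append('.' if n == 1 else '-')  # 1 unit = dot, 3 units = dash
        else:
            # 1 unit: intra-letter gap, 3 units: letter gap, 7 units: word gap
            out.append({1: '', 3: ' '}.get(n, '   '))
    return ''.join(out)

assert decode_bits('01110') == '.'
assert decode_bits('110011') == '..'
assert decode_bits('111000111000111') == '...'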
f709d9bb17cf6263074bdfc8c1b3ba2b28108867
| 9,762
|
py
|
Python
|
cloudkitty/dataframe.py
|
wanghuiict/cloudkitty
|
11ff713042eb0354f497f7051130630c46860735
|
[
"Apache-2.0"
] | 97
|
2015-10-18T02:53:17.000Z
|
2022-03-07T05:15:39.000Z
|
cloudkitty/dataframe.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 1
|
2022-01-20T16:35:22.000Z
|
2022-01-20T16:35:22.000Z
|
cloudkitty/dataframe.py
|
shanafang9/cloudkitty
|
911c90569ccb09ecf0d7aa11a5a707c8ebda09cf
|
[
"Apache-2.0"
] | 54
|
2015-10-27T10:55:02.000Z
|
2022-02-18T08:23:19.000Z
|
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import datetime
import decimal
import functools
import voluptuous
from werkzeug import datastructures
from cloudkitty.utils import json
from cloudkitty.utils import tz as tzutils
from cloudkitty.utils import validation as vutils
# NOTE(peschk_l): qty and price are converted to strings to avoid
# floating-point conversion issues:
# Decimal(0.121) == Decimal('0.12099999999999999644728632119')
# Decimal(str(0.121)) == Decimal('0.121')
DATAPOINT_SCHEMA = voluptuous.Schema({
voluptuous.Required('vol'): {
voluptuous.Required('unit'): vutils.get_string_type(),
voluptuous.Required('qty'): voluptuous.Coerce(str),
},
voluptuous.Required('rating', default={}): {
voluptuous.Required('price', default=0):
voluptuous.Coerce(str),
},
voluptuous.Required('groupby'): vutils.DictTypeValidator(str, str),
voluptuous.Required('metadata'): vutils.DictTypeValidator(str, str),
})
_DataPointBase = collections.namedtuple(
"DataPoint",
field_names=("unit", "qty", "price", "groupby", "metadata"))
class DataPoint(_DataPointBase):
def __new__(cls, unit, qty, price, groupby, metadata):
return _DataPointBase.__new__(
cls,
unit or "undefined",
# NOTE(peschk_l): avoids floating-point issues.
decimal.Decimal(str(qty) if isinstance(qty, float) else qty),
decimal.Decimal(str(price) if isinstance(price, float) else price),
datastructures.ImmutableDict(groupby),
datastructures.ImmutableDict(metadata),
)
def set_price(self, price):
"""Sets the price of the DataPoint and returns a new object."""
return self._replace(price=price)
def as_dict(self, legacy=False, mutable=False):
"""Returns a dict representation of the object.
The returned dict is immutable by default and has the
following format::
{
"vol": {
"unit": "GiB",
"qty": 1.2,
},
"rating": {
"price": 0.04,
},
"groupby": {
"group_one": "one",
"group_two": "two",
},
"metadata": {
"attr_one": "one",
"attr_two": "two",
},
}
The dict can also be returned in the legacy (v1 storage) format. In
that case, `groupby` and `metadata` will be removed and merged together
into the `desc` key.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:param mutable: Defaults to False. If True, returns a normal dict
instead of an ImmutableDict.
:type mutable: bool
"""
output = {
"vol": {
"unit": self.unit,
"qty": self.qty,
},
"rating": {
"price": self.price,
},
"groupby": dict(self.groupby) if mutable else self.groupby,
"metadata": dict(self.metadata) if mutable else self.metadata,
}
if legacy:
desc = output.pop("metadata")
desc.update(output.pop("groupby"))
output['desc'] = desc
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
"""Returns a json representation of the dict returned by `as_dict`.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:rtype: str
"""
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
"""Returns a new DataPoint instance build from a dict.
:param dict_: Dict to build the DataPoint from
:type dict_: dict
        :param legacy: Set to True to convert the dict from the legacy format
                       to the new one before validating it.
:rtype: DataPoint
"""
try:
if legacy:
dict_['groupby'] = dict_.pop('desc')
dict_['metadata'] = {}
valid = DATAPOINT_SCHEMA(dict_)
return cls(
unit=valid["vol"]["unit"],
qty=valid["vol"]["qty"],
price=valid["rating"]["price"],
groupby=valid["groupby"],
metadata=valid["metadata"],
)
except (voluptuous.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataPoint: {}".format(dict_, e))
@property
def desc(self):
output = dict(self.metadata)
output.update(self.groupby)
return datastructures.ImmutableDict(output)
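# Usage sketch (illustrative only, not part of the original module): DataPoint
# is an immutable namedtuple, so "mutation" returns a new instance.
#   point = DataPoint("GiB", 1.2, 0.04, {"project": "demo"}, {"flavor": "small"})
#   point = point.set_price(decimal.Decimal("0.05"))
#   assert point.qty == decimal.Decimal("1.2")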
DATAFRAME_SCHEMA = voluptuous.Schema({
voluptuous.Required('period'): {
voluptuous.Required('begin'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
voluptuous.Required('end'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
},
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, DataPoint.from_dict),
})
class DataFrame(object):
__slots__ = ("start", "end", "_usage")
def __init__(self, start, end, usage=None):
if not isinstance(start, datetime.datetime):
raise TypeError(
'"start" must be of type datetime.datetime, not {}'.format(
type(start)))
if not isinstance(end, datetime.datetime):
raise TypeError(
'"end" must be of type datetime.datetime, not {}'.format(
type(end)))
if usage is not None and not isinstance(usage, dict):
raise TypeError(
'"usage" must be a dict, not {}'.format(type(usage)))
self.start = start
self.end = end
self._usage = collections.OrderedDict()
if usage:
for key in sorted(usage.keys()):
self.add_points(usage[key], key)
def as_dict(self, legacy=False, mutable=False):
output = {
"period": {"begin": self.start, "end": self.end},
"usage": {
key: [v.as_dict(legacy=legacy, mutable=mutable) for v in val]
for key, val in self._usage.items()
},
}
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
try:
schema = DATAFRAME_SCHEMA
if legacy:
validator = functools.partial(DataPoint.from_dict, legacy=True)
# NOTE(peschk_l): __name__ is required for voluptuous exception
# message formatting
validator.__name__ = 'DataPoint.from_dict'
# NOTE(peschk_l): In case the legacy format is required, we
# create a new schema where DataPoint.from_dict is called with
# legacy=True. The "extend" method does create a new objects,
# and replaces existing keys with new ones.
schema = DATAFRAME_SCHEMA.extend({
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, validator
),
})
valid = schema(dict_)
return cls(
valid["period"]["begin"],
valid["period"]["end"],
usage=valid["usage"])
except (voluptuous.error.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataFrame: {}".format(dict_, e))
def add_points(self, points, type_):
"""Adds multiple points to the DataFrame
:param points: DataPoints to add.
        :type points: list of DataPoint
"""
if type_ in self._usage:
self._usage[type_] += points
else:
self._usage[type_] = points
def add_point(self, point, type_):
"""Adds a single point to the DataFrame
:param point: DataPoint to add.
:type point: DataPoint
"""
if type_ in self._usage:
self._usage[type_].append(point)
else:
self._usage[type_] = [point]
def iterpoints(self):
"""Iterates over all datapoints of the dataframe.
Yields (type, point) tuples.
:rtype: (str, DataPoint)
"""
for type_, points in self._usage.items():
for point in points:
yield type_, point
def itertypes(self):
"""Iterates over all types of the dataframe.
Yields (type, (point, )) tuples.
:rtype: (str, (DataPoint, ))
"""
for type_, points in self._usage.items():
yield type_, points
def __repr__(self):
return 'DataFrame(metrics=[{}])'.format(','.join(self._usage.keys()))
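# Usage sketch (illustrative only, not part of the original module):
#   begin = datetime.datetime(2020, 1, 1)
#   frame = DataFrame(begin, begin + datetime.timedelta(hours=1))
#   frame.add_point(point, "cpu")  # "point" as in the DataPoint sketch above
#   for metric_type, pt in frame.iterpoints():
#       ...  # yields ("cpu", DataPoint(...))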
| 34.864286
| 79
| 0.571809
|
import collections
import datetime
import decimal
import functools
import voluptuous
from werkzeug import datastructures
from cloudkitty.utils import json
from cloudkitty.utils import tz as tzutils
from cloudkitty.utils import validation as vutils
DATAPOINT_SCHEMA = voluptuous.Schema({
voluptuous.Required('vol'): {
voluptuous.Required('unit'): vutils.get_string_type(),
voluptuous.Required('qty'): voluptuous.Coerce(str),
},
voluptuous.Required('rating', default={}): {
voluptuous.Required('price', default=0):
voluptuous.Coerce(str),
},
voluptuous.Required('groupby'): vutils.DictTypeValidator(str, str),
voluptuous.Required('metadata'): vutils.DictTypeValidator(str, str),
})
_DataPointBase = collections.namedtuple(
"DataPoint",
field_names=("unit", "qty", "price", "groupby", "metadata"))
class DataPoint(_DataPointBase):
def __new__(cls, unit, qty, price, groupby, metadata):
return _DataPointBase.__new__(
cls,
unit or "undefined",
decimal.Decimal(str(qty) if isinstance(qty, float) else qty),
decimal.Decimal(str(price) if isinstance(price, float) else price),
datastructures.ImmutableDict(groupby),
datastructures.ImmutableDict(metadata),
)
def set_price(self, price):
return self._replace(price=price)
def as_dict(self, legacy=False, mutable=False):
output = {
"vol": {
"unit": self.unit,
"qty": self.qty,
},
"rating": {
"price": self.price,
},
"groupby": dict(self.groupby) if mutable else self.groupby,
"metadata": dict(self.metadata) if mutable else self.metadata,
}
if legacy:
desc = output.pop("metadata")
desc.update(output.pop("groupby"))
output['desc'] = desc
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
try:
if legacy:
dict_['groupby'] = dict_.pop('desc')
dict_['metadata'] = {}
valid = DATAPOINT_SCHEMA(dict_)
return cls(
unit=valid["vol"]["unit"],
qty=valid["vol"]["qty"],
price=valid["rating"]["price"],
groupby=valid["groupby"],
metadata=valid["metadata"],
)
except (voluptuous.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataPoint: {}".format(dict_, e))
@property
def desc(self):
output = dict(self.metadata)
output.update(self.groupby)
return datastructures.ImmutableDict(output)
DATAFRAME_SCHEMA = voluptuous.Schema({
voluptuous.Required('period'): {
voluptuous.Required('begin'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
voluptuous.Required('end'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
},
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, DataPoint.from_dict),
})
class DataFrame(object):
__slots__ = ("start", "end", "_usage")
def __init__(self, start, end, usage=None):
if not isinstance(start, datetime.datetime):
raise TypeError(
'"start" must be of type datetime.datetime, not {}'.format(
type(start)))
if not isinstance(end, datetime.datetime):
raise TypeError(
'"end" must be of type datetime.datetime, not {}'.format(
type(end)))
if usage is not None and not isinstance(usage, dict):
raise TypeError(
'"usage" must be a dict, not {}'.format(type(usage)))
self.start = start
self.end = end
self._usage = collections.OrderedDict()
if usage:
for key in sorted(usage.keys()):
self.add_points(usage[key], key)
def as_dict(self, legacy=False, mutable=False):
output = {
"period": {"begin": self.start, "end": self.end},
"usage": {
key: [v.as_dict(legacy=legacy, mutable=mutable) for v in val]
for key, val in self._usage.items()
},
}
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
try:
schema = DATAFRAME_SCHEMA
if legacy:
validator = functools.partial(DataPoint.from_dict, legacy=True)
# NOTE(peschk_l): __name__ is required for voluptuous exception
# message formatting
validator.__name__ = 'DataPoint.from_dict'
# NOTE(peschk_l): In case the legacy format is required, we
# create a new schema where DataPoint.from_dict is called with
# legacy=True. The "extend" method does create a new objects,
# and replaces existing keys with new ones.
schema = DATAFRAME_SCHEMA.extend({
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, validator
),
})
valid = schema(dict_)
return cls(
valid["period"]["begin"],
valid["period"]["end"],
usage=valid["usage"])
except (voluptuous.error.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataFrame: {}".format(dict_, e))
def add_points(self, points, type_):
if type_ in self._usage:
self._usage[type_] += points
else:
self._usage[type_] = points
def add_point(self, point, type_):
if type_ in self._usage:
self._usage[type_].append(point)
else:
self._usage[type_] = [point]
def iterpoints(self):
for type_, points in self._usage.items():
for point in points:
yield type_, point
def itertypes(self):
for type_, points in self._usage.items():
yield type_, points
def __repr__(self):
return 'DataFrame(metrics=[{}])'.format(','.join(self._usage.keys()))
| true
| true
|
f709da8f28d7264755b7b14dc5e8586d8b1e7424
| 2,523
|
py
|
Python
|
src/collective/solr/browser/controlpanel.py
|
IMIO/collective.solr
|
844219eb3968b34d2b83a7bd5f59340d676d149e
|
[
"ZPL-1.1"
] | null | null | null |
src/collective/solr/browser/controlpanel.py
|
IMIO/collective.solr
|
844219eb3968b34d2b83a7bd5f59340d676d149e
|
[
"ZPL-1.1"
] | null | null | null |
src/collective/solr/browser/controlpanel.py
|
IMIO/collective.solr
|
844219eb3968b34d2b83a7bd5f59340d676d149e
|
[
"ZPL-1.1"
] | null | null | null |
# -*- coding: utf-8 -*-
from plone.app.registry.browser import controlpanel
from plone.protect.interfaces import IDisableCSRFProtection
from collective.solr.interfaces import ISolrSchema, _
from plone.restapi.controlpanels import RegistryConfigletPanel
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.PythonScripts.PythonScript import PythonScript
from zope.component import adapter
from zope.interface import alsoProvides
from zope.interface import Interface
@adapter(Interface, Interface)
class SolrControlpanelAdapter(RegistryConfigletPanel):
schema = ISolrSchema
configlet_id = "SolrSettings"
configlet_category_id = "Products"
schema_prefix = "collective.solr"
class SolrControlPanelForm(controlpanel.RegistryEditForm):
id = "SolrControlPanel"
label = _("label_solr_settings", default="Solr settings")
schema = ISolrSchema
schema_prefix = "collective.solr"
boost_script_id = "solr_boost_index_values"
def getContent(self):
content = super(SolrControlPanelForm, self).getContent()
portal = self.context
if self.boost_script_id in portal:
boost_script = safe_unicode(portal[self.boost_script_id].read())
# strip script metadata for display
content.boost_script = "\n".join(
[
line
for line in boost_script.splitlines()
if not line.startswith("##")
]
)
alsoProvides(self.request, IDisableCSRFProtection)
return content
def applyChanges(self, data):
changes = super(SolrControlPanelForm, self).applyChanges(data)
boost_script = data.get("boost_script", "")
if "##parameters=data\n" not in boost_script:
boost_script = "##parameters=data\n" + boost_script
portal = self.context
if self.boost_script_id not in self.context:
# "special" documents get boosted during indexing...
portal[self.boost_script_id] = PythonScript(self.boost_script_id)
# since we create a PythonScript in ZODB we need to
# disable CSRF protection
alsoProvides(self.request, IDisableCSRFProtection)
portal[self.boost_script_id].write(boost_script)
return changes
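# Illustration (assumption, not part of this module): Zope PythonScript bodies
# carry "##"-prefixed metadata lines; getContent() strips them for display and
# applyChanges() re-adds the required parameter declaration, e.g.
#   ##parameters=data
#   <user-edited boost script body>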
class SolrControlPanel(controlpanel.ControlPanelFormWrapper):
form = SolrControlPanelForm
index = ViewPageTemplateFile("controlpanel.pt")
| 37.656716
| 77
| 0.70432
|
from plone.app.registry.browser import controlpanel
from plone.protect.interfaces import IDisableCSRFProtection
from collective.solr.interfaces import ISolrSchema, _
from plone.restapi.controlpanels import RegistryConfigletPanel
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.PythonScripts.PythonScript import PythonScript
from zope.component import adapter
from zope.interface import alsoProvides
from zope.interface import Interface
@adapter(Interface, Interface)
class SolrControlpanelAdapter(RegistryConfigletPanel):
schema = ISolrSchema
configlet_id = "SolrSettings"
configlet_category_id = "Products"
schema_prefix = "collective.solr"
class SolrControlPanelForm(controlpanel.RegistryEditForm):
id = "SolrControlPanel"
label = _("label_solr_settings", default="Solr settings")
schema = ISolrSchema
schema_prefix = "collective.solr"
boost_script_id = "solr_boost_index_values"
def getContent(self):
content = super(SolrControlPanelForm, self).getContent()
portal = self.context
if self.boost_script_id in portal:
boost_script = safe_unicode(portal[self.boost_script_id].read())
content.boost_script = "\n".join(
[
line
for line in boost_script.splitlines()
if not line.startswith("##")
]
)
alsoProvides(self.request, IDisableCSRFProtection)
return content
def applyChanges(self, data):
changes = super(SolrControlPanelForm, self).applyChanges(data)
boost_script = data.get("boost_script", "")
if "##parameters=data\n" not in boost_script:
boost_script = "##parameters=data\n" + boost_script
portal = self.context
if self.boost_script_id not in self.context:
portal[self.boost_script_id] = PythonScript(self.boost_script_id)
alsoProvides(self.request, IDisableCSRFProtection)
portal[self.boost_script_id].write(boost_script)
return changes
class SolrControlPanel(controlpanel.ControlPanelFormWrapper):
form = SolrControlPanelForm
index = ViewPageTemplateFile("controlpanel.pt")
| true
| true
|
f709dac114e4abf7ce428aa013816678149fbf10
| 19
|
py
|
Python
|
btd6_memory_info/generated/SteamNative/Platform/Linux32/linux32.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/SteamNative/Platform/Linux32/linux32.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/SteamNative/Platform/Linux32/linux32.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class Linux32: pass
| 19
| 19
| 0.842105
|
class Linux32: pass
| true
| true
|
f709db49836f0a337d880632a4ab2b1e17e82610
| 1,896
|
py
|
Python
|
hooks/hook-pygame.py
|
tappi287/rf2_video_settings
|
6ae73c63f48e6d515a9efb653f236dea0494d9f1
|
[
"MIT"
] | 8
|
2020-12-09T17:34:40.000Z
|
2022-02-21T10:15:09.000Z
|
hooks/hook-pygame.py
|
tappi287/rf2_video_settings
|
6ae73c63f48e6d515a9efb653f236dea0494d9f1
|
[
"MIT"
] | 11
|
2021-02-27T00:21:47.000Z
|
2022-02-25T14:41:56.000Z
|
hooks/hook-pygame.py
|
tappi287/rf2_video_settings
|
6ae73c63f48e6d515a9efb653f236dea0494d9f1
|
[
"MIT"
] | 2
|
2021-06-28T21:11:53.000Z
|
2022-02-06T17:20:18.000Z
|
"""
binaries hook for pygame; it seems to be required for pygame 2.0 on Windows,
otherwise some essential DLLs will not be transferred to the exe.
It also adds hooks for datas, the resources that pygame uses, so they work
correctly with pyinstaller.
"""
import os
import platform
from pygame import __file__ as pygame_main_file
# Get pygame's folder
pygame_folder = os.path.dirname(os.path.abspath(pygame_main_file))
# datas is the variable that pyinstaller looks for while processing hooks
datas = []
# exclude some unneeded binaries
exclude_bin = ('libFLAC-8', 'libfreetype-6', 'libjpeg-9', 'libmodplug-1', 'libmpg123-0', 'libogg-0', 'libopus-0',
'libopusfile-0', 'libpng16-16', 'libtiff-5', 'libvorbis-0', 'libvorbisfile-3', 'libwebp-7', 'portmidi',
'SDL2_image', 'SDL2_mixer', 'SDL2_ttf')
# A helper to append the relative path of a resource to hook variable - datas
def _append_to_datas(file_path):
global datas
res_path = os.path.join(pygame_folder, file_path)
if os.path.exists(res_path):
datas.append((res_path, "pygame"))
# First append the font file, then based on the OS, append pygame icon file
_append_to_datas("freesansbold.ttf")
if platform.system() == "Darwin":
_append_to_datas("pygame_icon.tiff")
else:
_append_to_datas("pygame_icon.bmp")
if platform.system() == "Windows":
from PyInstaller.utils.hooks import collect_dynamic_libs
pre_binaries = collect_dynamic_libs('pygame')
binaries = []
for b in pre_binaries:
binary, location = b
filename = os.path.split(binary)[-1]
if filename.removesuffix('.dll') in exclude_bin:
print('Custom pygame hook excluding binary:', filename)
continue
# settles all the DLLs into the top level folder, which prevents duplication
# with the DLLs already being put there.
binaries.append((binary, "."))
| 33.857143
| 118
| 0.701477
|
import os
import platform
from pygame import __file__ as pygame_main_file
pygame_folder = os.path.dirname(os.path.abspath(pygame_main_file))
# datas is the variable that pyinstaller looks for while processing hooks
datas = []
# exclude some unneeded binaries
exclude_bin = ('libFLAC-8', 'libfreetype-6', 'libjpeg-9', 'libmodplug-1', 'libmpg123-0', 'libogg-0', 'libopus-0',
'libopusfile-0', 'libpng16-16', 'libtiff-5', 'libvorbis-0', 'libvorbisfile-3', 'libwebp-7', 'portmidi',
'SDL2_image', 'SDL2_mixer', 'SDL2_ttf')
# A helper to append the relative path of a resource to hook variable - datas
def _append_to_datas(file_path):
global datas
res_path = os.path.join(pygame_folder, file_path)
if os.path.exists(res_path):
datas.append((res_path, "pygame"))
# First append the font file, then based on the OS, append pygame icon file
_append_to_datas("freesansbold.ttf")
if platform.system() == "Darwin":
_append_to_datas("pygame_icon.tiff")
else:
_append_to_datas("pygame_icon.bmp")
if platform.system() == "Windows":
from PyInstaller.utils.hooks import collect_dynamic_libs
pre_binaries = collect_dynamic_libs('pygame')
binaries = []
for b in pre_binaries:
binary, location = b
filename = os.path.split(binary)[-1]
if filename.removesuffix('.dll') in exclude_bin:
print('Custom pygame hook excluding binary:', filename)
continue
# settles all the DLLs into the top level folder, which prevents duplication
# with the DLLs already being put there.
binaries.append((binary, "."))
| true
| true
|
f709dc15484ea8df0fa2f57cf6db9d359d0459e7
| 3,324
|
py
|
Python
|
covid/scraper/scraper/spiders/chem_archive.py
|
IanVermes/covid-timeseries
|
9ba0fe8bfb031b4cf39fe879e2ee2b4c57acfb6d
|
[
"MIT"
] | null | null | null |
covid/scraper/scraper/spiders/chem_archive.py
|
IanVermes/covid-timeseries
|
9ba0fe8bfb031b4cf39fe879e2ee2b4c57acfb6d
|
[
"MIT"
] | null | null | null |
covid/scraper/scraper/spiders/chem_archive.py
|
IanVermes/covid-timeseries
|
9ba0fe8bfb031b4cf39fe879e2ee2b4c57acfb6d
|
[
"MIT"
] | null | null | null |
import scrapy
import json
import datetime
POSTED_DATE_FORMAT = "%Y-%m-%d"
# BOOKMARK is cursor that tracks just how far back we should scrape each time
BOOKMARK = datetime.datetime(
year=2020, month=1, day=1
) # TODO factor bookmark into its own logic
class ChemRXIVSpider(scrapy.Spider):
name = "chemrxiv"
start_urls = [
"https://chemrxiv.org/api/institutions/259/items?types=&licenses=&orderBy=published_date&orderType=desc&limit=40&search=&categories=&itemTypes=articles"
]
id_prefix = "chemrxiv"
def parse(self, response):
        # ChemRxiv features an infinite scrolling site that makes a JSON request
# for 40 new items upon each scrolling event. The first request is without a
# cursor query. The first response returns 40 items + a cursor. Subsequent
# requests need this cursor.
json_data = json.loads(response.body_as_unicode())
cursor = self._extract_cursor(json_data)
article_stubs = self._extract_stubs(json_data)
dates = []
for stub in article_stubs:
data = self._process_stub(stub)
dates.append(self._get_publication_date(stub))
yield data
if dates:
oldest_date = min(dates)
else:
oldest_date = None
next_page = self._next_json_page(cursor)
if oldest_date is not None and self._is_page_new(oldest_date):
self.logger.info(f"Follow to next page: {next_page}")
yield response.follow(next_page, callback=self.parse)
else:
self.logger.info(
f"Do not follow to next page, bookmark reached: {BOOKMARK}"
)
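    # Illustrative response shape (assumption, inferred from the fields read
    # above): {"cursor": "<opaque token>", "items": [{"data": {...}}, ...]};
    # the follow-up request is start_urls[0] + "&cursor=<opaque token>".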
def _extract_cursor(self, json_data):
return json_data["cursor"]
def _extract_stubs(self, json_data):
return json_data["items"]
def _process_stub(self, stub_data):
data = {
"title": self._get_article_title(stub_data),
"url": self._get_article_url(stub_data),
"posted": self._get_article_posted_date(stub_data),
"is_revision": self._get_revision_status(stub_data),
"id": self._get_article_id(stub_data),
}
return data
def _get_article_title(self, stub_data):
return stub_data["data"]["title"]
def _get_article_url(self, stub_data):
return stub_data["data"]["publicUrl"]
def _get_article_posted_date(self, stub_data):
date_string = stub_data["data"]["timeline"]["posted"]
date_string = date_string.strip("Z")
date_time = datetime.datetime.fromisoformat(date_string)
date = date_time.strftime(POSTED_DATE_FORMAT)
return date
def _get_revision_status(self, stub_data):
version = stub_data["data"]["version"]
return version > 1
def _get_article_id(self, stub_data):
return self.id_prefix + "_" + str(stub_data["data"]["id"])
def _get_publication_date(self, stub_data):
date_string = stub_data["data"]["publishedDate"]
date_string = date_string.strip("Z")
return datetime.datetime.fromisoformat(date_string)
def _is_page_new(self, date):
return date > BOOKMARK
def _next_json_page(self, cursor):
base = self.start_urls[0]
return base + f"&cursor={cursor}"
| 33.575758
| 160
| 0.651625
|
import scrapy
import json
import datetime
POSTED_DATE_FORMAT = "%Y-%m-%d"
BOOKMARK = datetime.datetime(
year=2020, month=1, day=1
)
class ChemRXIVSpider(scrapy.Spider):
name = "chemrxiv"
start_urls = [
"https://chemrxiv.org/api/institutions/259/items?types=&licenses=&orderBy=published_date&orderType=desc&limit=40&search=&categories=&itemTypes=articles"
]
id_prefix = "chemrxiv"
def parse(self, response):
json_data = json.loads(response.body_as_unicode())
cursor = self._extract_cursor(json_data)
article_stubs = self._extract_stubs(json_data)
dates = []
for stub in article_stubs:
data = self._process_stub(stub)
dates.append(self._get_publication_date(stub))
yield data
if dates:
oldest_date = min(dates)
else:
oldest_date = None
next_page = self._next_json_page(cursor)
if oldest_date is not None and self._is_page_new(oldest_date):
self.logger.info(f"Follow to next page: {next_page}")
yield response.follow(next_page, callback=self.parse)
else:
self.logger.info(
f"Do not follow to next page, bookmark reached: {BOOKMARK}"
)
def _extract_cursor(self, json_data):
return json_data["cursor"]
def _extract_stubs(self, json_data):
return json_data["items"]
def _process_stub(self, stub_data):
data = {
"title": self._get_article_title(stub_data),
"url": self._get_article_url(stub_data),
"posted": self._get_article_posted_date(stub_data),
"is_revision": self._get_revision_status(stub_data),
"id": self._get_article_id(stub_data),
}
return data
def _get_article_title(self, stub_data):
return stub_data["data"]["title"]
def _get_article_url(self, stub_data):
return stub_data["data"]["publicUrl"]
def _get_article_posted_date(self, stub_data):
date_string = stub_data["data"]["timeline"]["posted"]
date_string = date_string.strip("Z")
date_time = datetime.datetime.fromisoformat(date_string)
date = date_time.strftime(POSTED_DATE_FORMAT)
return date
def _get_revision_status(self, stub_data):
version = stub_data["data"]["version"]
return version > 1
def _get_article_id(self, stub_data):
return self.id_prefix + "_" + str(stub_data["data"]["id"])
def _get_publication_date(self, stub_data):
date_string = stub_data["data"]["publishedDate"]
date_string = date_string.strip("Z")
return datetime.datetime.fromisoformat(date_string)
def _is_page_new(self, date):
return date > BOOKMARK
def _next_json_page(self, cursor):
base = self.start_urls[0]
return base + f"&cursor={cursor}"
| true
| true
|
f709dd6cd4a7b3bebd5b0ef0464275cfb26ad043
| 407
|
py
|
Python
|
managair_server/asgi.py
|
ClairBerlin/managair
|
44af9f73039ecdb7dd959dacda5470a103795ac3
|
[
"BSD-3-Clause"
] | 4
|
2020-11-22T17:07:14.000Z
|
2020-11-26T07:53:08.000Z
|
managair_server/asgi.py
|
ClairBerlin/managair
|
44af9f73039ecdb7dd959dacda5470a103795ac3
|
[
"BSD-3-Clause"
] | 18
|
2020-12-04T07:48:13.000Z
|
2022-01-26T18:09:33.000Z
|
managair_server/asgi.py
|
ClairBerlin/managair
|
44af9f73039ecdb7dd959dacda5470a103795ac3
|
[
"BSD-3-Clause"
] | 1
|
2021-01-15T10:41:33.000Z
|
2021-01-15T10:41:33.000Z
|
"""
ASGI config for managair_server project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "managair_server.settings")
application = get_asgi_application()
| 23.941176
| 78
| 0.793612
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "managair_server.settings")
application = get_asgi_application()
| true
| true
|
f709ddc3dbd9c527b7a989ae3912f0fba65d1fdc
| 17,847
|
py
|
Python
|
selfdrive/thermald/thermald.py
|
doudoune144/openpilot-1
|
61b8df815ff47c9cf013b0a40fd86357629595d9
|
[
"MIT"
] | null | null | null |
selfdrive/thermald/thermald.py
|
doudoune144/openpilot-1
|
61b8df815ff47c9cf013b0a40fd86357629595d9
|
[
"MIT"
] | null | null | null |
selfdrive/thermald/thermald.py
|
doudoune144/openpilot-1
|
61b8df815ff47c9cf013b0a40fd86357629595d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import datetime
import os
import time
from pathlib import Path
from typing import Dict, Optional, Tuple
from collections import namedtuple, OrderedDict
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params, ParamKeyType
from common.realtime import DT_TRML, sec_since_boot
from common.dict_helpers import strip_deprecated_keys
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIDController
from selfdrive.hardware import EON, TICI, PC, HARDWARE
from selfdrive.loggerd.config import get_available_percent
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
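# Worked example (illustrative): starting in yellow (75.0-96.0), a filtered
# temperature of 97 exceeds max_temp, so the status jumps to red; it only
# drops back to yellow once the temperature falls below red's min_temp (80.0).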
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
prebuiltfile = '/data/openpilot/prebuilt'
def read_tz(x):
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def setup_eon_fan():
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
last_eon_fan_val = None
def set_eon_fan(val):
global last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
def handle_fan_eon(controller, max_cpu_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
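# Worked example (illustrative) of the hysteresis above at max_cpu_temp == 60:
# the high thresholds give new_speed_h == 16384 (first temp_h > 60 is 65) and
# the low thresholds give new_speed_l == 32768 (first temp_l > 60 is 72.5), so
# a fan at 0 ramps up to 16384 while a fan already at 32768 stays there.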
def handle_fan_uno(controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
last_ignition = False
def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition):
global last_ignition
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
if ignition != last_ignition:
controller.reset()
target = 75
fan_pwr_out = -int(controller.update(setpoint=target,
measurement=max_cpu_temp,
feedforward=interp(target,[60, 100],[-80, 0])
))
fan_pwr_out = max(fan_pwr_out, 30) if ignition else min(fan_pwr_out, 30)
last_ignition = ignition
return fan_pwr_out
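# Worked example (illustrative): with target == 75, the feedforward term is
# interp(75, [60, 100], [-80, 0]) == -50.0, so the negated controller output
# starts near 50% fan power before the PI terms correct for the actual error.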
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
onroad_conditions = {
"ignition": False,
}
startup_conditions = {}
startup_conditions_prev = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
params = Params()
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIDController(k_p=2, k_i=2e-3, k_f=1, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
is_openpilot_dir = True
while True:
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
else:
no_panda_cnt = 0
onroad_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
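    # Reading of the check below (descriptive comment, not original): not
    # currently started, and either never started since boot or the last
    # shutdown (off_ts) happened more than 300 seconds ago.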
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_StorageMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
prebuilt_on = params.get_bool("PrebuiltOn")
if not os.path.isdir("/data/openpilot"):
if is_openpilot_dir:
os.system("cd /data/params/d; rm -f DongleId") # Delete DongleID if the Openpilot directory disappears, Seems you want to switch fork/branch.
is_openpilot_dir = False
elif not os.path.isfile(prebuiltfile) and prebuilt_on and is_openpilot_dir:
os.system("cd /data/openpilot; touch prebuilt")
elif os.path.isfile(prebuiltfile) and not prebuilt_on:
os.system("cd /data/openpilot; rm -f prebuilt")
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# If UI has crashed, set the brightness to reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
| 39.74833
| 159
| 0.724884
|
import datetime
import os
import time
from pathlib import Path
from typing import Dict, Optional, Tuple
from collections import namedtuple, OrderedDict
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params, ParamKeyType
from common.realtime import DT_TRML, sec_since_boot
from common.dict_helpers import strip_deprecated_keys
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIDController
from selfdrive.hardware import EON, TICI, PC, HARDWARE
from selfdrive.loggerd.config import get_available_percent
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15.
TEMP_TAU = 5.
DISCONNECT_TIMEOUT = 5.
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
prebuiltfile = '/data/openpilot/prebuilt'
def read_tz(x):
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def setup_eon_fan():
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
last_eon_fan_val = None
def set_eon_fan(val):
global last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
def handle_fan_eon(controller, max_cpu_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
last_ignition = False
def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition):
global last_ignition
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
if ignition != last_ignition:
controller.reset()
target = 75
fan_pwr_out = -int(controller.update(setpoint=target,
measurement=max_cpu_temp,
feedforward=interp(target,[60, 100],[-80, 0])
))
fan_pwr_out = max(fan_pwr_out, 30) if ignition else min(fan_pwr_out, 30)
last_ignition = ignition
return fan_pwr_out
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
onroad_conditions = {
"ignition": False,
}
startup_conditions = {}
startup_conditions_prev = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
params = Params()
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIDController(k_p=2, k_i=2e-3, k_f=1, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
is_openpilot_dir = True
while True:
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
else:
no_panda_cnt = 0
onroad_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_StorageMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
prebuilt_on = params.get_bool("PrebuiltOn")
if not os.path.isdir("/data/openpilot"):
if is_openpilot_dir:
os.system("cd /data/params/d; rm -f DongleId") # Delete DongleID if the Openpilot directory disappears, Seems you want to switch fork/branch.
is_openpilot_dir = False
elif not os.path.isfile(prebuiltfile) and prebuilt_on and is_openpilot_dir:
os.system("cd /data/openpilot; touch prebuilt")
elif os.path.isfile(prebuiltfile) and not prebuilt_on:
os.system("cd /data/openpilot; rm -f prebuilt")
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# If UI has crashed, set the brightness to reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
| true
| true
|
f709dee36890489202122bfddea93ac90a757167
| 788
|
py
|
Python
|
utils.py
|
zhaohengyin/irgail_example
|
89f7661b5ab08bdf79686eaf8933ad7b5badced4
|
[
"MIT"
] | 1
|
2021-11-01T16:39:38.000Z
|
2021-11-01T16:39:38.000Z
|
utils.py
|
zhaohengyin/irgail_example
|
89f7661b5ab08bdf79686eaf8933ad7b5badced4
|
[
"MIT"
] | null | null | null |
utils.py
|
zhaohengyin/irgail_example
|
89f7661b5ab08bdf79686eaf8933ad7b5badced4
|
[
"MIT"
] | 1
|
2022-03-31T08:41:24.000Z
|
2022-03-31T08:41:24.000Z
|
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
def build_mlp(input_dim, output_dim, hidden_units=[64, 64],
hidden_activation=nn.Tanh(), output_activation=None):
layers = []
units = input_dim
for next_units in hidden_units:
layers.append(nn.Linear(units, next_units))
layers.append(hidden_activation)
units = next_units
layers.append(nn.Linear(units, output_dim))
if output_activation is not None:
layers.append(output_activation)
return nn.Sequential(*layers)
def dict_concat(x):
return torch.cat([value for key, value in x.items()], dim=0)
def dict_config_concat(x):
return torch.cat([torch.cat((value, key.repeat(value.size(0),1)), dim=1) for key, value in x.items()], dim=0)
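# --- Illustrative usage: a hedged sketch, not part of the original module; the
# --- shapes and values below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    net = build_mlp(input_dim=3, output_dim=2)
    x = torch.randn(5, 3)
    print(net(x).shape)  # torch.Size([5, 2])
    # dict_concat stacks all value tensors along dim 0.
    batches = {'a': torch.zeros(2, 3), 'b': torch.ones(4, 3)}
    print(dict_concat(batches).shape)  # torch.Size([6, 3])
    # dict_config_concat assumes each key is itself a (1, k) config tensor that
    # is appended column-wise to every row of its value tensor.
    configs = {torch.tensor([[0.5]]): torch.zeros(2, 3)}
    print(dict_config_concat(configs).shape)  # torch.Size([2, 4])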
| 32.833333
| 113
| 0.694162
|
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
def build_mlp(input_dim, output_dim, hidden_units=[64, 64],
hidden_activation=nn.Tanh(), output_activation=None):
layers = []
units = input_dim
for next_units in hidden_units:
layers.append(nn.Linear(units, next_units))
layers.append(hidden_activation)
units = next_units
layers.append(nn.Linear(units, output_dim))
if output_activation is not None:
layers.append(output_activation)
return nn.Sequential(*layers)
def dict_concat(x):
return torch.cat([value for key, value in x.items()], dim=0)
def dict_config_concat(x):
return torch.cat([torch.cat((value, key.repeat(value.size(0),1)), dim=1) for key, value in x.items()], dim=0)
| true
| true
|
f709dee74479c981102c7a864b5177e2cac5f198
| 3,167
|
py
|
Python
|
aiida_sirius/calculations/md.py
|
simonpintarelli/aiida-sirius
|
5dc968cc4a98a5d0b018f54c4c7023b2a2682795
|
[
"MIT"
] | null | null | null |
aiida_sirius/calculations/md.py
|
simonpintarelli/aiida-sirius
|
5dc968cc4a98a5d0b018f54c4c7023b2a2682795
|
[
"MIT"
] | null | null | null |
aiida_sirius/calculations/md.py
|
simonpintarelli/aiida-sirius
|
5dc968cc4a98a5d0b018f54c4c7023b2a2682795
|
[
"MIT"
] | null | null | null |
from .scf_base import SiriusBaseCalculation, make_sirius_json
from aiida.plugins import DataFactory
from aiida.common import datastructures
import tempfile
import json
import yaml
import six
SiriusMDParameters = DataFactory('sirius.md')
SinglefileData = DataFactory('singlefile')
ArrayData = DataFactory('array')
List = DataFactory('list')
class SiriusMDCalculation(SiriusBaseCalculation):
@classmethod
def define(cls, spec):
super(SiriusMDCalculation, cls).define(spec)
spec.input('sirius_md_params', valid_type=SiriusMDParameters, help='MD Parameters')
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='sirius.md')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='sirius.md.out')
spec.output('md', valid_type=SinglefileData)
spec.output('md_results', valid_type=List)
def prepare_for_submission(self, folder):
"""
        Create the input files for the calculation:
sirius.json,
input.yml
:param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files needed by
the calculation.
:return: `aiida.common.datastructures.CalcInfo` instance
"""
codeinfo = datastructures.CodeInfo()
output_filename = self.metadata.options.output_filename
codeinfo.cmdline_params = ['--input=input.yml']
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
# with config from input
structure = self.inputs.structure
kpoints = self.inputs.kpoints
magnetization = self.inputs.magnetization
# sirius_json = make_sirius_json(self.inputs.sirius_config.get_dict()['parameters'],
sirius_json = self.inputs.sirius_config.get_dict()
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as sirius_tmpfile:
# insert Pseudopotentials directly into json
sirius_json = self._read_pseudos(sirius_json)
# dump to file
json.dump(sirius_json, sirius_tmpfile)
sirius_config = SinglefileData(file=sirius_tmpfile.name)
sirius_config.store()
        # prepare YAML input for MD
with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as sirius_md_yaml:
out = yaml.dump({'parameters': self.inputs.sirius_md_params.get_dict()})
md_tmpfile_name = sirius_md_yaml.name
sirius_md_yaml.write(out)
sirius_md_config = SinglefileData(file=md_tmpfile_name)
sirius_md_config.store()
# Prepare a `CalcInfo` to be returned to the engine
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(sirius_config.uuid, sirius_config.filename, 'sirius.json'),
(sirius_md_config.uuid, sirius_md_config.filename, 'input.yml')
]
calcinfo.retrieve_list = [self.metadata.options.output_filename, 'md_results.json']
return calcinfo
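# Orientation note (a hedged summary of the staged files, inferred from the
# code above; concrete contents depend on the provided input nodes):
#   sirius.json -- full SIRIUS configuration with pseudopotentials inlined by
#                  `_read_pseudos`
#   input.yml   -- {'parameters': <sirius_md_params dict>}, consumed via the
#                  `--input=input.yml` command-line flag
# The engine retrieves the stdout file and 'md_results.json' after the run.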
| 43.383562
| 117
| 0.696558
|
from .scf_base import SiriusBaseCalculation, make_sirius_json
from aiida.plugins import DataFactory
from aiida.common import datastructures
import tempfile
import json
import yaml
import six
SiriusMDParameters = DataFactory('sirius.md')
SinglefileData = DataFactory('singlefile')
ArrayData = DataFactory('array')
List = DataFactory('list')
class SiriusMDCalculation(SiriusBaseCalculation):
@classmethod
def define(cls, spec):
super(SiriusMDCalculation, cls).define(spec)
spec.input('sirius_md_params', valid_type=SiriusMDParameters, help='MD Parameters')
spec.input('metadata.options.parser_name', valid_type=six.string_types, default='sirius.md')
spec.input('metadata.options.output_filename', valid_type=six.string_types, default='sirius.md.out')
spec.output('md', valid_type=SinglefileData)
spec.output('md_results', valid_type=List)
def prepare_for_submission(self, folder):
codeinfo = datastructures.CodeInfo()
output_filename = self.metadata.options.output_filename
codeinfo.cmdline_params = ['--input=input.yml']
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self.metadata.options.output_filename
codeinfo.withmpi = self.inputs.metadata.options.withmpi
structure = self.inputs.structure
kpoints = self.inputs.kpoints
magnetization = self.inputs.magnetization
sirius_json = self.inputs.sirius_config.get_dict()
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as sirius_tmpfile:
sirius_json = self._read_pseudos(sirius_json)
json.dump(sirius_json, sirius_tmpfile)
sirius_config = SinglefileData(file=sirius_tmpfile.name)
sirius_config.store()
with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as sirius_md_yaml:
out = yaml.dump({'parameters': self.inputs.sirius_md_params.get_dict()})
md_tmpfile_name = sirius_md_yaml.name
sirius_md_yaml.write(out)
sirius_md_config = SinglefileData(file=md_tmpfile_name)
sirius_md_config.store()
calcinfo = datastructures.CalcInfo()
calcinfo.codes_info = [codeinfo]
calcinfo.local_copy_list = [
(sirius_config.uuid, sirius_config.filename, 'sirius.json'),
(sirius_md_config.uuid, sirius_md_config.filename, 'input.yml')
]
calcinfo.retrieve_list = [self.metadata.options.output_filename, 'md_results.json']
return calcinfo
| true
| true
|
f709e0169cf531779c077a3169173978a2d4127b
| 14,174
|
py
|
Python
|
homeassistant/components/sensor/netatmo.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 3
|
2019-01-31T13:41:37.000Z
|
2020-05-20T14:22:18.000Z
|
homeassistant/components/sensor/netatmo.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:32:11.000Z
|
2022-01-13T01:19:23.000Z
|
homeassistant/components/sensor/netatmo.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 1
|
2021-05-31T08:13:56.000Z
|
2021-05-31T08:13:56.000Z
|
"""
Support for the NetAtmo Weather Service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.netatmo/
"""
import logging
from time import time
import threading
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
TEMP_CELSIUS, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE,
STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MODULES = 'modules'
CONF_STATION = 'station'
DEPENDENCIES = ['netatmo']
# This is the NetAtmo data upload interval in seconds
NETATMO_UPDATE_INTERVAL = 600
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_CELSIUS, None,
DEVICE_CLASS_TEMPERATURE],
'co2': ['CO2', 'ppm', 'mdi:cloud', None],
'pressure': ['Pressure', 'mbar', 'mdi:gauge', None],
'noise': ['Noise', 'dB', 'mdi:volume-high', None],
'humidity': ['Humidity', '%', None, DEVICE_CLASS_HUMIDITY],
'rain': ['Rain', 'mm', 'mdi:weather-rainy', None],
'sum_rain_1': ['sum_rain_1', 'mm', 'mdi:weather-rainy', None],
'sum_rain_24': ['sum_rain_24', 'mm', 'mdi:weather-rainy', None],
'battery_vp': ['Battery', '', 'mdi:battery', None],
'battery_lvl': ['Battery_lvl', '', 'mdi:battery', None],
'min_temp': ['Min Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'max_temp': ['Max Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'windangle': ['Angle', '', 'mdi:compass', None],
'windangle_value': ['Angle Value', 'º', 'mdi:compass', None],
'windstrength': ['Strength', 'km/h', 'mdi:weather-windy', None],
'gustangle': ['Gust Angle', '', 'mdi:compass', None],
'gustangle_value': ['Gust Angle Value', 'º', 'mdi:compass', None],
'guststrength': ['Gust Strength', 'km/h', 'mdi:weather-windy', None],
'rf_status': ['Radio', '', 'mdi:signal', None],
'rf_status_lvl': ['Radio_lvl', '', 'mdi:signal', None],
'wifi_status': ['Wifi', '', 'mdi:wifi', None],
'wifi_status_lvl': ['Wifi_lvl', 'dBm', 'mdi:wifi', None],
}
MODULE_SCHEMA = vol.Schema({
vol.Required(cv.string):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_STATION): cv.string,
vol.Optional(CONF_MODULES): MODULE_SCHEMA,
})
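# A hedged configuration sketch (the station/module names below are invented,
# shown only to illustrate the schema above):
#
# sensor:
#   - platform: netatmo
#     station: MyStation
#     modules:
#       Indoor:
#         - temperature
#         - co2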
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Netatmo weather sensors."""
netatmo = hass.components.netatmo
data = NetAtmoData(netatmo.NETATMO_AUTH, config.get(CONF_STATION, None))
dev = []
import pyatmo
try:
if CONF_MODULES in config:
# Iterate each module
for module_name, monitored_conditions in\
config[CONF_MODULES].items():
# Test if module exists
if module_name not in data.get_module_names():
_LOGGER.error('Module name: "%s" not found', module_name)
continue
# Only create sensors for monitored properties
for variable in monitored_conditions:
dev.append(NetAtmoSensor(data, module_name, variable))
else:
for module_name in data.get_module_names():
for variable in\
data.station_data.monitoredConditions(module_name):
if variable in SENSOR_TYPES.keys():
dev.append(NetAtmoSensor(data, module_name, variable))
else:
_LOGGER.warning("Ignoring unknown var %s for mod %s",
variable, module_name)
except pyatmo.NoDevice:
return None
add_entities(dev, True)
class NetAtmoSensor(Entity):
"""Implementation of a Netatmo sensor."""
def __init__(self, netatmo_data, module_name, sensor_type):
"""Initialize the sensor."""
self._name = 'Netatmo {} {}'.format(module_name,
SENSOR_TYPES[sensor_type][0])
self.netatmo_data = netatmo_data
self.module_name = module_name
self.type = sensor_type
self._state = None
self._device_class = SENSOR_TYPES[self.type][3]
self._icon = SENSOR_TYPES[self.type][2]
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
module_id = self.netatmo_data.\
station_data.moduleByName(module=module_name)['_id']
self.module_id = module_id[1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self.netatmo_data.update()
data = self.netatmo_data.data.get(self.module_name)
if data is None:
_LOGGER.warning("No data found for %s", self.module_name)
self._state = STATE_UNKNOWN
return
if self.type == 'temperature':
self._state = round(data['Temperature'], 1)
elif self.type == 'humidity':
self._state = data['Humidity']
elif self.type == 'rain':
self._state = data['Rain']
elif self.type == 'sum_rain_1':
self._state = data['sum_rain_1']
elif self.type == 'sum_rain_24':
self._state = data['sum_rain_24']
elif self.type == 'noise':
self._state = data['Noise']
elif self.type == 'co2':
self._state = data['CO2']
elif self.type == 'pressure':
self._state = round(data['Pressure'], 1)
elif self.type == 'battery_lvl':
self._state = data['battery_vp']
elif self.type == 'battery_vp' and self.module_id == '6':
if data['battery_vp'] >= 5590:
self._state = "Full"
elif data['battery_vp'] >= 5180:
self._state = "High"
elif data['battery_vp'] >= 4770:
self._state = "Medium"
elif data['battery_vp'] >= 4360:
self._state = "Low"
elif data['battery_vp'] < 4360:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '5':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '3':
if data['battery_vp'] >= 5640:
self._state = "Full"
elif data['battery_vp'] >= 5280:
self._state = "High"
elif data['battery_vp'] >= 4920:
self._state = "Medium"
elif data['battery_vp'] >= 4560:
self._state = "Low"
elif data['battery_vp'] < 4560:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '2':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'min_temp':
self._state = data['min_temp']
elif self.type == 'max_temp':
self._state = data['max_temp']
elif self.type == 'windangle_value':
self._state = data['WindAngle']
elif self.type == 'windangle':
if data['WindAngle'] >= 330:
self._state = "N (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 240:
self._state = "W (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 150:
self._state = "S (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 60:
self._state = "E (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 0:
self._state = "N (%d\xb0)" % data['WindAngle']
elif self.type == 'windstrength':
self._state = data['WindStrength']
elif self.type == 'gustangle_value':
self._state = data['GustAngle']
elif self.type == 'gustangle':
if data['GustAngle'] >= 330:
self._state = "N (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 240:
self._state = "W (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 150:
self._state = "S (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 60:
self._state = "E (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 0:
self._state = "N (%d\xb0)" % data['GustAngle']
elif self.type == 'guststrength':
self._state = data['GustStrength']
elif self.type == 'rf_status_lvl':
self._state = data['rf_status']
elif self.type == 'rf_status':
if data['rf_status'] >= 90:
self._state = "Low"
elif data['rf_status'] >= 76:
self._state = "Medium"
elif data['rf_status'] >= 60:
self._state = "High"
elif data['rf_status'] <= 59:
self._state = "Full"
elif self.type == 'wifi_status_lvl':
self._state = data['wifi_status']
elif self.type == 'wifi_status':
if data['wifi_status'] >= 86:
self._state = "Low"
elif data['wifi_status'] >= 71:
self._state = "Medium"
elif data['wifi_status'] >= 56:
self._state = "High"
elif data['wifi_status'] <= 55:
self._state = "Full"
class NetAtmoData:
"""Get the latest data from NetAtmo."""
def __init__(self, auth, station):
"""Initialize the data object."""
self.auth = auth
self.data = None
self.station_data = None
self.station = station
self._next_update = time()
self._update_in_progress = threading.Lock()
def get_module_names(self):
"""Return all module available on the API as a list."""
self.update()
return self.data.keys()
def update(self):
"""Call the Netatmo API to update the data.
        This method is not throttled by the built-in Throttle decorator
        but by custom logic that takes into account the time
of the last update from the cloud.
"""
if time() < self._next_update or \
not self._update_in_progress.acquire(False):
return
try:
import pyatmo
try:
self.station_data = pyatmo.WeatherStationData(self.auth)
except TypeError:
_LOGGER.error("Failed to connect to NetAtmo")
return # finally statement will be executed
if self.station is not None:
self.data = self.station_data.lastData(
station=self.station, exclude=3600)
else:
self.data = self.station_data.lastData(exclude=3600)
newinterval = 0
for module in self.data:
if 'When' in self.data[module]:
newinterval = self.data[module]['When']
break
if newinterval:
# Try and estimate when fresh data will be available
newinterval += NETATMO_UPDATE_INTERVAL - time()
if newinterval > NETATMO_UPDATE_INTERVAL - 30:
newinterval = NETATMO_UPDATE_INTERVAL
else:
if newinterval < NETATMO_UPDATE_INTERVAL / 2:
# Never hammer the NetAtmo API more than
# twice per update interval
newinterval = NETATMO_UPDATE_INTERVAL / 2
_LOGGER.info(
"NetAtmo refresh interval reset to %d seconds",
newinterval)
else:
# Last update time not found, fall back to default value
newinterval = NETATMO_UPDATE_INTERVAL
self._next_update = time() + newinterval
finally:
self._update_in_progress.release()
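# Throttling summary (restating the logic above, not new behavior): the next
# refresh is scheduled from the station's last 'When' timestamp plus
# NETATMO_UPDATE_INTERVAL, bounded between NETATMO_UPDATE_INTERVAL / 2 and
# NETATMO_UPDATE_INTERVAL, so the API is polled at most twice per upload
# interval.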
| 39.703081
| 78
| 0.546494
|
import logging
from time import time
import threading
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
TEMP_CELSIUS, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE,
STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MODULES = 'modules'
CONF_STATION = 'station'
DEPENDENCIES = ['netatmo']
NETATMO_UPDATE_INTERVAL = 600
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_CELSIUS, None,
DEVICE_CLASS_TEMPERATURE],
'co2': ['CO2', 'ppm', 'mdi:cloud', None],
'pressure': ['Pressure', 'mbar', 'mdi:gauge', None],
'noise': ['Noise', 'dB', 'mdi:volume-high', None],
'humidity': ['Humidity', '%', None, DEVICE_CLASS_HUMIDITY],
'rain': ['Rain', 'mm', 'mdi:weather-rainy', None],
'sum_rain_1': ['sum_rain_1', 'mm', 'mdi:weather-rainy', None],
'sum_rain_24': ['sum_rain_24', 'mm', 'mdi:weather-rainy', None],
'battery_vp': ['Battery', '', 'mdi:battery', None],
'battery_lvl': ['Battery_lvl', '', 'mdi:battery', None],
'min_temp': ['Min Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'max_temp': ['Max Temp.', TEMP_CELSIUS, 'mdi:thermometer', None],
'windangle': ['Angle', '', 'mdi:compass', None],
'windangle_value': ['Angle Value', 'º', 'mdi:compass', None],
'windstrength': ['Strength', 'km/h', 'mdi:weather-windy', None],
'gustangle': ['Gust Angle', '', 'mdi:compass', None],
'gustangle_value': ['Gust Angle Value', 'º', 'mdi:compass', None],
'guststrength': ['Gust Strength', 'km/h', 'mdi:weather-windy', None],
'rf_status': ['Radio', '', 'mdi:signal', None],
'rf_status_lvl': ['Radio_lvl', '', 'mdi:signal', None],
'wifi_status': ['Wifi', '', 'mdi:wifi', None],
'wifi_status_lvl': ['Wifi_lvl', 'dBm', 'mdi:wifi', None],
}
MODULE_SCHEMA = vol.Schema({
vol.Required(cv.string):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_STATION): cv.string,
vol.Optional(CONF_MODULES): MODULE_SCHEMA,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
netatmo = hass.components.netatmo
data = NetAtmoData(netatmo.NETATMO_AUTH, config.get(CONF_STATION, None))
dev = []
import pyatmo
try:
if CONF_MODULES in config:
for module_name, monitored_conditions in\
config[CONF_MODULES].items():
if module_name not in data.get_module_names():
_LOGGER.error('Module name: "%s" not found', module_name)
continue
for variable in monitored_conditions:
dev.append(NetAtmoSensor(data, module_name, variable))
else:
for module_name in data.get_module_names():
for variable in\
data.station_data.monitoredConditions(module_name):
if variable in SENSOR_TYPES.keys():
dev.append(NetAtmoSensor(data, module_name, variable))
else:
_LOGGER.warning("Ignoring unknown var %s for mod %s",
variable, module_name)
except pyatmo.NoDevice:
return None
add_entities(dev, True)
class NetAtmoSensor(Entity):
def __init__(self, netatmo_data, module_name, sensor_type):
self._name = 'Netatmo {} {}'.format(module_name,
SENSOR_TYPES[sensor_type][0])
self.netatmo_data = netatmo_data
self.module_name = module_name
self.type = sensor_type
self._state = None
self._device_class = SENSOR_TYPES[self.type][3]
self._icon = SENSOR_TYPES[self.type][2]
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
module_id = self.netatmo_data.\
station_data.moduleByName(module=module_name)['_id']
self.module_id = module_id[1]
@property
def name(self):
return self._name
@property
def icon(self):
return self._icon
@property
def device_class(self):
return self._device_class
@property
def state(self):
return self._state
@property
def unit_of_measurement(self):
return self._unit_of_measurement
def update(self):
self.netatmo_data.update()
data = self.netatmo_data.data.get(self.module_name)
if data is None:
_LOGGER.warning("No data found for %s", self.module_name)
self._state = STATE_UNKNOWN
return
if self.type == 'temperature':
self._state = round(data['Temperature'], 1)
elif self.type == 'humidity':
self._state = data['Humidity']
elif self.type == 'rain':
self._state = data['Rain']
elif self.type == 'sum_rain_1':
self._state = data['sum_rain_1']
elif self.type == 'sum_rain_24':
self._state = data['sum_rain_24']
elif self.type == 'noise':
self._state = data['Noise']
elif self.type == 'co2':
self._state = data['CO2']
elif self.type == 'pressure':
self._state = round(data['Pressure'], 1)
elif self.type == 'battery_lvl':
self._state = data['battery_vp']
elif self.type == 'battery_vp' and self.module_id == '6':
if data['battery_vp'] >= 5590:
self._state = "Full"
elif data['battery_vp'] >= 5180:
self._state = "High"
elif data['battery_vp'] >= 4770:
self._state = "Medium"
elif data['battery_vp'] >= 4360:
self._state = "Low"
elif data['battery_vp'] < 4360:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '5':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '3':
if data['battery_vp'] >= 5640:
self._state = "Full"
elif data['battery_vp'] >= 5280:
self._state = "High"
elif data['battery_vp'] >= 4920:
self._state = "Medium"
elif data['battery_vp'] >= 4560:
self._state = "Low"
elif data['battery_vp'] < 4560:
self._state = "Very Low"
elif self.type == 'battery_vp' and self.module_id == '2':
if data['battery_vp'] >= 5500:
self._state = "Full"
elif data['battery_vp'] >= 5000:
self._state = "High"
elif data['battery_vp'] >= 4500:
self._state = "Medium"
elif data['battery_vp'] >= 4000:
self._state = "Low"
elif data['battery_vp'] < 4000:
self._state = "Very Low"
elif self.type == 'min_temp':
self._state = data['min_temp']
elif self.type == 'max_temp':
self._state = data['max_temp']
elif self.type == 'windangle_value':
self._state = data['WindAngle']
elif self.type == 'windangle':
if data['WindAngle'] >= 330:
self._state = "N (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 240:
self._state = "W (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 150:
self._state = "S (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 60:
self._state = "E (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['WindAngle']
elif data['WindAngle'] >= 0:
self._state = "N (%d\xb0)" % data['WindAngle']
elif self.type == 'windstrength':
self._state = data['WindStrength']
elif self.type == 'gustangle_value':
self._state = data['GustAngle']
elif self.type == 'gustangle':
if data['GustAngle'] >= 330:
self._state = "N (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 300:
self._state = "NW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 240:
self._state = "W (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 210:
self._state = "SW (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 150:
self._state = "S (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 120:
self._state = "SE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 60:
self._state = "E (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 30:
self._state = "NE (%d\xb0)" % data['GustAngle']
elif data['GustAngle'] >= 0:
self._state = "N (%d\xb0)" % data['GustAngle']
elif self.type == 'guststrength':
self._state = data['GustStrength']
elif self.type == 'rf_status_lvl':
self._state = data['rf_status']
elif self.type == 'rf_status':
if data['rf_status'] >= 90:
self._state = "Low"
elif data['rf_status'] >= 76:
self._state = "Medium"
elif data['rf_status'] >= 60:
self._state = "High"
elif data['rf_status'] <= 59:
self._state = "Full"
elif self.type == 'wifi_status_lvl':
self._state = data['wifi_status']
elif self.type == 'wifi_status':
if data['wifi_status'] >= 86:
self._state = "Low"
elif data['wifi_status'] >= 71:
self._state = "Medium"
elif data['wifi_status'] >= 56:
self._state = "High"
elif data['wifi_status'] <= 55:
self._state = "Full"
class NetAtmoData:
def __init__(self, auth, station):
self.auth = auth
self.data = None
self.station_data = None
self.station = station
self._next_update = time()
self._update_in_progress = threading.Lock()
def get_module_names(self):
self.update()
return self.data.keys()
def update(self):
if time() < self._next_update or \
not self._update_in_progress.acquire(False):
return
try:
import pyatmo
try:
self.station_data = pyatmo.WeatherStationData(self.auth)
except TypeError:
_LOGGER.error("Failed to connect to NetAtmo")
return
if self.station is not None:
self.data = self.station_data.lastData(
station=self.station, exclude=3600)
else:
self.data = self.station_data.lastData(exclude=3600)
newinterval = 0
for module in self.data:
if 'When' in self.data[module]:
newinterval = self.data[module]['When']
break
if newinterval:
newinterval += NETATMO_UPDATE_INTERVAL - time()
if newinterval > NETATMO_UPDATE_INTERVAL - 30:
newinterval = NETATMO_UPDATE_INTERVAL
else:
if newinterval < NETATMO_UPDATE_INTERVAL / 2:
newinterval = NETATMO_UPDATE_INTERVAL / 2
_LOGGER.info(
"NetAtmo refresh interval reset to %d seconds",
newinterval)
else:
newinterval = NETATMO_UPDATE_INTERVAL
self._next_update = time() + newinterval
finally:
self._update_in_progress.release()
| true
| true
|
f709e2dc077fbe724912bba68ee39dfd98f80d1e
| 30,132
|
py
|
Python
|
main.py
|
AswinR12/Minecraft
|
aff05fedf918a9500a597e89a26be62277ad717d
|
[
"MIT"
] | null | null | null |
main.py
|
AswinR12/Minecraft
|
aff05fedf918a9500a597e89a26be62277ad717d
|
[
"MIT"
] | null | null | null |
main.py
|
AswinR12/Minecraft
|
aff05fedf918a9500a597e89a26be62277ad717d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import division
import sys
import math
import random
import time
from collections import deque
from pyglet import image
from pyglet.gl import *
from pyglet.graphics import TextureGroup
from pyglet.window import key, mouse
TICKS_PER_SEC = 60
# Size of sectors used to ease block loading.
SECTOR_SIZE = 16
WALKING_SPEED = 5
FLYING_SPEED = 15
GRAVITY = 20.0
MAX_JUMP_HEIGHT = 1.0 # About the height of a block.
# To derive the formula for calculating jump speed, first solve
# v_t = v_0 + a * t
# for the time at which you achieve maximum height, where a is the acceleration
# due to gravity and v_t = 0. This gives:
# t = - v_0 / a
# Use t and the desired MAX_JUMP_HEIGHT to solve for v_0 (jump speed) in
# s = s_0 + v_0 * t + (a * t^2) / 2
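# Numerically, with GRAVITY = 20.0 and MAX_JUMP_HEIGHT = 1.0 this works out to
# v_0 = sqrt(2 * 20.0 * 1.0) = sqrt(40) ~= 6.32 blocks per second.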
JUMP_SPEED = math.sqrt(2 * GRAVITY * MAX_JUMP_HEIGHT)
TERMINAL_VELOCITY = 50
PLAYER_HEIGHT = 2
if sys.version_info[0] >= 3:
xrange = range
def cube_vertices(x, y, z, n):
""" Return the vertices of the cube at position x, y, z with size 2*n.
"""
return [
x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top
x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom
x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left
x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right
x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front
x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back
]
def tex_coord(x, y, n=4):
""" Return the bounding vertices of the texture square.
"""
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
def tex_coords(top, bottom, side):
""" Return a list of the texture squares for the top, bottom and side.
"""
top = tex_coord(*top)
bottom = tex_coord(*bottom)
side = tex_coord(*side)
result = []
result.extend(top)
result.extend(bottom)
result.extend(side * 4)
return result
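# For example, tex_coords((1, 0), (0, 1), (0, 0)) yields 48 floats: one (u, v)
# pair for each of the 24 vertices from cube_vertices() (8 floats for the top
# square, 8 for the bottom, and 8 * 4 for the four sides).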
TEXTURE_PATH = 'texture.png'
GRASS = tex_coords((1, 0), (0, 1), (0, 0))
SAND = tex_coords((1, 1), (1, 1), (1, 1))
BRICK = tex_coords((2, 0), (2, 0), (2, 0))
STONE = tex_coords((2, 1), (2, 1), (2, 1))
FACES = [
( 0, 1, 0),
( 0,-1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0,-1),
]
def normalize(position):
""" Accepts `position` of arbitrary precision and returns the block
containing that position.
Parameters
----------
position : tuple of len 3
Returns
-------
block_position : tuple of ints of len 3
"""
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE
return (x, 0, z)
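# For example, sectorize((30.4, 10.0, -40.2)) -> (1, 0, -3); the y component is
# collapsed to 0, so sectors partition the world into 16 x 16 columns.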
class Model(object):
def __init__(self):
# A Batch is a collection of vertex lists for batched rendering.
self.batch = pyglet.graphics.Batch()
# A TextureGroup manages an OpenGL texture.
self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture())
# A mapping from position to the texture of the block at that position.
# This defines all the blocks that are currently in the world.
self.world = {}
# Same mapping as `world` but only contains blocks that are shown.
self.shown = {}
# Mapping from position to a pyglet `VertextList` for all shown blocks.
self._shown = {}
# Mapping from sector to a list of positions inside that sector.
self.sectors = {}
# Simple function queue implementation. The queue is populated with
# _show_block() and _hide_block() calls
self.queue = deque()
self._initialize()
def _initialize(self):
""" Initialize the world by placing all the blocks.
"""
n = 80 # 1/2 width and height of world
s = 1 # step size
y = 0 # initial y height
for x in xrange(-n, n + 1, s):
for z in xrange(-n, n + 1, s):
                # create a layer of stone and grass everywhere.
self.add_block((x, y - 2, z), GRASS, immediate=False)
self.add_block((x, y - 3, z), STONE, immediate=False)
if x in (-n, n) or z in (-n, n):
# create outer walls.
for dy in xrange(-2, 3):
self.add_block((x, y + dy, z), STONE, immediate=False)
# generate the hills randomly
o = n - 10
for _ in xrange(120):
a = random.randint(-o, o) # x position of the hill
b = random.randint(-o, o) # z position of the hill
c = -1 # base of the hill
h = random.randint(1, 6) # height of the hill
s = random.randint(4, 8) # 2 * s is the side length of the hill
d = 1 # how quickly to taper off the hills
t = random.choice([GRASS, SAND, BRICK])
for y in xrange(c, c + h):
for x in xrange(a - s, a + s + 1):
for z in xrange(b - s, b + s + 1):
if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:
continue
if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:
continue
self.add_block((x, y, z), t, immediate=False)
            s -= d  # decrement side length so hills taper off
def hit_test(self, position, vector, max_distance=8):
""" Line of sight search from current position. If a block is
intersected it is returned, along with the block previously in the line
of sight. If no block is found, return None, None.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check visibility from.
vector : tuple of len 3
The line of sight vector.
max_distance : int
How many blocks away to search for a hit.
"""
m = 8
x, y, z = position
dx, dy, dz = vector
previous = None
for _ in xrange(max_distance * m):
key = normalize((x, y, z))
if key != previous and key in self.world:
return key, previous
previous = key
x, y, z = x + dx / m, y + dy / m, z + dz / m
return None, None
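    # Granularity note: with m = 8 the sight line is sampled every 1/8 block,
    # so hit_test checks max_distance * 8 points; in principle a ray could
    # clip a corner between samples, but at this resolution that is rare.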
def exposed(self, position):
""" Returns False is given `position` is surrounded on all 6 sides by
blocks, True otherwise.
"""
x, y, z = position
for dx, dy, dz in FACES:
if (x + dx, y + dy, z + dz) not in self.world:
return True
return False
def add_block(self, position, texture, immediate=True):
""" Add a block with the given `texture` and `position` to the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to add.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
immediate : bool
Whether or not to draw the block immediately.
"""
if position in self.world:
self.remove_block(position, immediate)
self.world[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if immediate:
if self.exposed(position):
self.show_block(position)
self.check_neighbors(position)
def remove_block(self, position, immediate=True):
""" Remove the block at the given `position`.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to remove.
immediate : bool
Whether or not to immediately remove block from canvas.
"""
del self.world[position]
self.sectors[sectorize(position)].remove(position)
if immediate:
if position in self.shown:
self.hide_block(position)
self.check_neighbors(position)
def check_neighbors(self, position):
""" Check all blocks surrounding `position` and ensure their visual
state is current. This means hiding blocks that are not exposed and
ensuring that all exposed blocks are shown. Usually used after a block
is added or removed.
"""
x, y, z = position
for dx, dy, dz in FACES:
key = (x + dx, y + dy, z + dz)
if key not in self.world:
continue
if self.exposed(key):
if key not in self.shown:
self.show_block(key)
else:
if key in self.shown:
self.hide_block(key)
def show_block(self, position, immediate=True):
""" Show the block at the given `position`. This method assumes the
block has already been added with add_block()
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
immediate : bool
Whether or not to show the block immediately.
"""
texture = self.world[position]
self.shown[position] = texture
if immediate:
self._show_block(position, texture)
else:
self._enqueue(self._show_block, position, texture)
def _show_block(self, position, texture):
""" Private implementation of the `show_block()` method.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to show.
texture : list of len 3
The coordinates of the texture squares. Use `tex_coords()` to
generate.
"""
x, y, z = position
vertex_data = cube_vertices(x, y, z, 0.5)
texture_data = list(texture)
# create vertex list
# FIXME Maybe `add_indexed()` should be used instead
self._shown[position] = self.batch.add(24, GL_QUADS, self.group,
('v3f/static', vertex_data),
('t2f/static', texture_data))
def hide_block(self, position, immediate=True):
""" Hide the block at the given `position`. Hiding does not remove the
block from the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position of the block to hide.
immediate : bool
Whether or not to immediately remove the block from the canvas.
"""
self.shown.pop(position)
if immediate:
self._hide_block(position)
else:
self._enqueue(self._hide_block, position)
def _hide_block(self, position):
""" Private implementation of the 'hide_block()` method.
"""
self._shown.pop(position).delete()
def show_sector(self, sector):
""" Ensure all blocks in the given sector that should be shown are
drawn to the canvas.
"""
for position in self.sectors.get(sector, []):
if position not in self.shown and self.exposed(position):
self.show_block(position, False)
def hide_sector(self, sector):
""" Ensure all blocks in the given sector that should be hidden are
removed from the canvas.
"""
for position in self.sectors.get(sector, []):
if position in self.shown:
self.hide_block(position, False)
def change_sectors(self, before, after):
""" Move from sector `before` to sector `after`. A sector is a
contiguous x, y sub-region of world. Sectors are used to speed up
world rendering.
"""
before_set = set()
after_set = set()
pad = 4
for dx in xrange(-pad, pad + 1):
for dy in [0]: # xrange(-pad, pad + 1):
for dz in xrange(-pad, pad + 1):
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x + dx, y + dy, z + dz))
if after:
x, y, z = after
after_set.add((x + dx, y + dy, z + dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def _enqueue(self, func, *args):
""" Add `func` to the internal queue.
"""
self.queue.append((func, args))
def _dequeue(self):
""" Pop the top function from the internal queue and call it.
"""
func, args = self.queue.popleft()
func(*args)
def process_queue(self):
""" Process the entire queue while taking periodic breaks. This allows
the game loop to run smoothly. The queue contains calls to
_show_block() and _hide_block() so this method should be called if
add_block() or remove_block() was called with immediate=False
"""
start = time.process_time()
while self.queue and time.process_time() - start < 1.0 / TICKS_PER_SEC:
self._dequeue()
def process_entire_queue(self):
""" Process the entire queue with no breaks.
"""
while self.queue:
self._dequeue()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
# Whether or not the window exclusively captures the mouse.
self.exclusive = False
        # When flying, gravity has no effect and speed is increased.
self.flying = False
# Strafing is moving lateral to the direction you are facing,
# e.g. moving to the left or right while continuing to face forward.
#
# First element is -1 when moving forward, 1 when moving back, and 0
# otherwise. The second element is -1 when moving left, 1 when moving
# right, and 0 otherwise.
self.strafe = [0, 0]
# Current (x, y, z) position in the world, specified with floats. Note
# that, perhaps unlike in math class, the y-axis is the vertical axis.
self.position = (0, 0, 0)
# First element is rotation of the player in the x-z plane (ground
# plane) measured from the z-axis down. The second is the rotation
# angle from the ground plane up. Rotation is in degrees.
#
# The vertical plane rotation ranges from -90 (looking straight down) to
# 90 (looking straight up). The horizontal rotation range is unbounded.
self.rotation = (0, 0)
# Which sector the player is currently in.
self.sector = None
# The crosshairs at the center of the screen.
self.reticle = None
# Velocity in the y (upward) direction.
self.dy = 0
# A list of blocks the player can place. Hit num keys to cycle.
self.inventory = [BRICK, GRASS, SAND]
# The current block the user can place. Hit num keys to cycle.
self.block = self.inventory[0]
# Convenience list of num keys.
self.num_keys = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0]
# Instance of the model that handles the world.
self.model = Model()
# The label that is displayed in the top left of the canvas.
self.label = pyglet.text.Label('', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
# This call schedules the `update()` method to be called
# TICKS_PER_SEC. This is the main game event loop.
pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
def set_exclusive_mouse(self, exclusive):
""" If `exclusive` is True, the game will capture the mouse, if False
the game will ignore the mouse.
"""
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def get_sight_vector(self):
""" Returns the current line of sight vector indicating the direction
the player is looking.
"""
x, y = self.rotation
# y ranges from -90 to 90, or -pi/2 to pi/2, so m ranges from 0 to 1 and
# is 1 when looking ahead parallel to the ground and 0 when looking
# straight up or down.
m = math.cos(math.radians(y))
# dy ranges from -1 to 1 and is -1 when looking straight down and 1 when
# looking straight up.
dy = math.sin(math.radians(y))
dx = math.cos(math.radians(x - 90)) * m
dz = math.sin(math.radians(x - 90)) * m
return (dx, dy, dz)
def get_motion_vector(self):
""" Returns the current motion vector indicating the velocity of the
player.
Returns
-------
vector : tuple of len 3
Tuple containing the velocity in x, y, and z respectively.
"""
if any(self.strafe):
x, y = self.rotation
strafe = math.degrees(math.atan2(*self.strafe))
y_angle = math.radians(y)
x_angle = math.radians(x + strafe)
if self.flying:
m = math.cos(y_angle)
dy = math.sin(y_angle)
if self.strafe[1]:
# Moving left or right.
dy = 0.0
m = 1
if self.strafe[0] > 0:
# Moving backwards.
dy *= -1
# When you are flying up or down, you have less left and right
# motion.
dx = math.cos(x_angle) * m
dz = math.sin(x_angle) * m
else:
dy = 0.0
dx = math.cos(x_angle)
dz = math.sin(x_angle)
else:
dy = 0.0
dx = 0.0
dz = 0.0
return (dx, dy, dz)
def update(self, dt):
""" This method is scheduled to be called repeatedly by the pyglet
clock.
Parameters
----------
dt : float
The change in time since the last call.
"""
self.model.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.model.change_sectors(self.sector, sector)
if self.sector is None:
self.model.process_entire_queue()
self.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in xrange(m):
self._update(dt / m)
def _update(self, dt):
""" Private implementation of the `update()` method. This is where most
of the motion logic lives, along with gravity and collision detection.
Parameters
----------
dt : float
The change in time since the last call.
"""
# walking
speed = FLYING_SPEED if self.flying else WALKING_SPEED
d = dt * speed # distance covered this tick.
dx, dy, dz = self.get_motion_vector()
# New position in space, before accounting for gravity.
dx, dy, dz = dx * d, dy * d, dz * d
# gravity
if not self.flying:
# Update your vertical speed: if you are falling, speed up until you
# hit terminal velocity; if you are jumping, slow down until you
# start falling.
self.dy -= dt * GRAVITY
self.dy = max(self.dy, -TERMINAL_VELOCITY)
dy += self.dy * dt
# collisions
x, y, z = self.position
x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)
self.position = (x, y, z)
def collide(self, position, height):
""" Checks to see if the player at the given `position` and `height`
is colliding with any blocks in the world.
Parameters
----------
position : tuple of len 3
The (x, y, z) position to check for collisions at.
height : int or float
The height of the player.
Returns
-------
position : tuple of len 3
The new position of the player taking into account collisions.
"""
# How much overlap with a dimension of a surrounding block you need to
# have to count as a collision. If 0, touching terrain at all counts as
# a collision. If .49, you sink into the ground, as if walking through
# tall grass. If >= .5, you'll fall through the ground.
pad = 0.25
p = list(position)
np = normalize(position)
for face in FACES: # check all surrounding blocks
for i in xrange(3): # check each dimension independently
if not face[i]:
continue
# How much overlap you have with this dimension.
d = (p[i] - np[i]) * face[i]
if d < pad:
continue
for dy in xrange(height): # check each height
op = list(np)
op[1] -= dy
op[i] += face[i]
if tuple(op) not in self.model.world:
continue
p[i] -= (d - pad) * face[i]
if face == (0, -1, 0) or face == (0, 1, 0):
# You are colliding with the ground or ceiling, so stop
# falling / rising.
self.dy = 0
break
return tuple(p)
def on_mouse_press(self, x, y, button, modifiers):
""" Called when a mouse button is pressed. See pyglet docs for button
        and modifier mappings.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
button : int
Number representing mouse button that was clicked. 1 = left button,
4 = right button.
modifiers : int
Number representing any modifying keys that were pressed when the
mouse button was clicked.
"""
if self.exclusive:
vector = self.get_sight_vector()
block, previous = self.model.hit_test(self.position, vector)
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
                # On OS X, control + left click = right click.
if previous:
self.model.add_block(previous, self.block)
elif button == pyglet.window.mouse.LEFT and block:
texture = self.model.world[block]
if texture != STONE:
self.model.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
""" Called when the player moves the mouse.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
dx, dy : float
The movement of the mouse.
"""
if self.exclusive:
m = 0.15
x, y = self.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
""" Called when the player presses a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] -= 1
elif symbol == key.S:
self.strafe[0] += 1
elif symbol == key.A:
self.strafe[1] -= 1
elif symbol == key.D:
self.strafe[1] += 1
elif symbol == key.SPACE:
if self.dy == 0:
self.dy = JUMP_SPEED
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == key.TAB:
self.flying = not self.flying
elif symbol in self.num_keys:
index = (symbol - self.num_keys[0]) % len(self.inventory)
self.block = self.inventory[index]
def on_key_release(self, symbol, modifiers):
""" Called when the player releases a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == key.W:
self.strafe[0] += 1
elif symbol == key.S:
self.strafe[0] -= 1
elif symbol == key.A:
self.strafe[1] += 1
elif symbol == key.D:
self.strafe[1] -= 1
def on_resize(self, width, height):
""" Called when the window is resized to a new `width` and `height`.
"""
# label
self.label.y = height - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def set_2d(self):
""" Configure OpenGL to draw in 2d.
"""
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, max(1, width), 0, max(1, height), -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set_3d(self):
""" Configure OpenGL to draw in 3d.
"""
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.position
glTranslatef(-x, -y, -z)
def on_draw(self):
""" Called by pyglet to draw the canvas.
"""
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.model.batch.draw()
self.draw_focused_block()
self.set_2d()
self.draw_label()
self.draw_reticle()
def draw_focused_block(self):
""" Draw black edges around the block that is currently under the
crosshairs.
"""
vector = self.get_sight_vector()
block = self.model.hit_test(self.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_label(self):
""" Draw the label in the top left of the screen.
"""
x, y, z = self.position
self.label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.model._shown), len(self.model.world))
self.label.draw()
def draw_reticle(self):
""" Draw the crosshairs in the center of the screen.
"""
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
def setup_fog():
""" Configure the OpenGL fog properties.
"""
# Enable fog. Fog "blends a fog color with each rasterized pixel fragment's
# post-texturing color."
glEnable(GL_FOG)
# Set the fog color.
glFogfv(GL_FOG_COLOR, (GLfloat * 4)(0.5, 0.69, 1.0, 1))
# Say we have no preference between rendering speed and quality.
glHint(GL_FOG_HINT, GL_DONT_CARE)
# Specify the equation used to compute the blending factor.
glFogi(GL_FOG_MODE, GL_LINEAR)
# How close and far away fog starts and ends. The closer the start and end,
# the denser the fog in the fog range.
glFogf(GL_FOG_START, 20.0)
glFogf(GL_FOG_END, 60.0)
def setup():
""" Basic OpenGL configuration.
"""
# Set the color of "clear", i.e. the sky, in rgba.
glClearColor(0.5, 0.69, 1.0, 1)
# Enable culling (not rendering) of back-facing facets -- facets that aren't
# visible to you.
glEnable(GL_CULL_FACE)
# Set the texture minification/magnification function to GL_NEAREST (nearest
# in Manhattan distance) to the specified texture coordinates. GL_NEAREST
# "is generally faster than GL_LINEAR, but it can produce textured images
# with sharper edges because the transition between texture elements is not
# as smooth."
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
setup_fog()
def main():
window = Window(width=800, height=600, caption='Pyglet', resizable=True)
# Hide the mouse cursor and prevent the mouse from leaving the window.
window.set_exclusive_mouse(True)
setup()
pyglet.app.run()
if __name__ == '__main__':
main()
| 33.331858
| 80
| 0.551374
|
from __future__ import division
import sys
import math
import random
import time
from collections import deque
from pyglet import image
from pyglet.gl import *
from pyglet.graphics import TextureGroup
from pyglet.window import key, mouse
TICKS_PER_SEC = 60
SECTOR_SIZE = 16
WALKING_SPEED = 5
FLYING_SPEED = 15
GRAVITY = 20.0
MAX_JUMP_HEIGHT = 1.0
JUMP_SPEED = math.sqrt(2 * GRAVITY * MAX_JUMP_HEIGHT)
TERMINAL_VELOCITY = 50
PLAYER_HEIGHT = 2
if sys.version_info[0] >= 3:
xrange = range
def cube_vertices(x, y, z, n):
return [
        x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n,
        x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n,
        x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n,
        x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n,
        x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n,
        x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n,
    ]
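# Editor's sketch: 6 faces * 4 corners * 3 coordinates = 72 floats per cube.
assert len(cube_vertices(0, 0, 0, 0.5)) == 72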
def tex_coord(x, y, n=4):
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
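# Editor's sketch: with the default 4x4 atlas, tile (1, 0) occupies the square
# from (0.25, 0.0) to (0.5, 0.25) in texture space.
assert tex_coord(1, 0) == (0.25, 0.0, 0.5, 0.0, 0.5, 0.25, 0.25, 0.25)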
def tex_coords(top, bottom, side):
top = tex_coord(*top)
bottom = tex_coord(*bottom)
side = tex_coord(*side)
result = []
result.extend(top)
result.extend(bottom)
result.extend(side * 4)
return result
TEXTURE_PATH = 'texture.png'
GRASS = tex_coords((1, 0), (0, 1), (0, 0))
SAND = tex_coords((1, 1), (1, 1), (1, 1))
BRICK = tex_coords((2, 0), (2, 0), (2, 0))
STONE = tex_coords((2, 1), (2, 1), (2, 1))
FACES = [
( 0, 1, 0),
( 0,-1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0,-1),
]
def normalize(position):
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
x, y, z = normalize(position)
x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE
return (x, 0, z)
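# Editor's sketch: sectors are 16x16 columns in x and z (y is collapsed to 0);
# e.g. (17.3, 5.0, -2.7) rounds to (17, 5, -3), which lands in sector (1, 0, -1).
assert sectorize((17.3, 5.0, -2.7)) == (1, 0, -1)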
class Model(object):
def __init__(self):
self.batch = pyglet.graphics.Batch()
self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture())
self.world = {}
self.shown = {}
self._shown = {}
self.sectors = {}
self.queue = deque()
self._initialize()
def _initialize(self):
        n = 80
        s = 1
        y = 0
        for x in xrange(-n, n + 1, s):
for z in xrange(-n, n + 1, s):
self.add_block((x, y - 2, z), GRASS, immediate=False)
self.add_block((x, y - 3, z), STONE, immediate=False)
if x in (-n, n) or z in (-n, n):
for dy in xrange(-2, 3):
self.add_block((x, y + dy, z), STONE, immediate=False)
o = n - 10
for _ in xrange(120):
            a = random.randint(-o, o)
            b = random.randint(-o, o)
            c = -1
            h = random.randint(1, 6)
            s = random.randint(4, 8)
            d = 1
            t = random.choice([GRASS, SAND, BRICK])
for y in xrange(c, c + h):
for x in xrange(a - s, a + s + 1):
for z in xrange(b - s, b + s + 1):
if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:
continue
if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:
continue
self.add_block((x, y, z), t, immediate=False)
s -= d
def hit_test(self, position, vector, max_distance=8):
m = 8
x, y, z = position
dx, dy, dz = vector
previous = None
for _ in xrange(max_distance * m):
key = normalize((x, y, z))
if key != previous and key in self.world:
return key, previous
previous = key
x, y, z = x + dx / m, y + dy / m, z + dz / m
return None, None
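    # Editor's note: hit_test marches max_distance * 8 steps of 1/8 block along
    # the sight vector and returns the first occupied block plus the empty
    # block crossed just before it (the natural spot to place a new block).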
def exposed(self, position):
x, y, z = position
for dx, dy, dz in FACES:
if (x + dx, y + dy, z + dz) not in self.world:
return True
return False
def add_block(self, position, texture, immediate=True):
if position in self.world:
self.remove_block(position, immediate)
self.world[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if immediate:
if self.exposed(position):
self.show_block(position)
self.check_neighbors(position)
def remove_block(self, position, immediate=True):
del self.world[position]
self.sectors[sectorize(position)].remove(position)
if immediate:
if position in self.shown:
self.hide_block(position)
self.check_neighbors(position)
def check_neighbors(self, position):
x, y, z = position
for dx, dy, dz in FACES:
key = (x + dx, y + dy, z + dz)
if key not in self.world:
continue
if self.exposed(key):
if key not in self.shown:
self.show_block(key)
else:
if key in self.shown:
self.hide_block(key)
def show_block(self, position, immediate=True):
texture = self.world[position]
self.shown[position] = texture
if immediate:
self._show_block(position, texture)
else:
self._enqueue(self._show_block, position, texture)
def _show_block(self, position, texture):
x, y, z = position
vertex_data = cube_vertices(x, y, z, 0.5)
texture_data = list(texture)
self._shown[position] = self.batch.add(24, GL_QUADS, self.group,
('v3f/static', vertex_data),
('t2f/static', texture_data))
def hide_block(self, position, immediate=True):
self.shown.pop(position)
if immediate:
self._hide_block(position)
else:
self._enqueue(self._hide_block, position)
def _hide_block(self, position):
self._shown.pop(position).delete()
def show_sector(self, sector):
for position in self.sectors.get(sector, []):
if position not in self.shown and self.exposed(position):
self.show_block(position, False)
def hide_sector(self, sector):
for position in self.sectors.get(sector, []):
if position in self.shown:
self.hide_block(position, False)
def change_sectors(self, before, after):
before_set = set()
after_set = set()
pad = 4
for dx in xrange(-pad, pad + 1):
            for dy in [0]:
                for dz in xrange(-pad, pad + 1):
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x + dx, y + dy, z + dz))
if after:
x, y, z = after
after_set.add((x + dx, y + dy, z + dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def _enqueue(self, func, *args):
self.queue.append((func, args))
def _dequeue(self):
func, args = self.queue.popleft()
func(*args)
def process_queue(self):
start = time.process_time()
while self.queue and time.process_time() - start < 1.0 / TICKS_PER_SEC:
self._dequeue()
def process_entire_queue(self):
while self.queue:
self._dequeue()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
self.exclusive = False
self.flying = False
self.strafe = [0, 0]
self.position = (0, 0, 0)
self.rotation = (0, 0)
self.sector = None
self.reticle = None
self.dy = 0
self.inventory = [BRICK, GRASS, SAND]
self.block = self.inventory[0]
self.num_keys = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0]
self.model = Model()
self.label = pyglet.text.Label('', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
def set_exclusive_mouse(self, exclusive):
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def get_sight_vector(self):
x, y = self.rotation
m = math.cos(math.radians(y))
dy = math.sin(math.radians(y))
dx = math.cos(math.radians(x - 90)) * m
dz = math.sin(math.radians(x - 90)) * m
return (dx, dy, dz)
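    # Editor's sketch: at rotation (0, 0) this returns approximately
    # (0.0, 0.0, -1.0), i.e. the player looks straight down the -z axis.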
def get_motion_vector(self):
if any(self.strafe):
x, y = self.rotation
strafe = math.degrees(math.atan2(*self.strafe))
y_angle = math.radians(y)
x_angle = math.radians(x + strafe)
if self.flying:
m = math.cos(y_angle)
dy = math.sin(y_angle)
if self.strafe[1]:
dy = 0.0
m = 1
if self.strafe[0] > 0:
dy *= -1
dx = math.cos(x_angle) * m
dz = math.sin(x_angle) * m
else:
dy = 0.0
dx = math.cos(x_angle)
dz = math.sin(x_angle)
else:
dy = 0.0
dx = 0.0
dz = 0.0
return (dx, dy, dz)
def update(self, dt):
self.model.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.model.change_sectors(self.sector, sector)
if self.sector is None:
self.model.process_entire_queue()
self.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in xrange(m):
self._update(dt / m)
def _update(self, dt):
speed = FLYING_SPEED if self.flying else WALKING_SPEED
        d = dt * speed
        dx, dy, dz = self.get_motion_vector()
dx, dy, dz = dx * d, dy * d, dz * d
if not self.flying:
self.dy -= dt * GRAVITY
self.dy = max(self.dy, -TERMINAL_VELOCITY)
dy += self.dy * dt
x, y, z = self.position
x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)
self.position = (x, y, z)
def collide(self, position, height):
pad = 0.25
p = list(position)
np = normalize(position)
for face in FACES: # check all surrounding blocks
for i in xrange(3): # check each dimension independently
if not face[i]:
continue
# How much overlap you have with this dimension.
d = (p[i] - np[i]) * face[i]
if d < pad:
continue
for dy in xrange(height): # check each height
op = list(np)
op[1] -= dy
op[i] += face[i]
if tuple(op) not in self.model.world:
continue
p[i] -= (d - pad) * face[i]
if face == (0, -1, 0) or face == (0, 1, 0):
# You are colliding with the ground or ceiling, so stop
# falling / rising.
self.dy = 0
break
return tuple(p)
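    # Editor's note: collide() clamps motion per axis against neighbouring
    # blocks; pad = 0.25 keeps the player a quarter block clear of walls, and
    # a floor or ceiling hit zeroes self.dy so falling or rising stops.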
def on_mouse_press(self, x, y, button, modifiers):
if self.exclusive:
vector = self.get_sight_vector()
block, previous = self.model.hit_test(self.position, vector)
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
                # On OS X, control + left click = right click.
if previous:
self.model.add_block(previous, self.block)
elif button == pyglet.window.mouse.LEFT and block:
texture = self.model.world[block]
if texture != STONE:
self.model.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
if self.exclusive:
m = 0.15
x, y = self.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
if symbol == key.W:
self.strafe[0] -= 1
elif symbol == key.S:
self.strafe[0] += 1
elif symbol == key.A:
self.strafe[1] -= 1
elif symbol == key.D:
self.strafe[1] += 1
elif symbol == key.SPACE:
if self.dy == 0:
self.dy = JUMP_SPEED
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == key.TAB:
self.flying = not self.flying
elif symbol in self.num_keys:
index = (symbol - self.num_keys[0]) % len(self.inventory)
self.block = self.inventory[index]
def on_key_release(self, symbol, modifiers):
if symbol == key.W:
self.strafe[0] += 1
elif symbol == key.S:
self.strafe[0] -= 1
elif symbol == key.A:
self.strafe[1] += 1
elif symbol == key.D:
self.strafe[1] -= 1
def on_resize(self, width, height):
# label
self.label.y = height - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def set_2d(self):
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, max(1, width), 0, max(1, height), -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set_3d(self):
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
viewport = self.get_viewport_size()
glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.position
glTranslatef(-x, -y, -z)
def on_draw(self):
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.model.batch.draw()
self.draw_focused_block()
self.set_2d()
self.draw_label()
self.draw_reticle()
def draw_focused_block(self):
vector = self.get_sight_vector()
block = self.model.hit_test(self.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_label(self):
x, y, z = self.position
self.label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.model._shown), len(self.model.world))
self.label.draw()
def draw_reticle(self):
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
def setup_fog():
# Enable fog. Fog "blends a fog color with each rasterized pixel fragment's
# post-texturing color."
glEnable(GL_FOG)
glFogfv(GL_FOG_COLOR, (GLfloat * 4)(0.5, 0.69, 1.0, 1))
glHint(GL_FOG_HINT, GL_DONT_CARE)
glFogi(GL_FOG_MODE, GL_LINEAR)
glFogf(GL_FOG_START, 20.0)
glFogf(GL_FOG_END, 60.0)
def setup():
glClearColor(0.5, 0.69, 1.0, 1)
# visible to you.
glEnable(GL_CULL_FACE)
# Set the texture minification/magnification function to GL_NEAREST (nearest
# in Manhattan distance) to the specified texture coordinates. GL_NEAREST
# "is generally faster than GL_LINEAR, but it can produce textured images
# with sharper edges because the transition between texture elements is not
# as smooth."
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
setup_fog()
def main():
window = Window(width=800, height=600, caption='Pyglet', resizable=True)
# Hide the mouse cursor and prevent the mouse from leaving the window.
window.set_exclusive_mouse(True)
setup()
pyglet.app.run()
if __name__ == '__main__':
main()
| true
| true
|
f709e44133af35e725e4b68ac6b7a629db5beee6
| 2,755
|
py
|
Python
|
gameoflife.py
|
zvolsky/game-of-live
|
b890a7a242eb77e40827a491f6dae35db8e76791
|
[
"MIT"
] | null | null | null |
gameoflife.py
|
zvolsky/game-of-live
|
b890a7a242eb77e40827a491f6dae35db8e76791
|
[
"MIT"
] | null | null | null |
gameoflife.py
|
zvolsky/game-of-live
|
b890a7a242eb77e40827a491f6dae35db8e76791
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def get_next_one(wint):
"""
    returns the next generation as a list of rows, where each row contains a string of 0|1 characters
    :param wint: array of integers 0|1, the older generation
    :retval next_one: list of rows, the next generation
"""
next_one = []
rows = len(wint)
cols = len(wint[0])
def fixed_i(i):
# return divmod(i, rows)[1] , but lets optimize this a little
if 0 <= i < rows:
return i
return i % rows
def fixed_j(j):
if 0 <= j < cols:
return j
return divmod(j, cols)[1]
def neighbors_include_me(center_i, center_j):
neighbors_and_me = 0
for i in range(center_i - 1, center_i + 2):
for j in range(center_j - 1, center_j + 2):
neighbors_and_me += wint[fixed_i(i)][fixed_j(j)]
return neighbors_and_me
for i, row in enumerate(wint):
next_row = ''
for j, elem in enumerate(row):
neighbors = neighbors_include_me(i, j) - elem
if elem and 2 <= neighbors <= 3 or not elem and neighbors == 3:
next_row += '1'
else:
next_row += '0'
next_one.append(next_row)
return next_one
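# A minimal usage sketch (editor's note): a classic "blinker" on a 5x5 grid,
# kept clear of the wrapping edges, flips from horizontal to vertical:
#   get_next_one(word_to_int(['00000', '00000', '01110', '00000', '00000']))
#   == ['00000', '00100', '00100', '00100', '00000']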
def word_to_int(world):
"""
    converts a list of strings (where each character is one element) into an array of integers 0|1
    here "array" means a list of rows, where each row is a tuple of 0|1 elements
"""
wint = []
for row in world:
wint.append(tuple(map(int, tuple(row))))
return wint
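# Editor's sketch of the expected shape:
assert word_to_int(['01', '10']) == [(0, 1), (1, 0)]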
def validated_world(world):
if type(world) not in (tuple, list) or len(world) == 0:
raise TypeError('need a non empty list')
cols = None
for row in world:
if type(row) != str:
raise TypeError('list elements must be strings')
if cols is None:
cols = len(row)
if not cols:
raise TypeError('strings inside the list must be non empty')
elif len(row) != cols:
raise TypeError('strings inside the list must have the same length')
if row.replace('0', '').replace('1', ''):
raise TypeError('allowed characters are: 01')
return world
def from_stdin():
console = sys.stdin.isatty()
world = []
row_no = 1
while True:
try:
row = input(('row %s (empty to start) : ' % row_no) if console else '')
except EOFError:
break
if not row:
break
world.append(row)
row_no += 1
main(world)
def main(world):
for row in get_next_one(word_to_int(validated_world(world))):
print(row)
if __name__ == '__main__':
from_stdin()
| 28.697917
| 95
| 0.561525
|
import sys
def get_next_one(wint):
next_one = []
rows = len(wint)
cols = len(wint[0])
def fixed_i(i):
if 0 <= i < rows:
return i
return i % rows
def fixed_j(j):
if 0 <= j < cols:
return j
return divmod(j, cols)[1]
def neighbors_include_me(center_i, center_j):
neighbors_and_me = 0
for i in range(center_i - 1, center_i + 2):
for j in range(center_j - 1, center_j + 2):
neighbors_and_me += wint[fixed_i(i)][fixed_j(j)]
return neighbors_and_me
for i, row in enumerate(wint):
next_row = ''
for j, elem in enumerate(row):
neighbors = neighbors_include_me(i, j) - elem
if elem and 2 <= neighbors <= 3 or not elem and neighbors == 3:
next_row += '1'
else:
next_row += '0'
next_one.append(next_row)
return next_one
def word_to_int(world):
wint = []
for row in world:
wint.append(tuple(map(int, tuple(row))))
return wint
def validated_world(world):
if type(world) not in (tuple, list) or len(world) == 0:
raise TypeError('need a non empty list')
cols = None
for row in world:
if type(row) != str:
raise TypeError('list elements must be strings')
if cols is None:
cols = len(row)
if not cols:
raise TypeError('strings inside the list must be non empty')
elif len(row) != cols:
raise TypeError('strings inside the list must have the same length')
if row.replace('0', '').replace('1', ''):
raise TypeError('allowed characters are: 01')
return world
def from_stdin():
console = sys.stdin.isatty()
world = []
row_no = 1
while True:
try:
row = input(('row %s (empty to start) : ' % row_no) if console else '')
except EOFError:
break
if not row:
break
world.append(row)
row_no += 1
main(world)
def main(world):
for row in get_next_one(word_to_int(validated_world(world))):
print(row)
if __name__ == '__main__':
from_stdin()
| true
| true
|
f709e4c35e93001402d4351c8505981f77b535e5
| 562
|
py
|
Python
|
senior project/code and resources/setup.py
|
amiel445566/my-tetris
|
e738d791b685dbf6356e252450d366c747205087
|
[
"MIT"
] | null | null | null |
senior project/code and resources/setup.py
|
amiel445566/my-tetris
|
e738d791b685dbf6356e252450d366c747205087
|
[
"MIT"
] | null | null | null |
senior project/code and resources/setup.py
|
amiel445566/my-tetris
|
e738d791b685dbf6356e252450d366c747205087
|
[
"MIT"
] | null | null | null |
import sys
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"optimize": 2,
"packages": ["dbm"],
"include_files": ["image(s)", "font(s)", "db", "README.txt"]
}
setup( name = "Tetris Code",
version = "3.6",
options = {"build_exe": build_exe_options},
description = "My take on Tetris.",
executables = [Executable("tetris_code.py", base="Win32GUI", icon="image(s)\\favicon.ico")])
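# Editor's note: this script is typically run as `python setup.py build`,
# which freezes tetris_code.py and its dependencies into a standalone
# Windows GUI executable under the build/ directory.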
| 37.466667
| 100
| 0.581851
|
import sys
from cx_Freeze import setup, Executable
build_exe_options = {"optimize": 2,
"packages": ["dbm"],
"include_files": ["image(s)", "font(s)", "db", "README.txt"]
}
setup( name = "Tetris Code",
version = "3.6",
options = {"build_exe": build_exe_options},
description = "My take on Tetris.",
executables = [Executable("tetris_code.py", base="Win32GUI", icon="image(s)\\favicon.ico")])
| true
| true
|
f709e537d90b2efbc045e695c664188d4d468ba0
| 6,501
|
py
|
Python
|
reprounzip/reprounzip/unpackers/common/packages.py
|
BinalModi/reprozip
|
0edec88ab63cb1b724af291579a74a654d0bbe07
|
[
"BSD-3-Clause"
] | null | null | null |
reprounzip/reprounzip/unpackers/common/packages.py
|
BinalModi/reprozip
|
0edec88ab63cb1b724af291579a74a654d0bbe07
|
[
"BSD-3-Clause"
] | null | null | null |
reprounzip/reprounzip/unpackers/common/packages.py
|
BinalModi/reprozip
|
0edec88ab63cb1b724af291579a74a654d0bbe07
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Utility functions dealing with package managers.
"""
from __future__ import division, print_function, unicode_literals
import logging
import platform
import subprocess
from reprounzip.unpackers.common.misc import UsageError
from reprounzip.utils import itervalues
logger = logging.getLogger('reprounzip')
THIS_DISTRIBUTION = platform.linux_distribution()[0].lower()
PKG_NOT_INSTALLED = "(not installed)"
class CantFindInstaller(UsageError):
def __init__(self, msg="Can't select a package installer"):
UsageError.__init__(self, msg)
class AptInstaller(object):
"""Installer for deb-based systems (Debian, Ubuntu).
"""
def __init__(self, binary):
self.bin = binary
def install(self, packages, assume_yes=False):
# Installs
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call([self.bin, 'install'] +
options + list(required_pkgs))
# Checks on packages
pkgs_status = self.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
            if status != PKG_NOT_INSTALLED:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package;-50}\t${Version}\n',
'-W'] +
[pkg.name for pkg in packages],
stdout=subprocess.PIPE)
# name -> (pkg, installed_version)
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
fields = l.split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
def update_script(self):
return '%s update' % self.bin
def install_script(self, packages):
return '%s install -y %s' % (self.bin,
' '.join(pkg.name for pkg in packages))
class YumInstaller(object):
"""Installer for systems using RPM and Yum (Fedora, CentOS, Red-Hat).
"""
@classmethod
def install(cls, packages, assume_yes=False):
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call(['yum', 'install'] + options + list(required_pkgs))
# Checks on packages
pkgs_status = cls.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
            if status != PKG_NOT_INSTALLED:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['rpm', '-q'] +
[pkg.name for pkg in packages] +
['--qf', '+%{NAME} %{VERSION}-%{RELEASE}\\n'],
stdout=subprocess.PIPE)
# name -> {pkg, installed_version}
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
                if l[:1] == b'+':
fields = l[1:].split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
@staticmethod
def update_script():
return ''
@staticmethod
def install_script(packages):
return 'yum install -y %s' % ' '.join(pkg.name for pkg in packages)
def select_installer(pack, runs, target_distribution=THIS_DISTRIBUTION,
check_distrib_compat=True):
"""Selects the right package installer for a Linux distribution.
"""
orig_distribution = runs[0]['distribution'][0].lower()
# Checks that the distributions match
if not check_distrib_compat:
pass
elif (set([orig_distribution, target_distribution]) ==
set(['ubuntu', 'debian'])):
# Packages are more or less the same on Debian and Ubuntu
logger.warning("Installing on %s but pack was generated on %s",
target_distribution.capitalize(),
orig_distribution.capitalize())
elif target_distribution is None:
raise CantFindInstaller("Target distribution is unknown; try using "
"--distribution")
elif orig_distribution != target_distribution:
raise CantFindInstaller(
"Installing on %s but pack was generated on %s" % (
target_distribution.capitalize(),
orig_distribution.capitalize()))
# Selects installation method
if target_distribution == 'ubuntu':
installer = AptInstaller('apt-get')
elif target_distribution == 'debian':
# aptitude is not installed by default, so use apt-get here too
installer = AptInstaller('apt-get')
elif (target_distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
target_distribution.startswith('red hat')):
installer = YumInstaller()
else:
raise CantFindInstaller("This distribution, \"%s\", is not supported" %
target_distribution.capitalize())
return installer
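# A minimal usage sketch (editor's assumption: `runs` mirrors the pack
# metadata shape consumed above):
#   runs = [{'distribution': ('ubuntu', '18.04')}]
#   installer = select_installer(None, runs, target_distribution='ubuntu')
#   installer.install_script(packages)  # -> 'apt-get install -y <names...>'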
| 34.579787
| 79
| 0.565913
|
from __future__ import division, print_function, unicode_literals
import logging
import platform
import subprocess
from reprounzip.unpackers.common.misc import UsageError
from reprounzip.utils import itervalues
logger = logging.getLogger('reprounzip')
THIS_DISTRIBUTION = platform.linux_distribution()[0].lower()
PKG_NOT_INSTALLED = "(not installed)"
class CantFindInstaller(UsageError):
def __init__(self, msg="Can't select a package installer"):
UsageError.__init__(self, msg)
class AptInstaller(object):
def __init__(self, binary):
self.bin = binary
def install(self, packages, assume_yes=False):
# Installs
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call([self.bin, 'install'] +
options + list(required_pkgs))
# Checks on packages
pkgs_status = self.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
            if status != PKG_NOT_INSTALLED:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['dpkg-query',
'--showformat=${Package;-50}\t${Version}\n',
'-W'] +
[pkg.name for pkg in packages],
stdout=subprocess.PIPE)
# name -> (pkg, installed_version)
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
fields = l.split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
def update_script(self):
return '%s update' % self.bin
def install_script(self, packages):
return '%s install -y %s' % (self.bin,
' '.join(pkg.name for pkg in packages))
class YumInstaller(object):
@classmethod
def install(cls, packages, assume_yes=False):
options = []
if assume_yes:
options.append('-y')
required_pkgs = set(pkg.name for pkg in packages)
r = subprocess.call(['yum', 'install'] + options + list(required_pkgs))
# Checks on packages
pkgs_status = cls.get_packages_info(packages)
for pkg, status in itervalues(pkgs_status):
            if status != PKG_NOT_INSTALLED:
required_pkgs.discard(pkg.name)
if required_pkgs:
logger.error("Error: some packages could not be installed:%s",
''.join("\n %s" % pkg for pkg in required_pkgs))
return r, pkgs_status
@staticmethod
def get_packages_info(packages):
if not packages:
return {}
p = subprocess.Popen(['rpm', '-q'] +
[pkg.name for pkg in packages] +
['--qf', '+%{NAME} %{VERSION}-%{RELEASE}\\n'],
stdout=subprocess.PIPE)
# name -> {pkg, installed_version}
pkgs_dict = dict((pkg.name, (pkg, PKG_NOT_INSTALLED))
for pkg in packages)
try:
for l in p.stdout:
                if l[:1] == b'+':
fields = l[1:].split()
if len(fields) == 2:
name = fields[0].decode('ascii')
status = fields[1].decode('ascii')
pkg, _ = pkgs_dict[name]
pkgs_dict[name] = pkg, status
finally:
p.wait()
return pkgs_dict
@staticmethod
def update_script():
return ''
@staticmethod
def install_script(packages):
return 'yum install -y %s' % ' '.join(pkg.name for pkg in packages)
def select_installer(pack, runs, target_distribution=THIS_DISTRIBUTION,
check_distrib_compat=True):
orig_distribution = runs[0]['distribution'][0].lower()
# Checks that the distributions match
if not check_distrib_compat:
pass
elif (set([orig_distribution, target_distribution]) ==
set(['ubuntu', 'debian'])):
# Packages are more or less the same on Debian and Ubuntu
logger.warning("Installing on %s but pack was generated on %s",
target_distribution.capitalize(),
orig_distribution.capitalize())
elif target_distribution is None:
raise CantFindInstaller("Target distribution is unknown; try using "
"--distribution")
elif orig_distribution != target_distribution:
raise CantFindInstaller(
"Installing on %s but pack was generated on %s" % (
target_distribution.capitalize(),
orig_distribution.capitalize()))
# Selects installation method
if target_distribution == 'ubuntu':
installer = AptInstaller('apt-get')
elif target_distribution == 'debian':
# aptitude is not installed by default, so use apt-get here too
installer = AptInstaller('apt-get')
elif (target_distribution in ('centos', 'centos linux',
'fedora', 'scientific linux') or
target_distribution.startswith('red hat')):
installer = YumInstaller()
else:
raise CantFindInstaller("This distribution, \"%s\", is not supported" %
target_distribution.capitalize())
return installer
| true
| true
|
f709e682d5539283db2e19111debac252c2cc9d9
| 32,992
|
py
|
Python
|
kubernetes_asyncio/client/models/extensions_v1beta1_pod_security_policy_spec.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/extensions_v1beta1_pod_security_policy_spec.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/extensions_v1beta1_pod_security_policy_spec.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1PodSecurityPolicySpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allow_privilege_escalation': 'bool',
'allowed_csi_drivers': 'list[ExtensionsV1beta1AllowedCSIDriver]',
'allowed_capabilities': 'list[str]',
'allowed_flex_volumes': 'list[ExtensionsV1beta1AllowedFlexVolume]',
'allowed_host_paths': 'list[ExtensionsV1beta1AllowedHostPath]',
'allowed_proc_mount_types': 'list[str]',
'allowed_unsafe_sysctls': 'list[str]',
'default_add_capabilities': 'list[str]',
'default_allow_privilege_escalation': 'bool',
'forbidden_sysctls': 'list[str]',
'fs_group': 'ExtensionsV1beta1FSGroupStrategyOptions',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'host_ports': 'list[ExtensionsV1beta1HostPortRange]',
'privileged': 'bool',
'read_only_root_filesystem': 'bool',
'required_drop_capabilities': 'list[str]',
'run_as_group': 'ExtensionsV1beta1RunAsGroupStrategyOptions',
'run_as_user': 'ExtensionsV1beta1RunAsUserStrategyOptions',
'se_linux': 'ExtensionsV1beta1SELinuxStrategyOptions',
'supplemental_groups': 'ExtensionsV1beta1SupplementalGroupsStrategyOptions',
'volumes': 'list[str]'
}
attribute_map = {
'allow_privilege_escalation': 'allowPrivilegeEscalation',
'allowed_csi_drivers': 'allowedCSIDrivers',
'allowed_capabilities': 'allowedCapabilities',
'allowed_flex_volumes': 'allowedFlexVolumes',
'allowed_host_paths': 'allowedHostPaths',
'allowed_proc_mount_types': 'allowedProcMountTypes',
'allowed_unsafe_sysctls': 'allowedUnsafeSysctls',
'default_add_capabilities': 'defaultAddCapabilities',
'default_allow_privilege_escalation': 'defaultAllowPrivilegeEscalation',
'forbidden_sysctls': 'forbiddenSysctls',
'fs_group': 'fsGroup',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'host_ports': 'hostPorts',
'privileged': 'privileged',
'read_only_root_filesystem': 'readOnlyRootFilesystem',
'required_drop_capabilities': 'requiredDropCapabilities',
'run_as_group': 'runAsGroup',
'run_as_user': 'runAsUser',
'se_linux': 'seLinux',
'supplemental_groups': 'supplementalGroups',
'volumes': 'volumes'
}
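    # Editor's note: attribute_map translates the snake_case Python attribute
    # names in openapi_types to the camelCase JSON keys of the Kubernetes API,
    # e.g. 'allow_privilege_escalation' -> 'allowPrivilegeEscalation'.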
def __init__(self, allow_privilege_escalation=None, allowed_csi_drivers=None, allowed_capabilities=None, allowed_flex_volumes=None, allowed_host_paths=None, allowed_proc_mount_types=None, allowed_unsafe_sysctls=None, default_add_capabilities=None, default_allow_privilege_escalation=None, forbidden_sysctls=None, fs_group=None, host_ipc=None, host_network=None, host_pid=None, host_ports=None, privileged=None, read_only_root_filesystem=None, required_drop_capabilities=None, run_as_group=None, run_as_user=None, se_linux=None, supplemental_groups=None, volumes=None): # noqa: E501
"""ExtensionsV1beta1PodSecurityPolicySpec - a model defined in OpenAPI""" # noqa: E501
self._allow_privilege_escalation = None
self._allowed_csi_drivers = None
self._allowed_capabilities = None
self._allowed_flex_volumes = None
self._allowed_host_paths = None
self._allowed_proc_mount_types = None
self._allowed_unsafe_sysctls = None
self._default_add_capabilities = None
self._default_allow_privilege_escalation = None
self._forbidden_sysctls = None
self._fs_group = None
self._host_ipc = None
self._host_network = None
self._host_pid = None
self._host_ports = None
self._privileged = None
self._read_only_root_filesystem = None
self._required_drop_capabilities = None
self._run_as_group = None
self._run_as_user = None
self._se_linux = None
self._supplemental_groups = None
self._volumes = None
self.discriminator = None
if allow_privilege_escalation is not None:
self.allow_privilege_escalation = allow_privilege_escalation
if allowed_csi_drivers is not None:
self.allowed_csi_drivers = allowed_csi_drivers
if allowed_capabilities is not None:
self.allowed_capabilities = allowed_capabilities
if allowed_flex_volumes is not None:
self.allowed_flex_volumes = allowed_flex_volumes
if allowed_host_paths is not None:
self.allowed_host_paths = allowed_host_paths
if allowed_proc_mount_types is not None:
self.allowed_proc_mount_types = allowed_proc_mount_types
if allowed_unsafe_sysctls is not None:
self.allowed_unsafe_sysctls = allowed_unsafe_sysctls
if default_add_capabilities is not None:
self.default_add_capabilities = default_add_capabilities
if default_allow_privilege_escalation is not None:
self.default_allow_privilege_escalation = default_allow_privilege_escalation
if forbidden_sysctls is not None:
self.forbidden_sysctls = forbidden_sysctls
self.fs_group = fs_group
if host_ipc is not None:
self.host_ipc = host_ipc
if host_network is not None:
self.host_network = host_network
if host_pid is not None:
self.host_pid = host_pid
if host_ports is not None:
self.host_ports = host_ports
if privileged is not None:
self.privileged = privileged
if read_only_root_filesystem is not None:
self.read_only_root_filesystem = read_only_root_filesystem
if required_drop_capabilities is not None:
self.required_drop_capabilities = required_drop_capabilities
if run_as_group is not None:
self.run_as_group = run_as_group
self.run_as_user = run_as_user
self.se_linux = se_linux
self.supplemental_groups = supplemental_groups
if volumes is not None:
self.volumes = volumes
@property
def allow_privilege_escalation(self):
"""Gets the allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:return: The allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._allow_privilege_escalation
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, allow_privilege_escalation):
"""Sets the allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec.
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:param allow_privilege_escalation: The allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._allow_privilege_escalation = allow_privilege_escalation
@property
def allowed_csi_drivers(self):
"""Gets the allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec. # noqa: E501
:return: The allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedCSIDriver]
"""
return self._allowed_csi_drivers
@allowed_csi_drivers.setter
def allowed_csi_drivers(self, allowed_csi_drivers):
"""Sets the allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec.
AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec. # noqa: E501
:param allowed_csi_drivers: The allowed_csi_drivers of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedCSIDriver]
"""
self._allowed_csi_drivers = allowed_csi_drivers
@property
def allowed_capabilities(self):
"""Gets the allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities. # noqa: E501
:return: The allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_capabilities
@allowed_capabilities.setter
def allowed_capabilities(self, allowed_capabilities):
"""Sets the allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities. # noqa: E501
:param allowed_capabilities: The allowed_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_capabilities = allowed_capabilities
@property
def allowed_flex_volumes(self):
"""Gets the allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field. # noqa: E501
:return: The allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedFlexVolume]
"""
return self._allowed_flex_volumes
@allowed_flex_volumes.setter
def allowed_flex_volumes(self, allowed_flex_volumes):
"""Sets the allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field. # noqa: E501
:param allowed_flex_volumes: The allowed_flex_volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedFlexVolume]
"""
self._allowed_flex_volumes = allowed_flex_volumes
@property
def allowed_host_paths(self):
"""Gets the allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. # noqa: E501
:return: The allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1AllowedHostPath]
"""
return self._allowed_host_paths
@allowed_host_paths.setter
def allowed_host_paths(self, allowed_host_paths):
"""Sets the allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. # noqa: E501
:param allowed_host_paths: The allowed_host_paths of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1AllowedHostPath]
"""
self._allowed_host_paths = allowed_host_paths
@property
def allowed_proc_mount_types(self):
"""Gets the allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled. # noqa: E501
:return: The allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_proc_mount_types
@allowed_proc_mount_types.setter
def allowed_proc_mount_types(self, allowed_proc_mount_types):
"""Sets the allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec.
AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled. # noqa: E501
:param allowed_proc_mount_types: The allowed_proc_mount_types of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_proc_mount_types = allowed_proc_mount_types
@property
def allowed_unsafe_sysctls(self):
"""Gets the allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:return: The allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._allowed_unsafe_sysctls
@allowed_unsafe_sysctls.setter
def allowed_unsafe_sysctls(self, allowed_unsafe_sysctls):
"""Sets the allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec.
allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:param allowed_unsafe_sysctls: The allowed_unsafe_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._allowed_unsafe_sysctls = allowed_unsafe_sysctls
@property
def default_add_capabilities(self):
"""Gets the default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list. # noqa: E501
:return: The default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._default_add_capabilities
@default_add_capabilities.setter
def default_add_capabilities(self, default_add_capabilities):
"""Sets the default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list. # noqa: E501
:param default_add_capabilities: The default_add_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._default_add_capabilities = default_add_capabilities
@property
def default_allow_privilege_escalation(self):
"""Gets the default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. # noqa: E501
:return: The default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._default_allow_privilege_escalation
@default_allow_privilege_escalation.setter
def default_allow_privilege_escalation(self, default_allow_privilege_escalation):
"""Sets the default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec.
defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. # noqa: E501
:param default_allow_privilege_escalation: The default_allow_privilege_escalation of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._default_allow_privilege_escalation = default_allow_privilege_escalation
@property
def forbidden_sysctls(self):
"""Gets the forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:return: The forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._forbidden_sysctls
@forbidden_sysctls.setter
def forbidden_sysctls(self, forbidden_sysctls):
"""Sets the forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec.
forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc. # noqa: E501
:param forbidden_sysctls: The forbidden_sysctls of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._forbidden_sysctls = forbidden_sysctls
@property
def fs_group(self):
"""Gets the fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1FSGroupStrategyOptions
"""
return self._fs_group
@fs_group.setter
def fs_group(self, fs_group):
"""Sets the fs_group of this ExtensionsV1beta1PodSecurityPolicySpec.
:param fs_group: The fs_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1FSGroupStrategyOptions
"""
if fs_group is None:
raise ValueError("Invalid value for `fs_group`, must not be `None`") # noqa: E501
self._fs_group = fs_group
@property
def host_ipc(self):
"""Gets the host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostIPC determines if the policy allows the use of HostIPC in the pod spec. # noqa: E501
:return: The host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
"""Sets the host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec.
hostIPC determines if the policy allows the use of HostIPC in the pod spec. # noqa: E501
:param host_ipc: The host_ipc of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_ipc = host_ipc
@property
def host_network(self):
"""Gets the host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. # noqa: E501
:return: The host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this ExtensionsV1beta1PodSecurityPolicySpec.
hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. # noqa: E501
:param host_network: The host_network of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_network = host_network
@property
def host_pid(self):
"""Gets the host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostPID determines if the policy allows the use of HostPID in the pod spec. # noqa: E501
:return: The host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
"""Sets the host_pid of this ExtensionsV1beta1PodSecurityPolicySpec.
hostPID determines if the policy allows the use of HostPID in the pod spec. # noqa: E501
:param host_pid: The host_pid of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._host_pid = host_pid
@property
def host_ports(self):
"""Gets the host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
hostPorts determines which host port ranges are allowed to be exposed. # noqa: E501
:return: The host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[ExtensionsV1beta1HostPortRange]
"""
return self._host_ports
@host_ports.setter
def host_ports(self, host_ports):
"""Sets the host_ports of this ExtensionsV1beta1PodSecurityPolicySpec.
hostPorts determines which host port ranges are allowed to be exposed. # noqa: E501
:param host_ports: The host_ports of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[ExtensionsV1beta1HostPortRange]
"""
self._host_ports = host_ports
@property
def privileged(self):
"""Gets the privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
privileged determines if a pod can request to be run as privileged. # noqa: E501
:return: The privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._privileged
@privileged.setter
def privileged(self, privileged):
"""Sets the privileged of this ExtensionsV1beta1PodSecurityPolicySpec.
privileged determines if a pod can request to be run as privileged. # noqa: E501
:param privileged: The privileged of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._privileged = privileged
@property
def read_only_root_filesystem(self):
"""Gets the read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. # noqa: E501
:return: The read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: bool
"""
return self._read_only_root_filesystem
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, read_only_root_filesystem):
"""Sets the read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec.
readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. # noqa: E501
:param read_only_root_filesystem: The read_only_root_filesystem of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._read_only_root_filesystem = read_only_root_filesystem
@property
def required_drop_capabilities(self):
"""Gets the required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. # noqa: E501
:return: The required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._required_drop_capabilities
@required_drop_capabilities.setter
def required_drop_capabilities(self, required_drop_capabilities):
"""Sets the required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec.
requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. # noqa: E501
:param required_drop_capabilities: The required_drop_capabilities of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._required_drop_capabilities = required_drop_capabilities
@property
def run_as_group(self):
"""Gets the run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1RunAsGroupStrategyOptions
"""
return self._run_as_group
@run_as_group.setter
def run_as_group(self, run_as_group):
"""Sets the run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec.
:param run_as_group: The run_as_group of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1RunAsGroupStrategyOptions
"""
self._run_as_group = run_as_group
@property
def run_as_user(self):
"""Gets the run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1RunAsUserStrategyOptions
"""
return self._run_as_user
@run_as_user.setter
def run_as_user(self, run_as_user):
"""Sets the run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec.
:param run_as_user: The run_as_user of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1RunAsUserStrategyOptions
"""
if run_as_user is None:
raise ValueError("Invalid value for `run_as_user`, must not be `None`") # noqa: E501
self._run_as_user = run_as_user
@property
def se_linux(self):
"""Gets the se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1SELinuxStrategyOptions
"""
return self._se_linux
@se_linux.setter
def se_linux(self, se_linux):
"""Sets the se_linux of this ExtensionsV1beta1PodSecurityPolicySpec.
:param se_linux: The se_linux of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1SELinuxStrategyOptions
"""
if se_linux is None:
raise ValueError("Invalid value for `se_linux`, must not be `None`") # noqa: E501
self._se_linux = se_linux
@property
def supplemental_groups(self):
"""Gets the supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:return: The supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: ExtensionsV1beta1SupplementalGroupsStrategyOptions
"""
return self._supplemental_groups
@supplemental_groups.setter
def supplemental_groups(self, supplemental_groups):
"""Sets the supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec.
:param supplemental_groups: The supplemental_groups of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: ExtensionsV1beta1SupplementalGroupsStrategyOptions
"""
if supplemental_groups is None:
raise ValueError("Invalid value for `supplemental_groups`, must not be `None`") # noqa: E501
self._supplemental_groups = supplemental_groups
@property
def volumes(self):
"""Gets the volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'. # noqa: E501
:return: The volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:rtype: list[str]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this ExtensionsV1beta1PodSecurityPolicySpec.
volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'. # noqa: E501
:param volumes: The volumes of this ExtensionsV1beta1PodSecurityPolicySpec. # noqa: E501
:type: list[str]
"""
self._volumes = volumes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1PodSecurityPolicySpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
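# A minimal usage sketch (editor's addition, not part of the generated
# client). The setters above enforce that fs_group, run_as_user, se_linux
# and supplemental_groups must not be None; the *_options names below are
# assumed to be instances of the corresponding strategy-options models:
#
#     spec = ExtensionsV1beta1PodSecurityPolicySpec(
#         fs_group=fs_group_options,
#         run_as_user=run_as_user_options,
#         se_linux=se_linux_options,
#         supplemental_groups=supplemental_groups_options,
#         privileged=False,
#         volumes=['configMap', 'secret'],
#     )
#     spec.to_dict()  # nested dict built from the openapi_types attributes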
| 45.506207
| 586
| 0.710839
|
import pprint
import re
import six
class ExtensionsV1beta1PodSecurityPolicySpec(object):
openapi_types = {
'allow_privilege_escalation': 'bool',
'allowed_csi_drivers': 'list[ExtensionsV1beta1AllowedCSIDriver]',
'allowed_capabilities': 'list[str]',
'allowed_flex_volumes': 'list[ExtensionsV1beta1AllowedFlexVolume]',
'allowed_host_paths': 'list[ExtensionsV1beta1AllowedHostPath]',
'allowed_proc_mount_types': 'list[str]',
'allowed_unsafe_sysctls': 'list[str]',
'default_add_capabilities': 'list[str]',
'default_allow_privilege_escalation': 'bool',
'forbidden_sysctls': 'list[str]',
'fs_group': 'ExtensionsV1beta1FSGroupStrategyOptions',
'host_ipc': 'bool',
'host_network': 'bool',
'host_pid': 'bool',
'host_ports': 'list[ExtensionsV1beta1HostPortRange]',
'privileged': 'bool',
'read_only_root_filesystem': 'bool',
'required_drop_capabilities': 'list[str]',
'run_as_group': 'ExtensionsV1beta1RunAsGroupStrategyOptions',
'run_as_user': 'ExtensionsV1beta1RunAsUserStrategyOptions',
'se_linux': 'ExtensionsV1beta1SELinuxStrategyOptions',
'supplemental_groups': 'ExtensionsV1beta1SupplementalGroupsStrategyOptions',
'volumes': 'list[str]'
}
attribute_map = {
'allow_privilege_escalation': 'allowPrivilegeEscalation',
'allowed_csi_drivers': 'allowedCSIDrivers',
'allowed_capabilities': 'allowedCapabilities',
'allowed_flex_volumes': 'allowedFlexVolumes',
'allowed_host_paths': 'allowedHostPaths',
'allowed_proc_mount_types': 'allowedProcMountTypes',
'allowed_unsafe_sysctls': 'allowedUnsafeSysctls',
'default_add_capabilities': 'defaultAddCapabilities',
'default_allow_privilege_escalation': 'defaultAllowPrivilegeEscalation',
'forbidden_sysctls': 'forbiddenSysctls',
'fs_group': 'fsGroup',
'host_ipc': 'hostIPC',
'host_network': 'hostNetwork',
'host_pid': 'hostPID',
'host_ports': 'hostPorts',
'privileged': 'privileged',
'read_only_root_filesystem': 'readOnlyRootFilesystem',
'required_drop_capabilities': 'requiredDropCapabilities',
'run_as_group': 'runAsGroup',
'run_as_user': 'runAsUser',
'se_linux': 'seLinux',
'supplemental_groups': 'supplementalGroups',
'volumes': 'volumes'
}
def __init__(self, allow_privilege_escalation=None, allowed_csi_drivers=None, allowed_capabilities=None, allowed_flex_volumes=None, allowed_host_paths=None, allowed_proc_mount_types=None, allowed_unsafe_sysctls=None, default_add_capabilities=None, default_allow_privilege_escalation=None, forbidden_sysctls=None, fs_group=None, host_ipc=None, host_network=None, host_pid=None, host_ports=None, privileged=None, read_only_root_filesystem=None, required_drop_capabilities=None, run_as_group=None, run_as_user=None, se_linux=None, supplemental_groups=None, volumes=None):
self._allow_privilege_escalation = None
self._allowed_csi_drivers = None
self._allowed_capabilities = None
self._allowed_flex_volumes = None
self._allowed_host_paths = None
self._allowed_proc_mount_types = None
self._allowed_unsafe_sysctls = None
self._default_add_capabilities = None
self._default_allow_privilege_escalation = None
self._forbidden_sysctls = None
self._fs_group = None
self._host_ipc = None
self._host_network = None
self._host_pid = None
self._host_ports = None
self._privileged = None
self._read_only_root_filesystem = None
self._required_drop_capabilities = None
self._run_as_group = None
self._run_as_user = None
self._se_linux = None
self._supplemental_groups = None
self._volumes = None
self.discriminator = None
if allow_privilege_escalation is not None:
self.allow_privilege_escalation = allow_privilege_escalation
if allowed_csi_drivers is not None:
self.allowed_csi_drivers = allowed_csi_drivers
if allowed_capabilities is not None:
self.allowed_capabilities = allowed_capabilities
if allowed_flex_volumes is not None:
self.allowed_flex_volumes = allowed_flex_volumes
if allowed_host_paths is not None:
self.allowed_host_paths = allowed_host_paths
if allowed_proc_mount_types is not None:
self.allowed_proc_mount_types = allowed_proc_mount_types
if allowed_unsafe_sysctls is not None:
self.allowed_unsafe_sysctls = allowed_unsafe_sysctls
if default_add_capabilities is not None:
self.default_add_capabilities = default_add_capabilities
if default_allow_privilege_escalation is not None:
self.default_allow_privilege_escalation = default_allow_privilege_escalation
if forbidden_sysctls is not None:
self.forbidden_sysctls = forbidden_sysctls
self.fs_group = fs_group
if host_ipc is not None:
self.host_ipc = host_ipc
if host_network is not None:
self.host_network = host_network
if host_pid is not None:
self.host_pid = host_pid
if host_ports is not None:
self.host_ports = host_ports
if privileged is not None:
self.privileged = privileged
if read_only_root_filesystem is not None:
self.read_only_root_filesystem = read_only_root_filesystem
if required_drop_capabilities is not None:
self.required_drop_capabilities = required_drop_capabilities
if run_as_group is not None:
self.run_as_group = run_as_group
self.run_as_user = run_as_user
self.se_linux = se_linux
self.supplemental_groups = supplemental_groups
if volumes is not None:
self.volumes = volumes
@property
def allow_privilege_escalation(self):
return self._allow_privilege_escalation
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, allow_privilege_escalation):
self._allow_privilege_escalation = allow_privilege_escalation
@property
def allowed_csi_drivers(self):
return self._allowed_csi_drivers
@allowed_csi_drivers.setter
def allowed_csi_drivers(self, allowed_csi_drivers):
self._allowed_csi_drivers = allowed_csi_drivers
@property
def allowed_capabilities(self):
return self._allowed_capabilities
@allowed_capabilities.setter
def allowed_capabilities(self, allowed_capabilities):
self._allowed_capabilities = allowed_capabilities
@property
def allowed_flex_volumes(self):
return self._allowed_flex_volumes
@allowed_flex_volumes.setter
def allowed_flex_volumes(self, allowed_flex_volumes):
self._allowed_flex_volumes = allowed_flex_volumes
@property
def allowed_host_paths(self):
return self._allowed_host_paths
@allowed_host_paths.setter
def allowed_host_paths(self, allowed_host_paths):
self._allowed_host_paths = allowed_host_paths
@property
def allowed_proc_mount_types(self):
return self._allowed_proc_mount_types
@allowed_proc_mount_types.setter
def allowed_proc_mount_types(self, allowed_proc_mount_types):
self._allowed_proc_mount_types = allowed_proc_mount_types
@property
def allowed_unsafe_sysctls(self):
return self._allowed_unsafe_sysctls
@allowed_unsafe_sysctls.setter
def allowed_unsafe_sysctls(self, allowed_unsafe_sysctls):
self._allowed_unsafe_sysctls = allowed_unsafe_sysctls
@property
def default_add_capabilities(self):
return self._default_add_capabilities
@default_add_capabilities.setter
def default_add_capabilities(self, default_add_capabilities):
self._default_add_capabilities = default_add_capabilities
@property
def default_allow_privilege_escalation(self):
return self._default_allow_privilege_escalation
@default_allow_privilege_escalation.setter
def default_allow_privilege_escalation(self, default_allow_privilege_escalation):
self._default_allow_privilege_escalation = default_allow_privilege_escalation
@property
def forbidden_sysctls(self):
return self._forbidden_sysctls
@forbidden_sysctls.setter
def forbidden_sysctls(self, forbidden_sysctls):
self._forbidden_sysctls = forbidden_sysctls
@property
def fs_group(self):
return self._fs_group
@fs_group.setter
def fs_group(self, fs_group):
if fs_group is None:
raise ValueError("Invalid value for `fs_group`, must not be `None`")
self._fs_group = fs_group
@property
def host_ipc(self):
return self._host_ipc
@host_ipc.setter
def host_ipc(self, host_ipc):
self._host_ipc = host_ipc
@property
def host_network(self):
return self._host_network
@host_network.setter
def host_network(self, host_network):
self._host_network = host_network
@property
def host_pid(self):
return self._host_pid
@host_pid.setter
def host_pid(self, host_pid):
self._host_pid = host_pid
@property
def host_ports(self):
return self._host_ports
@host_ports.setter
def host_ports(self, host_ports):
self._host_ports = host_ports
@property
def privileged(self):
return self._privileged
@privileged.setter
def privileged(self, privileged):
self._privileged = privileged
@property
def read_only_root_filesystem(self):
return self._read_only_root_filesystem
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, read_only_root_filesystem):
self._read_only_root_filesystem = read_only_root_filesystem
@property
def required_drop_capabilities(self):
return self._required_drop_capabilities
@required_drop_capabilities.setter
def required_drop_capabilities(self, required_drop_capabilities):
self._required_drop_capabilities = required_drop_capabilities
@property
def run_as_group(self):
return self._run_as_group
@run_as_group.setter
def run_as_group(self, run_as_group):
self._run_as_group = run_as_group
@property
def run_as_user(self):
return self._run_as_user
@run_as_user.setter
def run_as_user(self, run_as_user):
if run_as_user is None:
raise ValueError("Invalid value for `run_as_user`, must not be `None`")
self._run_as_user = run_as_user
@property
def se_linux(self):
return self._se_linux
@se_linux.setter
def se_linux(self, se_linux):
if se_linux is None:
raise ValueError("Invalid value for `se_linux`, must not be `None`")
self._se_linux = se_linux
@property
def supplemental_groups(self):
return self._supplemental_groups
@supplemental_groups.setter
def supplemental_groups(self, supplemental_groups):
if supplemental_groups is None:
raise ValueError("Invalid value for `supplemental_groups`, must not be `None`")
self._supplemental_groups = supplemental_groups
@property
def volumes(self):
return self._volumes
@volumes.setter
def volumes(self, volumes):
self._volumes = volumes
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ExtensionsV1beta1PodSecurityPolicySpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f709e710939fad538b4dab0bbc8c4fbe215db882
| 34,421
|
py
|
Python
|
python/cuml/feature_extraction/_vectorizers.py
|
siddheshmhatre/cuml
|
ed0e58c6b3ebfc17b944cdad7c04cd4af8860736
|
[
"Apache-2.0"
] | 2,743
|
2018-10-11T17:28:58.000Z
|
2022-03-31T19:20:50.000Z
|
python/cuml/feature_extraction/_vectorizers.py
|
siddheshmhatre/cuml
|
ed0e58c6b3ebfc17b944cdad7c04cd4af8860736
|
[
"Apache-2.0"
] | 4,280
|
2018-10-11T22:29:57.000Z
|
2022-03-31T22:02:44.000Z
|
python/cuml/feature_extraction/_vectorizers.py
|
siddheshmhatre/cuml
|
ed0e58c6b3ebfc17b944cdad7c04cd4af8860736
|
[
"Apache-2.0"
] | 454
|
2018-10-11T17:40:56.000Z
|
2022-03-25T17:07:09.000Z
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cudf import Series
from cuml.common.exceptions import NotFittedError
from cuml.feature_extraction._stop_words import ENGLISH_STOP_WORDS
from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2
from cuml.common.sparsefuncs import create_csr_matrix_from_count_df
from functools import partial
import cupy as cp
import numbers
import cudf
from cuml.common.type_utils import CUPY_SPARSE_DTYPES
from cudf.utils.dtypes import min_signed_type
import cuml.common.logger as logger
def _preprocess(doc, lower=False, remove_non_alphanumeric=False, delimiter=" ",
keep_underscore_char=True, remove_single_token_len=True):
"""
Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
    doc: cudf.Series[str]
        The document strings to preprocess
    lower: bool
        Whether to use str.lower to lowercase all of the text
    remove_non_alphanumeric: bool
        Whether or not to remove non-alphanumeric characters.
    delimiter: str
        The token delimiter, used when filtering out short tokens
    keep_underscore_char: bool
        Whether or not to keep the underscore character
    remove_single_token_len: bool
        Whether or not to remove tokens of length 1, mirroring sklearn
    Returns
    -------
    doc: cudf.Series[str]
        The preprocessed document strings
"""
if lower:
doc = doc.str.lower()
if remove_non_alphanumeric:
if keep_underscore_char:
            # why: sklearn by default keeps the `_` char along with
            # alphanumerics, and we currently don't have an easy way of
            # removing all non-alphanumeric chars except `_`;
            # the cudf.Series[str] round trip below works around it
temp_string = 'cumlSt'
doc = doc.str.replace('_', temp_string, regex=False)
doc = doc.str.filter_alphanum(' ', keep=True)
doc = doc.str.replace(temp_string, '_', regex=False)
else:
doc = doc.str.filter_alphanum(' ', keep=True)
# sklearn by default removes tokens of
# length 1, if its remove alphanumerics
if remove_single_token_len:
doc = doc.str.filter_tokens(2)
return doc
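# Illustration (editor's addition): assuming a GPU with cudf available,
# _preprocess lowercases, strips non-alphanumeric characters while
# preserving underscores, and drops length-1 tokens like sklearn does:
#
#     import cudf
#     docs = cudf.Series(["Hello, World_1! a"])
#     _preprocess(docs, lower=True, remove_non_alphanumeric=True)
#     # -> something like: "hello  world_1" ("a" is dropped because
#     # tokens shorter than 2 characters are filtered out)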
class _VectorizerMixin:
"""
Provides common code for text vectorizers (tokenization logic).
"""
def _remove_stop_words(self, doc):
"""
Remove stop words only if needed.
"""
if self.analyzer == 'word' and self.stop_words is not None:
stop_words = Series(self._get_stop_words())
doc = doc.str.replace_tokens(stop_words,
replacements=self.delimiter,
delimiter=self.delimiter)
return doc
def build_preprocessor(self):
"""
Return a function to preprocess the text before tokenization.
If analyzer == 'word' and stop_words is not None, stop words are
removed from the input documents after preprocessing.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
preprocess = self.preprocessor
else:
remove_non_alpha = self.analyzer == 'word'
preprocess = partial(_preprocess, lower=self.lowercase,
remove_non_alphanumeric=remove_non_alpha,
delimiter=self.delimiter)
return lambda doc: self._remove_stop_words(preprocess(doc))
def _get_stop_words(self):
"""
Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
if self.stop_words == "english":
return list(ENGLISH_STOP_WORDS)
elif isinstance(self.stop_words, str):
raise ValueError("not a built-in stop list: %s" % self.stop_words)
elif self.stop_words is None:
return None
else: # assume it's a collection
return list(self.stop_words)
def get_char_ngrams(self, ngram_size, str_series, doc_id_sr):
"""
Handles ngram generation for characters analyzers.
When analyzer is 'char_wb', we generate ngrams within word boundaries,
meaning we need to first tokenize and pad each token with a delimiter.
"""
if self.analyzer == 'char_wb' and ngram_size != 1:
token_count = str_series.str.token_count(self.delimiter)
tokens = str_series.str.tokenize(self.delimiter)
del str_series
padding = Series(self.delimiter).repeat(len(tokens))
tokens = tokens.str.cat(padding)
padding = padding.reset_index(drop=True)
tokens = padding.str.cat(tokens)
tokens = tokens.reset_index(drop=True)
ngram_sr = tokens.str.character_ngrams(n=ngram_size)
doc_id_df = cudf.DataFrame({
'doc_id': doc_id_sr.repeat(token_count).reset_index(drop=True),
# formula to count ngrams given number of letters per token:
'ngram_count': tokens.str.len() - (ngram_size - 1)
})
del tokens
ngram_count = doc_id_df.groupby('doc_id',
sort=True).sum()['ngram_count']
return ngram_sr, ngram_count, token_count
if ngram_size == 1:
token_count = str_series.str.len()
ngram_sr = str_series.str.character_tokenize()
del str_series
elif self.analyzer == 'char':
token_count = str_series.str.len()
ngram_sr = str_series.str.character_ngrams(n=ngram_size)
del str_series
ngram_count = token_count - (ngram_size - 1)
return ngram_sr, ngram_count, token_count
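    # Worked example (editor's addition) of the count formula above: a
    # token with L characters yields L - (n - 1) character ngrams of size
    # n, e.g. "cuml" with n=2 gives 4 - 1 = 3 bigrams: "cu", "um", "ml".
    # For 'char_wb', each token is first padded with the delimiter on both
    # sides, so "cuml" becomes " cuml " and yields 5 bigrams instead.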
def get_ngrams(self, str_series, ngram_size, doc_id_sr):
"""
This returns the ngrams for the string series
Parameters
----------
str_series : (cudf.Series)
String series to tokenize
ngram_size : int
Gram level to get (1 for unigram, 2 for bigram etc)
doc_id_sr : cudf.Series
Int series containing documents ids
"""
if self.analyzer == 'word':
token_count_sr = str_series.str.token_count(self.delimiter)
ngram_sr = str_series.str.ngrams_tokenize(n=ngram_size,
separator=" ",
delimiter=self.delimiter)
# formula to count ngrams given number of tokens x per doc: x-(n-1)
ngram_count = token_count_sr - (ngram_size - 1)
else:
ngram_sr, ngram_count, token_count_sr = self.get_char_ngrams(
ngram_size, str_series, doc_id_sr
)
not_empty_docs = token_count_sr > 0
doc_id_sr = doc_id_sr[not_empty_docs]
ngram_count = ngram_count[not_empty_docs]
doc_id_sr = doc_id_sr.repeat(ngram_count).reset_index(drop=True)
tokenized_df = cudf.DataFrame()
tokenized_df["doc_id"] = doc_id_sr
tokenized_df["token"] = ngram_sr
return tokenized_df
def _create_tokenized_df(self, docs):
"""
Creates a tokenized DataFrame from a string Series.
Each row describes the token string and the corresponding document id.
"""
min_n, max_n = self.ngram_range
doc_id = cp.arange(start=0, stop=len(docs), dtype=cp.int32)
doc_id = Series(doc_id)
tokenized_df_ls = [
self.get_ngrams(docs, n, doc_id)
for n in range(min_n, max_n + 1)
]
del docs
tokenized_df = cudf.concat(tokenized_df_ls)
tokenized_df = tokenized_df.reset_index(drop=True)
return tokenized_df
def _compute_empty_doc_ids(self, count_df, n_doc):
"""
Compute empty docs ids using the remaining docs, given the total number
of documents.
"""
remaining_docs = count_df['doc_id'].unique()
dtype = min_signed_type(n_doc)
doc_ids = cudf.DataFrame(data={'all_ids': cp.arange(0, n_doc,
dtype=dtype)},
dtype=dtype)
empty_docs = doc_ids - doc_ids.iloc[remaining_docs]
empty_ids = empty_docs[empty_docs['all_ids'].isnull()].index.values
return empty_ids
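    # Tiny illustration (editor's addition): with n_doc=4 and count_df
    # only containing rows for doc_ids {0, 2}, the subtraction above
    # leaves nulls at positions 1 and 3, so this returns [1, 3].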
def _validate_params(self):
"""
Check validity of ngram_range parameter
"""
min_n, max_m = self.ngram_range
msg = ""
if min_n < 1:
msg += "lower boundary must be >= 1. "
if min_n > max_m:
msg += "lower boundary larger than the upper boundary. "
if msg != "":
msg = f"Invalid value for ngram_range={self.ngram_range} {msg}"
raise ValueError(msg)
if hasattr(self, "n_features"):
if not isinstance(self.n_features, numbers.Integral):
raise TypeError(
f"n_features must be integral, got {self.n_features}\
({type(self.n_features)})."
)
def _warn_for_unused_params(self):
if self.analyzer != "word" and self.stop_words is not None:
logger.warn(
"The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'"
)
def _check_sklearn_params(self, analyzer, sklearn_params):
if callable(analyzer):
raise ValueError(
"cuML does not support callable analyzer,"
" please refer to the cuML documentation for"
" more information."
)
for key, vals in sklearn_params.items():
if vals is not None:
                raise TypeError(
                    f"The Scikit-learn variable '{key}' is not supported"
                    " in cuML, please read the cuML documentation for"
                    " more information."
                )
def _document_frequency(X):
"""
    Count the number of documents each token appears in (document frequency).
"""
doc_freq = (
X[["token", "doc_id"]]
.groupby(["token"], sort=True)
.count()
)
return doc_freq["doc_id"].values
def _term_frequency(X):
"""
Count the number of occurrences of each term in X.
"""
term_freq = (
X[["token", "count"]]
.groupby(["token"], sort=True)
.sum()
)
return term_freq["count"].values
class CountVectorizer(_VectorizerMixin):
"""
Convert a collection of text documents to a matrix of token counts
If you do not provide an a-priori dictionary then the number of features
will be equal to the vocabulary size found by analyzing the data.
Parameters
----------
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the input documents.
If None, no stop words will be used. max_df can be set to a value
to automatically detect and filter stop words based on intra corpus
document frequency of terms.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n
        such that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
analyzer : string, {'word', 'char', 'char_wb'}
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : cudf.Series, optional
If not given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
delimiter : str, whitespace by default
String used as a replacement for stop words if stop_words is not None.
Typically the delimiting character between words is a good choice.
Attributes
----------
vocabulary_ : cudf.Series[str]
Array mapping from feature integer indices to feature name.
stop_words_ : cudf.Series[str]
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
"""
def __init__(self, input=None, encoding=None, decode_error=None,
strip_accents=None, lowercase=True, preprocessor=None,
tokenizer=None, stop_words=None, token_pattern=None,
ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=cp.float32, delimiter=' '):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if not isinstance(max_features, int) or max_features <= 0:
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
sklearn_params = {"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern}
self._check_sklearn_params(analyzer, sklearn_params)
def _count_vocab(self, tokenized_df):
"""
Count occurrences of tokens in each document.
"""
# Transform string tokens into token indexes from 0 to len(vocab)
# The indexes are based on lexicographical ordering.
tokenized_df['token'] = tokenized_df['token'].astype('category')
tokenized_df['token'] = tokenized_df['token'].cat.set_categories(
self.vocabulary_
)._column.codes
# Count of each token in each document
count_df = (
tokenized_df[["doc_id", "token"]]
.groupby(["doc_id", "token"], sort=True)
.size()
.reset_index()
.rename({0: "count"}, axis=1)
)
return count_df
def _filter_and_renumber(self, df, keep_values, column):
"""
Filter dataframe to keep only values from column matching
keep_values.
"""
df[column] = (
df[column].astype('category')
.cat.set_categories(keep_values)
._column.codes
)
df = df.dropna(subset=column)
return df
def _limit_features(self, count_df, vocab, high, low, limit):
"""
Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
Sets `self.vocabulary_` and `self.stop_words_` with the new values.
"""
if high is None and low is None and limit is None:
self.stop_words_ = None
return count_df
document_frequency = _document_frequency(count_df)
mask = cp.ones(len(document_frequency), dtype=bool)
if high is not None:
mask &= document_frequency <= high
if low is not None:
mask &= document_frequency >= low
if limit is not None and mask.sum() > limit:
term_frequency = _term_frequency(count_df)
mask_inds = (-term_frequency[mask]).argsort()[:limit]
new_mask = cp.zeros(len(document_frequency), dtype=bool)
new_mask[cp.where(mask)[0][mask_inds]] = True
mask = new_mask
keep_idx = cp.where(mask)[0].astype(cp.int32)
keep_num = keep_idx.shape[0]
if keep_num == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
if len(vocab) - keep_num != 0:
count_df = self._filter_and_renumber(count_df, keep_idx, 'token')
self.stop_words_ = vocab[~mask].reset_index(drop=True)
self.vocabulary_ = vocab[mask].reset_index(drop=True)
return count_df
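    # Worked sketch (editor's addition): with high=2 and low=2 on a corpus
    # of 3 documents, a token appearing in all 3 documents is pruned as
    # too common (document frequency 3 > high) and a token appearing in a
    # single document is pruned as too rare (1 < low); both end up in
    # self.stop_words_, while the surviving tokens are renumbered to a
    # dense 0..k-1 range by _filter_and_renumber.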
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def fit(self, raw_documents):
"""
Build a vocabulary of all tokens in the raw documents.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents):
"""
Build the vocabulary and return document-term matrix.
Equivalent to ``self.fit(X).transform(X)`` but preprocess `X` only
once.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : cupy csr array of shape (n_samples, n_features)
Document-term matrix.
"""
self._warn_for_unused_params()
self._validate_params()
self._fixed_vocabulary = self.vocabulary is not None
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
if self._fixed_vocabulary:
self.vocabulary_ = self.vocabulary
else:
self.vocabulary_ = tokenized_df["token"].unique()
count_df = self._count_vocab(tokenized_df)
if not self._fixed_vocabulary:
max_doc_count = (self.max_df
if isinstance(self.max_df, numbers.Integral)
else self.max_df * n_doc)
min_doc_count = (self.min_df
if isinstance(self.min_df, numbers.Integral)
else self.min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
count_df = self._limit_features(count_df, self.vocabulary_,
max_doc_count,
min_doc_count,
self.max_features)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(count_df, empty_doc_ids,
n_doc, len(self.vocabulary_),
dtype=self.dtype)
if self.binary:
X.data.fill(1)
return X
def transform(self, raw_documents):
"""
Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : cupy csr array of shape (n_samples, n_features)
Document-term matrix.
"""
if not hasattr(self, "vocabulary_"):
if self.vocabulary is not None:
self.vocabulary_ = self.vocabulary
else:
raise NotFittedError()
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
count_df = self._count_vocab(tokenized_df)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, len(self.vocabulary_),
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""
Return terms per document with nonzero entries in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_inv : list of cudf.Series of shape (n_samples,)
List of Series of terms.
"""
vocab = Series(self.vocabulary_)
return [vocab[X[i, :].indices] for i in range(X.shape[0])]
def get_feature_names(self):
"""
Array mapping from feature integer indices to feature name.
Returns
-------
feature_names : Series
A list of feature names.
"""
return self.vocabulary_
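# Usage sketch (editor's addition; the import path is assumed to mirror
# the HashingVectorizer example further below):
#
#     import cudf
#     from cuml.feature_extraction.text import CountVectorizer
#     corpus = cudf.Series(['gpu text mining', 'text mining on gpu'])
#     vec = CountVectorizer()
#     X = vec.fit_transform(corpus)   # cupy CSR matrix, shape (2, 4)
#     vec.get_feature_names()         # the 4 unique tokens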
class HashingVectorizer(_VectorizerMixin):
"""
Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a cupyx.scipy.sparse matrix
holding token occurrence counts (or binary occurrence information),
possibly normalized as token frequencies if norm='l1' or projected on the
euclidean unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it is very low memory and scalable to large datasets as there is no
      need to store a vocabulary dictionary in memory, which is even more
      important on GPUs, which are often memory constrained
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as
there is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices
to string feature names) which can be a problem when trying to
introspect which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
stop_words : string {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
        word n-grams or char n-grams to be extracted. All values of n
        such that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
analyzer : string, {'word', 'char', 'char_wb'}
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
        When True, an alternating sign is added to the features so as to
        approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
delimiter : str, whitespace by default
String used as a replacement for stop words if `stop_words` is not
None. Typically the delimiting character between words is a good
choice.
Examples
--------
.. code-block:: python
from cuml.feature_extraction.text import HashingVectorizer
corpus = [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
'Is this the first document?',
]
vectorizer = HashingVectorizer(n_features=2**4)
X = vectorizer.fit_transform(corpus)
print(X.shape)
Output:
.. code-block:: python
(4, 16)
See Also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(
self,
input=None,
encoding=None,
decode_error=None,
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=None,
ngram_range=(1, 1),
analyzer="word",
n_features=(2 ** 20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=cp.float32,
delimiter=" ",
):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
if self.norm not in ("l1", "l2", None):
raise ValueError(f"{self.norm} is not a supported norm")
sklearn_params = {
"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern,
}
self._check_sklearn_params(analyzer, sklearn_params)
def partial_fit(self, X, y=None):
"""
Does nothing: This transformer is stateless
This method is just there to mark the fact that this transformer
can work in a streaming setup.
Parameters
----------
X : cudf.Series(A Series of string documents).
"""
return self
def fit(self, X, y=None):
"""
This method only checks the input type and the model parameter.
It does not do anything meaningful as this transformer is stateless
Parameters
----------
X : cudf.Series
A Series of string documents
"""
if not (
isinstance(X, cudf.Series)
and isinstance(X._column, cudf.core.column.StringColumn)
):
raise ValueError(f"cudf.Series([str]) expected ,got {type(X)}")
self._warn_for_unused_params()
self._validate_params()
return self
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def _count_hash(self, tokenized_df):
"""
Count occurrences of tokens in each document.
"""
# Transform string tokens into token indexes from 0 to n_features
tokenized_df["token"] = tokenized_df["token"].hash_values()
if self.alternate_sign:
# below logic is equivalent to: value *= ((h >= 0) * 2) - 1
tokenized_df["value"] = ((tokenized_df["token"] >= 0) * 2) - 1
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).value.sum()
count_ser.name = "count"
else:
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).size()
count_ser.name = "count"
count_df = count_ser.reset_index(drop=False)
del count_ser, tokenized_df
return count_df
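    # Numeric illustration (editor's addition) of the alternate_sign
    # branch: a signed 32-bit hash h contributes value +1 when h >= 0 and
    # -1 when h < 0 (the ((h >= 0) * 2) - 1 trick above), and its column
    # index is abs(h) % n_features; summing the signed values per
    # (doc_id, token) yields counts whose inner products are approximately
    # preserved despite hash collisions.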
def fit_transform(self, X, y=None):
"""
Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse CuPy CSR matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def transform(self, raw_documents):
"""
Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : cudf.Series
A Series of string documents
Returns
-------
X : sparse CuPy CSR matrix of shape (n_samples, n_features)
Document-term matrix.
"""
docs = self._preprocess(raw_documents)
del raw_documents
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
del docs
count_df = self._count_hash(tokenized_df)
del tokenized_df
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, self.n_features,
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
if self.norm:
if self.norm == "l1":
csr_row_normalize_l1(X, inplace=True)
elif self.norm == "l2":
csr_row_normalize_l2(X, inplace=True)
return X
| 37.051668
| 79
| 0.604863
|
from cudf import Series
from cuml.common.exceptions import NotFittedError
from cuml.feature_extraction._stop_words import ENGLISH_STOP_WORDS
from cuml.common.sparsefuncs import csr_row_normalize_l1, csr_row_normalize_l2
from cuml.common.sparsefuncs import create_csr_matrix_from_count_df
from functools import partial
import cupy as cp
import numbers
import cudf
from cuml.common.type_utils import CUPY_SPARSE_DTYPES
from cudf.utils.dtypes import min_signed_type
import cuml.common.logger as logger
def _preprocess(doc, lower=False, remove_non_alphanumeric=False, delimiter=" ",
keep_underscore_char=True, remove_single_token_len=True):
if lower:
doc = doc.str.lower()
if remove_non_alphanumeric:
if keep_underscore_char:
temp_string = 'cumlSt'
doc = doc.str.replace('_', temp_string, regex=False)
doc = doc.str.filter_alphanum(' ', keep=True)
doc = doc.str.replace(temp_string, '_', regex=False)
else:
doc = doc.str.filter_alphanum(' ', keep=True)
if remove_single_token_len:
doc = doc.str.filter_tokens(2)
return doc
class _VectorizerMixin:
def _remove_stop_words(self, doc):
if self.analyzer == 'word' and self.stop_words is not None:
stop_words = Series(self._get_stop_words())
doc = doc.str.replace_tokens(stop_words,
replacements=self.delimiter,
delimiter=self.delimiter)
return doc
def build_preprocessor(self):
if self.preprocessor is not None:
preprocess = self.preprocessor
else:
remove_non_alpha = self.analyzer == 'word'
preprocess = partial(_preprocess, lower=self.lowercase,
remove_non_alphanumeric=remove_non_alpha,
delimiter=self.delimiter)
return lambda doc: self._remove_stop_words(preprocess(doc))
def _get_stop_words(self):
if self.stop_words == "english":
return list(ENGLISH_STOP_WORDS)
elif isinstance(self.stop_words, str):
raise ValueError("not a built-in stop list: %s" % self.stop_words)
elif self.stop_words is None:
return None
else: return list(self.stop_words)
def get_char_ngrams(self, ngram_size, str_series, doc_id_sr):
if self.analyzer == 'char_wb' and ngram_size != 1:
token_count = str_series.str.token_count(self.delimiter)
tokens = str_series.str.tokenize(self.delimiter)
del str_series
padding = Series(self.delimiter).repeat(len(tokens))
tokens = tokens.str.cat(padding)
padding = padding.reset_index(drop=True)
tokens = padding.str.cat(tokens)
tokens = tokens.reset_index(drop=True)
ngram_sr = tokens.str.character_ngrams(n=ngram_size)
doc_id_df = cudf.DataFrame({
'doc_id': doc_id_sr.repeat(token_count).reset_index(drop=True),
# formula to count ngrams given number of letters per token:
'ngram_count': tokens.str.len() - (ngram_size - 1)
})
del tokens
ngram_count = doc_id_df.groupby('doc_id',
sort=True).sum()['ngram_count']
return ngram_sr, ngram_count, token_count
if ngram_size == 1:
token_count = str_series.str.len()
ngram_sr = str_series.str.character_tokenize()
del str_series
elif self.analyzer == 'char':
token_count = str_series.str.len()
ngram_sr = str_series.str.character_ngrams(n=ngram_size)
del str_series
ngram_count = token_count - (ngram_size - 1)
return ngram_sr, ngram_count, token_count
def get_ngrams(self, str_series, ngram_size, doc_id_sr):
if self.analyzer == 'word':
token_count_sr = str_series.str.token_count(self.delimiter)
ngram_sr = str_series.str.ngrams_tokenize(n=ngram_size,
separator=" ",
delimiter=self.delimiter)
# formula to count ngrams given number of tokens x per doc: x-(n-1)
ngram_count = token_count_sr - (ngram_size - 1)
else:
ngram_sr, ngram_count, token_count_sr = self.get_char_ngrams(
ngram_size, str_series, doc_id_sr
)
not_empty_docs = token_count_sr > 0
doc_id_sr = doc_id_sr[not_empty_docs]
ngram_count = ngram_count[not_empty_docs]
doc_id_sr = doc_id_sr.repeat(ngram_count).reset_index(drop=True)
tokenized_df = cudf.DataFrame()
tokenized_df["doc_id"] = doc_id_sr
tokenized_df["token"] = ngram_sr
return tokenized_df
def _create_tokenized_df(self, docs):
min_n, max_n = self.ngram_range
doc_id = cp.arange(start=0, stop=len(docs), dtype=cp.int32)
doc_id = Series(doc_id)
tokenized_df_ls = [
self.get_ngrams(docs, n, doc_id)
for n in range(min_n, max_n + 1)
]
del docs
tokenized_df = cudf.concat(tokenized_df_ls)
tokenized_df = tokenized_df.reset_index(drop=True)
return tokenized_df
def _compute_empty_doc_ids(self, count_df, n_doc):
remaining_docs = count_df['doc_id'].unique()
dtype = min_signed_type(n_doc)
doc_ids = cudf.DataFrame(data={'all_ids': cp.arange(0, n_doc,
dtype=dtype)},
dtype=dtype)
empty_docs = doc_ids - doc_ids.iloc[remaining_docs]
empty_ids = empty_docs[empty_docs['all_ids'].isnull()].index.values
return empty_ids
def _validate_params(self):
min_n, max_m = self.ngram_range
msg = ""
if min_n < 1:
msg += "lower boundary must be >= 1. "
if min_n > max_m:
msg += "lower boundary larger than the upper boundary. "
if msg != "":
msg = f"Invalid value for ngram_range={self.ngram_range} {msg}"
raise ValueError(msg)
if hasattr(self, "n_features"):
if not isinstance(self.n_features, numbers.Integral):
raise TypeError(
f"n_features must be integral, got {self.n_features}\
({type(self.n_features)})."
)
def _warn_for_unused_params(self):
if self.analyzer != "word" and self.stop_words is not None:
logger.warn(
"The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'"
)
def _check_sklearn_params(self, analyzer, sklearn_params):
if callable(analyzer):
raise ValueError(
"cuML does not support callable analyzer,"
" please refer to the cuML documentation for"
" more information."
)
for key, vals in sklearn_params.items():
if vals is not None:
                raise TypeError(
                    f"The Scikit-learn variable '{key}' is not supported"
                    " in cuML, please read the cuML documentation for"
                    " more information."
                )
def _document_frequency(X):
doc_freq = (
X[["token", "doc_id"]]
.groupby(["token"], sort=True)
.count()
)
return doc_freq["doc_id"].values
def _term_frequency(X):
term_freq = (
X[["token", "count"]]
.groupby(["token"], sort=True)
.sum()
)
return term_freq["count"].values
class CountVectorizer(_VectorizerMixin):
def __init__(self, input=None, encoding=None, decode_error=None,
strip_accents=None, lowercase=True, preprocessor=None,
tokenizer=None, stop_words=None, token_pattern=None,
ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=cp.float32, delimiter=' '):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if not isinstance(max_features, int) or max_features <= 0:
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
sklearn_params = {"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern}
self._check_sklearn_params(analyzer, sklearn_params)
def _count_vocab(self, tokenized_df):
# Transform string tokens into token indexes from 0 to len(vocab)
# The indexes are based on lexicographical ordering.
tokenized_df['token'] = tokenized_df['token'].astype('category')
tokenized_df['token'] = tokenized_df['token'].cat.set_categories(
self.vocabulary_
)._column.codes
# Count of each token in each document
count_df = (
tokenized_df[["doc_id", "token"]]
.groupby(["doc_id", "token"], sort=True)
.size()
.reset_index()
.rename({0: "count"}, axis=1)
)
return count_df
def _filter_and_renumber(self, df, keep_values, column):
df[column] = (
df[column].astype('category')
.cat.set_categories(keep_values)
._column.codes
)
df = df.dropna(subset=column)
return df
def _limit_features(self, count_df, vocab, high, low, limit):
if high is None and low is None and limit is None:
self.stop_words_ = None
return count_df
document_frequency = _document_frequency(count_df)
mask = cp.ones(len(document_frequency), dtype=bool)
if high is not None:
mask &= document_frequency <= high
if low is not None:
mask &= document_frequency >= low
if limit is not None and mask.sum() > limit:
term_frequency = _term_frequency(count_df)
mask_inds = (-term_frequency[mask]).argsort()[:limit]
new_mask = cp.zeros(len(document_frequency), dtype=bool)
new_mask[cp.where(mask)[0][mask_inds]] = True
mask = new_mask
keep_idx = cp.where(mask)[0].astype(cp.int32)
keep_num = keep_idx.shape[0]
if keep_num == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
if len(vocab) - keep_num != 0:
count_df = self._filter_and_renumber(count_df, keep_idx, 'token')
self.stop_words_ = vocab[~mask].reset_index(drop=True)
self.vocabulary_ = vocab[mask].reset_index(drop=True)
return count_df
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def fit(self, raw_documents):
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents):
self._warn_for_unused_params()
self._validate_params()
self._fixed_vocabulary = self.vocabulary is not None
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
if self._fixed_vocabulary:
self.vocabulary_ = self.vocabulary
else:
self.vocabulary_ = tokenized_df["token"].unique()
count_df = self._count_vocab(tokenized_df)
if not self._fixed_vocabulary:
max_doc_count = (self.max_df
if isinstance(self.max_df, numbers.Integral)
else self.max_df * n_doc)
min_doc_count = (self.min_df
if isinstance(self.min_df, numbers.Integral)
else self.min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
count_df = self._limit_features(count_df, self.vocabulary_,
max_doc_count,
min_doc_count,
self.max_features)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(count_df, empty_doc_ids,
n_doc, len(self.vocabulary_),
dtype=self.dtype)
if self.binary:
X.data.fill(1)
return X
def transform(self, raw_documents):
if not hasattr(self, "vocabulary_"):
if self.vocabulary is not None:
self.vocabulary_ = self.vocabulary
else:
raise NotFittedError()
docs = self._preprocess(raw_documents)
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
count_df = self._count_vocab(tokenized_df)
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, len(self.vocabulary_),
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
vocab = Series(self.vocabulary_)
return [vocab[X[i, :].indices] for i in range(X.shape[0])]
def get_feature_names(self):
return self.vocabulary_
class HashingVectorizer(_VectorizerMixin):
def __init__(
self,
input=None,
encoding=None,
decode_error=None,
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=None,
ngram_range=(1, 1),
analyzer="word",
n_features=(2 ** 20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=cp.float32,
delimiter=" ",
):
self.preprocessor = preprocessor
self.analyzer = analyzer
self.lowercase = lowercase
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
self.delimiter = delimiter
if dtype not in CUPY_SPARSE_DTYPES:
msg = f"Expected dtype in {CUPY_SPARSE_DTYPES}, got {dtype}"
raise ValueError(msg)
if self.norm not in ("l1", "l2", None):
raise ValueError(f"{self.norm} is not a supported norm")
sklearn_params = {
"input": input,
"encoding": encoding,
"decode_error": decode_error,
"strip_accents": strip_accents,
"tokenizer": tokenizer,
"token_pattern": token_pattern,
}
self._check_sklearn_params(analyzer, sklearn_params)
def partial_fit(self, X, y=None):
return self
def fit(self, X, y=None):
if not (
isinstance(X, cudf.Series)
and isinstance(X._column, cudf.core.column.StringColumn)
):
raise ValueError(f"cudf.Series([str]) expected ,got {type(X)}")
self._warn_for_unused_params()
self._validate_params()
return self
def _preprocess(self, raw_documents):
preprocess = self.build_preprocessor()
return preprocess(raw_documents)
def _count_hash(self, tokenized_df):
# Transform string tokens into token indexes from 0 to n_features
tokenized_df["token"] = tokenized_df["token"].hash_values()
if self.alternate_sign:
# below logic is equivalent to: value *= ((h >= 0) * 2) - 1
tokenized_df["value"] = ((tokenized_df["token"] >= 0) * 2) - 1
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).value.sum()
count_ser.name = "count"
else:
tokenized_df["token"] = tokenized_df["token"].abs() %\
self.n_features
count_ser = tokenized_df.groupby(["doc_id", "token"],
sort=True).size()
count_ser.name = "count"
count_df = count_ser.reset_index(drop=False)
del count_ser, tokenized_df
return count_df
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def transform(self, raw_documents):
docs = self._preprocess(raw_documents)
del raw_documents
n_doc = len(docs)
tokenized_df = self._create_tokenized_df(docs)
del docs
count_df = self._count_hash(tokenized_df)
del tokenized_df
empty_doc_ids = self._compute_empty_doc_ids(count_df, n_doc)
X = create_csr_matrix_from_count_df(
count_df, empty_doc_ids, n_doc, self.n_features,
dtype=self.dtype
)
if self.binary:
X.data.fill(1)
if self.norm:
if self.norm == "l1":
csr_row_normalize_l1(X, inplace=True)
elif self.norm == "l2":
csr_row_normalize_l2(X, inplace=True)
return X
| true
| true
|
f709e7d57692258edd0459ea50391cace46e9b24
| 2,825
|
py
|
Python
|
chillow/service/ai/search_tree_pathfinding_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 3
|
2021-01-17T23:32:07.000Z
|
2022-01-30T14:49:16.000Z
|
chillow/service/ai/search_tree_pathfinding_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2
|
2021-01-17T13:37:56.000Z
|
2021-04-14T12:28:49.000Z
|
chillow/service/ai/search_tree_pathfinding_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2
|
2021-04-02T14:53:38.000Z
|
2021-04-20T11:10:17.000Z
|
from multiprocessing import Value
from random import choice
from chillow.service.ai.pathfinding_ai import PathfindingAI
from chillow.service.ai.search_tree_ai import SearchTreeAI
from chillow.model.action import Action
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class SearchTreePathfindingAI(PathfindingAI, SearchTreeAI):
"""This AI combines the SearchTreeAI and the PathfindingAI by favoring the former.
    It first finds all actions that let the player survive the next rounds by using the SearchTreeAI and
    afterwards lets the PathfindingAI check which of these is the best action to perform.
Attributes:
player: The player associated with this AI.
"""
def __init__(self, player: Player, max_speed: int, count_paths_to_check: int, depth: int,
distance_to_check: int = 0):
"""Creates a new object of the SearchTreePathfindingAI.
Args:
player: The player assigned to the AI.
max_speed: The maximum speed the AI can reach.
count_paths_to_check: The number of paths used to avoid dead ends.
            depth: The number of actions to pre-calculate (the search depth).
            distance_to_check:
                The maximum distance an enemy player may be away from the own
                player to still be taken into account in the calculations.
"""
PathfindingAI.__init__(self, player, max_speed, count_paths_to_check)
SearchTreeAI.__init__(self, player, depth, max_speed, distance_to_check=distance_to_check)
def get_information(self) -> str:
"""See base class."""
return "max_speed=" + str(self._max_speed) \
+ ", count_paths_to_check=" + str(self._get_count_paths_to_check()) \
+ ", depth=" + str(self._get_depth()) \
+ ", distance_to_check=" + str(self._get_distance_to_check())
def create_next_action(self, game: Game, return_value: Value):
"""See base class."""
self._turn_ctr += 1
surviving_actions = self.create_all_next_surviving_actions(game)
if surviving_actions is not None and len(surviving_actions) > 0:
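            # Editor's note: the random choice below is a quick fallback so
            # that return_value already holds a valid surviving action while
            # the more expensive pathfinding evaluation on the next line
            # runs; its result then overwrites the fallback.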
return_value.value = choice(surviving_actions).get_index()
return_value.value = self.find_actions_by_best_path_connection(surviving_actions, game)[0][0].get_index()
else:
surviving_pathfinding_actions = self.find_actions_by_best_path_connection(
self.find_surviving_actions(GameService(game), 1), game)
return_value.value = surviving_pathfinding_actions[0][0].get_index() \
if surviving_pathfinding_actions is not None and len(surviving_pathfinding_actions) > 0 \
else Action.get_default().get_index()
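# Instantiation sketch (editor's addition; `player` is assumed to be a
# chillow Player instance and the numeric values are arbitrary examples):
#
#     ai = SearchTreePathfindingAI(player, max_speed=3,
#                                  count_paths_to_check=50, depth=2,
#                                  distance_to_check=10)
#     # typically run in a separate process with a shared Value:
#     # ai.create_next_action(game, return_value)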
| 47.881356
| 117
| 0.695575
|
from multiprocessing import Value
from random import choice
from chillow.service.ai.pathfinding_ai import PathfindingAI
from chillow.service.ai.search_tree_ai import SearchTreeAI
from chillow.model.action import Action
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class SearchTreePathfindingAI(PathfindingAI, SearchTreeAI):
def __init__(self, player: Player, max_speed: int, count_paths_to_check: int, depth: int,
distance_to_check: int = 0):
PathfindingAI.__init__(self, player, max_speed, count_paths_to_check)
SearchTreeAI.__init__(self, player, depth, max_speed, distance_to_check=distance_to_check)
def get_information(self) -> str:
return "max_speed=" + str(self._max_speed) \
+ ", count_paths_to_check=" + str(self._get_count_paths_to_check()) \
+ ", depth=" + str(self._get_depth()) \
+ ", distance_to_check=" + str(self._get_distance_to_check())
def create_next_action(self, game: Game, return_value: Value):
self._turn_ctr += 1
surviving_actions = self.create_all_next_surviving_actions(game)
if surviving_actions is not None and len(surviving_actions) > 0:
return_value.value = choice(surviving_actions).get_index()
return_value.value = self.find_actions_by_best_path_connection(surviving_actions, game)[0][0].get_index()
else:
surviving_pathfinding_actions = self.find_actions_by_best_path_connection(
self.find_surviving_actions(GameService(game), 1), game)
return_value.value = surviving_pathfinding_actions[0][0].get_index() \
if surviving_pathfinding_actions is not None and len(surviving_pathfinding_actions) > 0 \
else Action.get_default().get_index()
| true
| true
|
f709e817f4713e92e5114cefb9a40115888b3a18
| 16,977
|
py
|
Python
|
kivy/tests/common.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 1
|
2019-10-04T00:27:02.000Z
|
2019-10-04T00:27:02.000Z
|
kivy/tests/common.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 1
|
2019-10-08T02:06:27.000Z
|
2019-10-08T02:06:27.000Z
|
kivy/tests/common.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | null | null | null |
'''
This is an extended unittest module for Kivy, for making unittests based on
graphics with an OpenGL context.
The idea is to render a Widget tree, and after 1, 2 or more frames, take a
screenshot and compare it to a reference one.
If no reference screenshot exists for the current test, the very first one taken will be used.
The screenshots live in the 'kivy/tests/results' folder and are in PNG format,
320x240 pixels.
'''
__all__ = ('GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run')
import unittest
import logging
import pytest
import sys
import os
import threading
from kivy.graphics.cgl import cgl_get_backend_name
from kivy.input.motionevent import MotionEvent
log = logging.getLogger('unittest')
_base = object
if 'mock' != cgl_get_backend_name():
    # check what the gl backend might be; we can't know for sure
    # what it'll be until actually initialized by the window.
_base = unittest.TestCase
make_screenshots = os.environ.get('KIVY_UNITTEST_SCREENSHOTS')
http_server = None
http_server_ready = threading.Event()
kivy_eventloop = os.environ.get('KIVY_EVENTLOOP', 'asyncio')
def ensure_web_server():
if http_server is not None:
return True
def _start_web_server():
global http_server
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
try:
handler = SimpleHTTPRequestHandler
handler.directory = os.path.join(
os.path.dirname(__file__), "..", "..")
http_server = TCPServer(
("", 8000), handler, bind_and_activate=False)
http_server.daemon_threads = True
http_server.allow_reuse_address = True
http_server.server_bind()
http_server.server_activate()
http_server_ready.set()
http_server.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
http_server = None
http_server_ready.set()
th = threading.Thread(target=_start_web_server)
th.daemon = True
th.start()
http_server_ready.wait()
if http_server is None:
raise Exception("Unable to start webserver")
class GraphicUnitTest(_base):
framecount = 0
def _force_refresh(self, *largs):
        # this prevents getting stuck in some cases when the screen doesn't refresh
        # and we wait on a self.framecount that never goes down
from kivy.base import EventLoop
win = EventLoop.window
if win and win.canvas:
win.canvas.ask_update()
def render(self, root, framecount=1):
        '''Call the rendering process using the `root` widget.
        The screenshot will be taken after `framecount` frames.
'''
from kivy.base import runTouchApp
from kivy.clock import Clock
self.framecount = framecount
try:
Clock.schedule_interval(self._force_refresh, 1)
runTouchApp(root)
finally:
Clock.unschedule(self._force_refresh)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, *args, **kwargs):
        '''Extend unittest's run to check that the results directory exists.
        If screenshot tests are enabled and the directory is missing, it is created.
        '''
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if make_screenshots and not exists(results_dir):
log.warning('No result directory found, cancel test.')
os.mkdir(results_dir)
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(*args, **kwargs)
def setUp(self):
        '''Prepare the graphic test, with:
        - Window size fixed to 320x240
        - Default kivy configuration
        - No kivy input providers
        '''
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
# force window size + remove all inputs
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
# bind ourself for the later screenshot
from kivy.core.window import Window
self.Window = Window
Window.bind(on_flip=self.on_window_flip)
# ensure our window is correctly created
Window.create_window()
Window.register()
Window.initialized = True
Window.canvas.clear()
Window.close = lambda *s: True
def on_window_flip(self, window):
        '''Internal method called when the window has just displayed an
        image.
        Each time an image is shown, we decrement our framecount. When
        framecount reaches 0, we take the screenshot.
        The screenshot is written to a temporary location and compared to the
        original one -> test ok/ko.
        If no screenshot is available in the results directory, a new one will
        be created.
        '''
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# don't save screenshot until we have enough frames.
# log.debug('framecount %d' % self.framecount)
# ! check if there is 'framecount', otherwise just
# ! assume zero e.g. if handling runTouchApp manually
self.framecount = getattr(self, 'framecount', 0) - 1
if self.framecount > 0:
return
# don't create screenshots if not requested manually
if not make_screenshots:
EventLoop.stop()
return
reffn = None
match = False
try:
# just get a temporary name
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
# get a filename for the current unit test
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
# capture the screen
log.info('Capturing screenshot for %s' % test_uid)
tmpfn = window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
# search the file to compare to
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
# get sourcecode
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
log.info('No image reference, move %s as ref ?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
from kivy.core.image import Image as CoreImage
s1 = CoreImage(tmpfn, keep_data=True)
sd1 = s1.image._data[0].data
s2 = CoreImage(reffn, keep_data=True)
sd2 = s2.image._data[0].data
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
log.critical('user ask to use it as ref.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
# generate html
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
'''When the test is finished, stop the application, and unbind our
current flip callback.
'''
from kivy.base import stopTouchApp
from kivy.core.window import Window
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.assertTrue(False)
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
        Label(root, text='The test %s\nhas no reference.' % testid).pack()
        Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
        Label(root, text='The test %s\nhas generated a different '
                         'image than the reference one.' % testid).pack()
        Label(root, text='Which one is correct?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def advance_frames(self, count):
'''Render the new frames and:
* tick the Clock
* dispatch input from all registered providers
* flush all the canvas operations
* redraw Window canvas if necessary
'''
from kivy.base import EventLoop
for i in range(count):
EventLoop.idle()
class UnitTestTouch(MotionEvent):
'''Custom MotionEvent representing a single touch. Similar to `on_touch_*`
methods from the Widget class, this one introduces:
* touch_down
* touch_move
* touch_up
Create a new touch with::
touch = UnitTestTouch(x, y)
then you press it on the default position with::
touch.touch_down()
or move it or even release with these simple calls::
touch.touch_move(new_x, new_y)
touch.touch_up()
'''
def __init__(self, x, y):
'''Create a MotionEvent instance with X and Y of the first
position a touch is at.
'''
from kivy.base import EventLoop
self.eventloop = EventLoop
win = EventLoop.window
super(UnitTestTouch, self).__init__(
# device, (tuio) id, args
self.__class__.__name__, 99, {
"x": x / float(win.width),
"y": y / float(win.height),
}
)
def touch_down(self, *args):
self.eventloop.post_dispatch_input("begin", self)
def touch_move(self, x, y):
win = self.eventloop.window
self.move({
"x": x / float(win.width),
"y": y / float(win.height)
})
self.eventloop.post_dispatch_input("update", self)
def touch_up(self, *args):
self.eventloop.post_dispatch_input("end", self)
def depack(self, args):
# set MotionEvent to touch
self.is_touch = True
# set sx/sy properties to ratio (e.g. X / win.width)
self.sx = args['x']
self.sy = args['y']
# set profile to accept x, y and pos properties
self.profile = ['pos']
# run depack after we set the values
super(UnitTestTouch, self).depack(args)
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
def async_run(func=None, app_cls_func=None):
def inner_func(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
        if sys.version_info[:2] <= (3, 5):
return pytest.mark.skip(
reason='Skipping because graphics tests are not supported on '
'py3.5, only on py3.6+')(func)
if app_cls_func is not None:
func = pytest.mark.parametrize(
"kivy_app", [[app_cls_func], ], indirect=True)(func)
if kivy_eventloop == 'asyncio':
try:
import pytest_asyncio
return pytest.mark.asyncio(func)
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "asyncio" but '
'"pytest-asyncio" is not installed')(func)
elif kivy_eventloop == 'trio':
try:
import trio
from pytest_trio import trio_fixture
func._force_trio_fixture = True
return func
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "trio" but '
'"pytest-trio" is not installed')(func)
else:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP must be set to either of "asyncio" or '
'"trio" to run async tests')(func)
if func is None:
return inner_func
return inner_func(func)
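# --- Editor's sketch (not part of the original file) ---
# A minimal test built on the helpers above: run the app for one frame, then
# dispatch a synthetic touch. The Button widget and the test name are
# illustrative only.
from kivy.uix.button import Button
class ExampleButtonTest(GraphicUnitTest):
    def test_button_press(self):
        button = Button(text='hello')
        self.render(button)                    # run the app for one frame
        touch = UnitTestTouch(*button.center)  # press at the button's center
        touch.touch_down()
        touch.touch_up()
        self.advance_frames(1)                 # let the event loop process it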
| 34.646939
| 79
| 0.569594
|
__all__ = ('GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run')
import unittest
import logging
import pytest
import sys
import os
import threading
from kivy.graphics.cgl import cgl_get_backend_name
from kivy.input.motionevent import MotionEvent
log = logging.getLogger('unittest')
_base = object
if 'mock' != cgl_get_backend_name():
    # what it'll be until actually initialized by the window.
_base = unittest.TestCase
make_screenshots = os.environ.get('KIVY_UNITTEST_SCREENSHOTS')
http_server = None
http_server_ready = threading.Event()
kivy_eventloop = os.environ.get('KIVY_EVENTLOOP', 'asyncio')
def ensure_web_server():
if http_server is not None:
return True
def _start_web_server():
global http_server
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
try:
handler = SimpleHTTPRequestHandler
handler.directory = os.path.join(
os.path.dirname(__file__), "..", "..")
http_server = TCPServer(
("", 8000), handler, bind_and_activate=False)
http_server.daemon_threads = True
http_server.allow_reuse_address = True
http_server.server_bind()
http_server.server_activate()
http_server_ready.set()
http_server.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
http_server = None
http_server_ready.set()
th = threading.Thread(target=_start_web_server)
th.daemon = True
th.start()
http_server_ready.wait()
if http_server is None:
raise Exception("Unable to start webserver")
class GraphicUnitTest(_base):
framecount = 0
def _force_refresh(self, *largs):
        # and we wait on a self.framecount that never goes down
from kivy.base import EventLoop
win = EventLoop.window
if win and win.canvas:
win.canvas.ask_update()
def render(self, root, framecount=1):
from kivy.base import runTouchApp
from kivy.clock import Clock
self.framecount = framecount
try:
Clock.schedule_interval(self._force_refresh, 1)
runTouchApp(root)
finally:
Clock.unschedule(self._force_refresh)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, *args, **kwargs):
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if make_screenshots and not exists(results_dir):
log.warning('No result directory found, cancel test.')
os.mkdir(results_dir)
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(*args, **kwargs)
def setUp(self):
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
from kivy.core.window import Window
self.Window = Window
Window.bind(on_flip=self.on_window_flip)
Window.create_window()
Window.register()
Window.initialized = True
Window.canvas.clear()
Window.close = lambda *s: True
def on_window_flip(self, window):
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# log.debug('framecount %d' % self.framecount)
# ! check if there is 'framecount', otherwise just
# ! assume zero e.g. if handling runTouchApp manually
self.framecount = getattr(self, 'framecount', 0) - 1
if self.framecount > 0:
return
# don't create screenshots if not requested manually
if not make_screenshots:
EventLoop.stop()
return
reffn = None
match = False
try:
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
log.info('Capturing screenshot for %s' % test_uid)
tmpfn = window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
log.info('No image reference, move %s as ref ?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
from kivy.core.image import Image as CoreImage
s1 = CoreImage(tmpfn, keep_data=True)
sd1 = s1.image._data[0].data
s2 = CoreImage(reffn, keep_data=True)
sd2 = s2.image._data[0].data
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
log.critical('user ask to use it as ref.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
from kivy.base import stopTouchApp
from kivy.core.window import Window
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.assertTrue(False)
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
        Label(root, text='The test %s\nhas no reference.' % testid).pack()
        Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
        Label(root, text='The test %s\nhas generated a different '
                         'image than the reference one.' % testid).pack()
        Label(root, text='Which one is correct?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def advance_frames(self, count):
from kivy.base import EventLoop
for i in range(count):
EventLoop.idle()
class UnitTestTouch(MotionEvent):
def __init__(self, x, y):
from kivy.base import EventLoop
self.eventloop = EventLoop
win = EventLoop.window
super(UnitTestTouch, self).__init__(
self.__class__.__name__, 99, {
"x": x / float(win.width),
"y": y / float(win.height),
}
)
def touch_down(self, *args):
self.eventloop.post_dispatch_input("begin", self)
def touch_move(self, x, y):
win = self.eventloop.window
self.move({
"x": x / float(win.width),
"y": y / float(win.height)
})
self.eventloop.post_dispatch_input("update", self)
def touch_up(self, *args):
self.eventloop.post_dispatch_input("end", self)
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UnitTestTouch, self).depack(args)
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
def async_run(func=None, app_cls_func=None):
def inner_func(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
        if sys.version_info[:2] <= (3, 5):
return pytest.mark.skip(
reason='Skipping because graphics tests are not supported on '
'py3.5, only on py3.6+')(func)
if app_cls_func is not None:
func = pytest.mark.parametrize(
"kivy_app", [[app_cls_func], ], indirect=True)(func)
if kivy_eventloop == 'asyncio':
try:
import pytest_asyncio
return pytest.mark.asyncio(func)
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "asyncio" but '
'"pytest-asyncio" is not installed')(func)
elif kivy_eventloop == 'trio':
try:
import trio
from pytest_trio import trio_fixture
func._force_trio_fixture = True
return func
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "trio" but '
'"pytest-trio" is not installed')(func)
else:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP must be set to either of "asyncio" or '
'"trio" to run async tests')(func)
if func is None:
return inner_func
return inner_func(func)
| true
| true
|
f709e8cded7e066c6f95bc28fa79965b97ba66d5
| 160
|
py
|
Python
|
minecraft_remapper/__init__.py
|
Nearata/minecraft-remapper
|
502a26f3f90c926554976b1f2ad0a82b236c7a96
|
[
"MIT"
] | 1
|
2021-01-24T22:05:51.000Z
|
2021-01-24T22:05:51.000Z
|
minecraft_remapper/__init__.py
|
Nearata/minecraft-remapper
|
502a26f3f90c926554976b1f2ad0a82b236c7a96
|
[
"MIT"
] | 4
|
2021-01-24T22:11:24.000Z
|
2021-08-15T21:16:53.000Z
|
minecraft_remapper/__init__.py
|
Nearata/minecraft-remapper
|
502a26f3f90c926554976b1f2ad0a82b236c7a96
|
[
"MIT"
] | null | null | null |
"""A Minecraft remapper for already deobfuscated forge mod source code."""
__version__ = "1.1.0"
from minecraft_remapper.remapper import Remapper as Remapper
| 26.666667
| 74
| 0.7875
|
__version__ = "1.1.0"
from minecraft_remapper.remapper import Remapper as Remapper
| true
| true
|
f709e9abdda917983a56be8d0236370b06343eef
| 1,997
|
py
|
Python
|
bluebottle/payments_vitepay/migrations/0001_initial.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/payments_vitepay/migrations/0001_initial.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/payments_vitepay/migrations/0001_initial.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-13 15:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payments', '0002_auto_20160718_2345'),
]
operations = [
migrations.CreateModel(
name='VitepayPayment',
fields=[
('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
('language_code', models.CharField(default=b'en', max_length=10)),
('currency_code', models.CharField(default=b'XOF', max_length=10)),
('country_code', models.CharField(default=b'ML', max_length=10)),
('order_id', models.CharField(max_length=10, null=True)),
('description', models.CharField(max_length=500, null=True)),
('amount_100', models.IntegerField(null=True)),
('buyer_ip_adress', models.CharField(max_length=200, null=True)),
('return_url', models.CharField(max_length=500, null=True)),
('decline_url', models.CharField(max_length=500, null=True)),
('cancel_url', models.CharField(max_length=500, null=True)),
('callback_url', models.CharField(max_length=500, null=True)),
('email', models.CharField(max_length=500, null=True)),
('p_type', models.CharField(default=b'orange_money', max_length=500)),
('payment_url', models.CharField(max_length=500, null=True)),
],
options={
'ordering': ('-created', '-updated'),
'verbose_name': 'Vitepay Payment',
'verbose_name_plural': 'Vitepay Payments',
},
bases=('payments.payment',),
),
]
| 44.377778
| 194
| 0.597897
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payments', '0002_auto_20160718_2345'),
]
operations = [
migrations.CreateModel(
name='VitepayPayment',
fields=[
('payment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='payments.Payment')),
('language_code', models.CharField(default=b'en', max_length=10)),
('currency_code', models.CharField(default=b'XOF', max_length=10)),
('country_code', models.CharField(default=b'ML', max_length=10)),
('order_id', models.CharField(max_length=10, null=True)),
('description', models.CharField(max_length=500, null=True)),
('amount_100', models.IntegerField(null=True)),
('buyer_ip_adress', models.CharField(max_length=200, null=True)),
('return_url', models.CharField(max_length=500, null=True)),
('decline_url', models.CharField(max_length=500, null=True)),
('cancel_url', models.CharField(max_length=500, null=True)),
('callback_url', models.CharField(max_length=500, null=True)),
('email', models.CharField(max_length=500, null=True)),
('p_type', models.CharField(default=b'orange_money', max_length=500)),
('payment_url', models.CharField(max_length=500, null=True)),
],
options={
'ordering': ('-created', '-updated'),
'verbose_name': 'Vitepay Payment',
'verbose_name_plural': 'Vitepay Payments',
},
bases=('payments.payment',),
),
]
| true
| true
|
f709eb1b5af037f2a9c13eb283c75382944620af
| 4,757
|
py
|
Python
|
datamart/joiners/join_feature/feature_factory.py
|
juancroldan/datamart
|
9ec3b99f36192f812edd74ad2262bebccc22bc66
|
[
"MIT"
] | 7
|
2018-10-02T01:32:23.000Z
|
2020-10-08T00:42:35.000Z
|
datamart/joiners/join_feature/feature_factory.py
|
juancroldan/datamart
|
9ec3b99f36192f812edd74ad2262bebccc22bc66
|
[
"MIT"
] | 47
|
2018-10-02T05:41:13.000Z
|
2021-02-02T21:50:31.000Z
|
datamart/joiners/join_feature/feature_factory.py
|
juancroldan/datamart
|
9ec3b99f36192f812edd74ad2262bebccc22bc66
|
[
"MIT"
] | 19
|
2018-10-01T22:27:20.000Z
|
2019-02-28T18:59:53.000Z
|
from datamart.joiners.join_feature.feature_classes import *
from functools import reduce
import numpy as np
class FeatureFactory:
subclasses = {
(DistributeType.CATEGORICAL, DataType.NUMBER): CategoricalNumberFeature,
(DistributeType.CATEGORICAL, DataType.STRING): CategoricalStringFeature,
(DistributeType.TOKEN_CATEGORICAL, DataType.STRING): CategoricalTokenFeature,
(DistributeType.NON_CATEGORICAL, DataType.NUMBER): NonCategoricalNumberFeature,
(DistributeType.NON_CATEGORICAL, DataType.STRING): NonCategoricalStringFeature
}
@classmethod
def create(cls, df: pd.DataFrame, indexes, df_metadata):
"""
TODO: dynamically generate subclass of FeatureBase, by profiled info, datatype etc.
"""
# set default values:
metadata = cls._get_feature_metadata(df_metadata, indexes) or {}
data_type = None
distribute_type = DistributeType.NON_CATEGORICAL
if len(indexes) > 1:
distribute_type = DistributeType.TOKEN_CATEGORICAL
if cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
else:
# single column, not datetime
idx = indexes[0]
profiles = metadata.get('dsbox_profiled', {})
            # note: the floor division below makes the effective threshold 2, not 1.5
            if len(df.iloc[:, idx]) // len(df.iloc[:, idx].unique()) >= 1.5:
                distribute_type = DistributeType.CATEGORICAL
elif profiles:
most_common_tokens = profiles.get('most_common_tokens')
if most_common_tokens and cls._get_greater_than(most_common_tokens) >= len(most_common_tokens)//2:
distribute_type = DistributeType.TOKEN_CATEGORICAL
dtype = df.iloc[:, idx].dtype
if dtype == np.int64 or dtype == np.float64:
data_type = DataType.NUMBER
else:
semantic_types = metadata.get('semantic_type')
profiles = metadata.get('dsbox_profiled', {})
data_type = cls._get_data_type_by_semantic_type(semantic_types) \
or cls._get_data_type_by_profile(profiles)
if not data_type and cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
return cls.get_instance(df, indexes, metadata, data_type or DataType.STRING, distribute_type)
@classmethod
def get_instance(cls, df, indices, metadata, data_type, distribute_type):
constructor = cls.get_constructor(data_type, distribute_type)
return constructor(df, indices, metadata, distribute_type, data_type)
@classmethod
def get_constructor(cls, data_type, distribute_type=None):
if data_type == DataType.DATETIME:
return DatetimeFeature
return cls.subclasses.get((distribute_type, data_type))
@staticmethod
def _get_feature_metadata(metadata, indices):
if metadata.get('variables') and indices and indices[0] < len(metadata.get('variables')):
return metadata['variables'][indices[0]]
@staticmethod
def _get_avg(list_of_dict, key='count'):
if len(list_of_dict):
return sum([_.get(key) for _ in list_of_dict])/len(list_of_dict)
@staticmethod
def _get_greater_than(list_of_dict, key='count', threshold=2, inclusive=True):
if inclusive:
return reduce(lambda x, y: x + 1 if float(y[key]) >= threshold else x, list_of_dict, 0)
return reduce(lambda x, y: x + 1 if float(y[key]) > threshold else x, list_of_dict, 0)
@staticmethod
def _get_data_type_by_semantic_type(semantic_types: list):
        # TODO: it would be better if we had a closed set of used semantic_types \
        # and mapped them to either STRING, NUMBER or DATETIME
if semantic_types and len(semantic_types):
unique_types = set(t.rsplit('/', 1)[-1].lower() for t in semantic_types)
if 'time' in unique_types or 'date' in unique_types or 'datetime' in unique_types:
return DataType.DATETIME
if 'float' in unique_types or 'int' in unique_types or 'number' in unique_types:
return DataType.NUMBER
@staticmethod
def _get_data_type_by_profile(profiles):
numeric_ratio = profiles.get('ratio_of_numeric_values')
if numeric_ratio and numeric_ratio >= 0.99:
return DataType.NUMBER
@staticmethod
def _try_pd_to_datetime(df, indices):
try:
if len(indices) == 1:
_ = pd.to_datetime(df.iloc[[0, len(df) - 1], indices[0]])
else:
_ = pd.to_datetime(df.iloc[[0, len(df)-1], indices])
return True
except ValueError:
return False
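# --- Editor's sketch (not part of the original file) ---
# How the heuristics above classify a toy column; pandas is assumed to be
# re-exported by the feature_classes star import, so it is imported explicitly here.
import pandas as pd
def demo_feature_factory():
    df = pd.DataFrame({'color': ['red', 'blue', 'red', 'red', 'blue', 'red']})
    # 6 rows // 2 uniques = 3 >= 1.5 -> CATEGORICAL; the object dtype and empty
    # metadata leave data_type unset, so it falls back to DataType.STRING and
    # the factory returns a CategoricalStringFeature instance.
    return FeatureFactory.create(df, [0], {})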
| 42.473214
| 114
| 0.646206
|
from datamart.joiners.join_feature.feature_classes import *
from functools import reduce
import numpy as np
class FeatureFactory:
subclasses = {
(DistributeType.CATEGORICAL, DataType.NUMBER): CategoricalNumberFeature,
(DistributeType.CATEGORICAL, DataType.STRING): CategoricalStringFeature,
(DistributeType.TOKEN_CATEGORICAL, DataType.STRING): CategoricalTokenFeature,
(DistributeType.NON_CATEGORICAL, DataType.NUMBER): NonCategoricalNumberFeature,
(DistributeType.NON_CATEGORICAL, DataType.STRING): NonCategoricalStringFeature
}
@classmethod
def create(cls, df: pd.DataFrame, indexes, df_metadata):
metadata = cls._get_feature_metadata(df_metadata, indexes) or {}
data_type = None
distribute_type = DistributeType.NON_CATEGORICAL
if len(indexes) > 1:
distribute_type = DistributeType.TOKEN_CATEGORICAL
if cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
else:
idx = indexes[0]
profiles = metadata.get('dsbox_profiled', {})
if len(df.iloc[:, idx]) // len(df.iloc[:, idx].unique()) >= 1.5:
distribute_type = DistributeType.CATEGORICAL
elif profiles:
most_common_tokens = profiles.get('most_common_tokens')
if most_common_tokens and cls._get_greater_than(most_common_tokens) >= len(most_common_tokens)//2:
distribute_type = DistributeType.TOKEN_CATEGORICAL
dtype = df.iloc[:, idx].dtype
if dtype == np.int64 or dtype == np.float64:
data_type = DataType.NUMBER
else:
semantic_types = metadata.get('semantic_type')
profiles = metadata.get('dsbox_profiled', {})
data_type = cls._get_data_type_by_semantic_type(semantic_types) \
or cls._get_data_type_by_profile(profiles)
if not data_type and cls._try_pd_to_datetime(df, indexes):
data_type = DataType.DATETIME
return cls.get_instance(df, indexes, metadata, data_type or DataType.STRING, distribute_type)
@classmethod
def get_instance(cls, df, indices, metadata, data_type, distribute_type):
constructor = cls.get_constructor(data_type, distribute_type)
return constructor(df, indices, metadata, distribute_type, data_type)
@classmethod
def get_constructor(cls, data_type, distribute_type=None):
if data_type == DataType.DATETIME:
return DatetimeFeature
return cls.subclasses.get((distribute_type, data_type))
@staticmethod
def _get_feature_metadata(metadata, indices):
if metadata.get('variables') and indices and indices[0] < len(metadata.get('variables')):
return metadata['variables'][indices[0]]
@staticmethod
def _get_avg(list_of_dict, key='count'):
if len(list_of_dict):
return sum([_.get(key) for _ in list_of_dict])/len(list_of_dict)
@staticmethod
def _get_greater_than(list_of_dict, key='count', threshold=2, inclusive=True):
if inclusive:
return reduce(lambda x, y: x + 1 if float(y[key]) >= threshold else x, list_of_dict, 0)
return reduce(lambda x, y: x + 1 if float(y[key]) > threshold else x, list_of_dict, 0)
@staticmethod
def _get_data_type_by_semantic_type(semantic_types: list):
if semantic_types and len(semantic_types):
unique_types = set(t.rsplit('/', 1)[-1].lower() for t in semantic_types)
if 'time' in unique_types or 'date' in unique_types or 'datetime' in unique_types:
return DataType.DATETIME
if 'float' in unique_types or 'int' in unique_types or 'number' in unique_types:
return DataType.NUMBER
@staticmethod
def _get_data_type_by_profile(profiles):
numeric_ratio = profiles.get('ratio_of_numeric_values')
if numeric_ratio and numeric_ratio >= 0.99:
return DataType.NUMBER
@staticmethod
def _try_pd_to_datetime(df, indices):
try:
if len(indices) == 1:
_ = pd.to_datetime(df.iloc[[0, len(df) - 1], indices[0]])
else:
_ = pd.to_datetime(df.iloc[[0, len(df)-1], indices])
return True
except ValueError:
return False
| true
| true
|
f709ef1c8a4102a6552cd1ecd42a1eb6b329f1f3
| 2,517
|
py
|
Python
|
nodeeditor/node_editor_widget.py
|
icnmtrx/econ_helper
|
76a2a49a17f87acf9b9f4e0142f86e544a9cc52b
|
[
"MIT"
] | null | null | null |
nodeeditor/node_editor_widget.py
|
icnmtrx/econ_helper
|
76a2a49a17f87acf9b9f4e0142f86e544a9cc52b
|
[
"MIT"
] | null | null | null |
nodeeditor/node_editor_widget.py
|
icnmtrx/econ_helper
|
76a2a49a17f87acf9b9f4e0142f86e544a9cc52b
|
[
"MIT"
] | null | null | null |
import logging
import os
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from nodeeditor.node_edge import Edge, EDGE_TYPE_BEZIER
from nodeeditor.node_graphics_view import QDMGraphicsView
from nodeeditor.node_node import Node
from nodeeditor.node_scene import Scene, InvalidFile
class NodeEditorWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.filename = None
self.initUI()
def initUI(self):
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
        # create graphics scene
self.scene = Scene()
# create graphics view
self.view = QDMGraphicsView(self.scene.grScene, self)
self.layout.addWidget(self.view)
def isModified(self):
return self.scene.isModified()
def isFilenameSet(self):
return self.filename is not None
def getSelectedItems(self):
return self.scene.getSelectedItems()
def hasSelectedItems(self):
return self.getSelectedItems() != []
def canUndo(self):
return self.scene.history.canUndo()
def canRedo(self):
return self.scene.history.canRedo()
def getUserFriendlyFilename(self):
name = os.path.basename(self.filename) if self.isFilenameSet() else "New Graph"
return name + ("*" if self.isModified() else "")
def fileNew(self):
self.scene.clear()
self.filename = None
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
def fileLoad(self, filename):
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
self.scene.loadFromFile(filename)
self.filename = filename
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
return True
except InvalidFile as e:
logging.error(e)
QApplication.restoreOverrideCursor()
QMessageBox.warning(self, "Error loading %s" % os.path.basename(filename), str(e))
return False
finally:
QApplication.restoreOverrideCursor()
def fileSave(self, filename=None):
        # when called without a filename, keep the one already stored
        if filename is not None: self.filename = filename
QApplication.setOverrideCursor(Qt.WaitCursor)
self.scene.saveToFile(self.filename)
QApplication.restoreOverrideCursor()
return True
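# --- Editor's sketch (not part of the original file) ---
# Minimal embedding of the widget in an application; everything used below is
# defined or imported above, and the window title comes straight from
# getUserFriendlyFilename().
def main():
    import sys
    app = QApplication(sys.argv)
    editor = NodeEditorWidget()
    editor.fileNew()  # start from an empty scene
    editor.setWindowTitle(editor.getUserFriendlyFilename())
    editor.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()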
| 27.358696
| 94
| 0.657132
|
import logging
import os
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from nodeeditor.node_edge import Edge, EDGE_TYPE_BEZIER
from nodeeditor.node_graphics_view import QDMGraphicsView
from nodeeditor.node_node import Node
from nodeeditor.node_scene import Scene, InvalidFile
class NodeEditorWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.filename = None
self.initUI()
def initUI(self):
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.scene = Scene()
self.view = QDMGraphicsView(self.scene.grScene, self)
self.layout.addWidget(self.view)
def isModified(self):
return self.scene.isModified()
def isFilenameSet(self):
return self.filename is not None
def getSelectedItems(self):
return self.scene.getSelectedItems()
def hasSelectedItems(self):
return self.getSelectedItems() != []
def canUndo(self):
return self.scene.history.canUndo()
def canRedo(self):
return self.scene.history.canRedo()
def getUserFriendlyFilename(self):
name = os.path.basename(self.filename) if self.isFilenameSet() else "New Graph"
return name + ("*" if self.isModified() else "")
def fileNew(self):
self.scene.clear()
self.filename = None
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
def fileLoad(self, filename):
QApplication.setOverrideCursor(Qt.WaitCursor)
try:
self.scene.loadFromFile(filename)
self.filename = filename
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
return True
except InvalidFile as e:
logging.error(e)
QApplication.restoreOverrideCursor()
QMessageBox.warning(self, "Error loading %s" % os.path.basename(filename), str(e))
return False
finally:
QApplication.restoreOverrideCursor()
def fileSave(self, filename=None):
if filename is not None: self.filename = filename
QApplication.setOverrideCursor(Qt.WaitCursor)
self.scene.saveToFile(self.filename)
QApplication.restoreOverrideCursor()
return True
| true
| true
|
f709f0b169aac9dac69ec255c444a85bce05cb86
| 1,001
|
py
|
Python
|
test/test_v1alpha1_data_volume_source_pvc.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_v1alpha1_data_volume_source_pvc.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_v1alpha1_data_volume_source_pvc.py
|
gabriel-samfira/client-python
|
c2e184c3cad6797af35b0160a36ffcbba77284a7
|
[
"Apache-2.0"
] | 1
|
2020-12-10T03:16:05.000Z
|
2020-12-10T03:16:05.000Z
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1alpha1_data_volume_source_pvc import V1alpha1DataVolumeSourcePVC
class TestV1alpha1DataVolumeSourcePVC(unittest.TestCase):
""" V1alpha1DataVolumeSourcePVC unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1DataVolumeSourcePVC(self):
"""
Test V1alpha1DataVolumeSourcePVC
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1alpha1_data_volume_source_pvc.V1alpha1DataVolumeSourcePVC()
pass
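        # Editor's note: one hedged way to resolve the FIXME above, assuming the
        # generated model exposes `name`/`namespace` keyword arguments:
        #     model = V1alpha1DataVolumeSourcePVC(name="source-pvc", namespace="default")
        #     self.assertEqual(model.name, "source-pvc")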
if __name__ == '__main__':
unittest.main()
| 22.244444
| 94
| 0.726274
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1alpha1_data_volume_source_pvc import V1alpha1DataVolumeSourcePVC
class TestV1alpha1DataVolumeSourcePVC(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1DataVolumeSourcePVC(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f709f3a1db36a7690e2301e6bb78d5b399a49454
| 3,453
|
py
|
Python
|
foundations/docker/localhost/kind/yugabyte-demo/cluster/build/yugabyte/_vendir/upstream/stable/yugabyte/generate_kubeconfig.py
|
aegershman/deployments
|
c2093ea6824fea6ef18198f9420ca36a1f3ec1af
|
[
"Apache-2.0"
] | 2
|
2020-02-20T05:56:41.000Z
|
2020-04-26T17:29:51.000Z
|
foundations/docker/localhost/kind/yugabyte-demo/cluster/build/yugabyte/_vendir/upstream/stable/yugabyte/generate_kubeconfig.py
|
aegershman/deployments
|
c2093ea6824fea6ef18198f9420ca36a1f3ec1af
|
[
"Apache-2.0"
] | 39
|
2019-11-02T19:35:56.000Z
|
2021-10-14T20:16:38.000Z
|
foundations/docker/localhost/kind/yugabyte-demo/cluster/build/yugabyte/_vendir/upstream/stable/yugabyte/generate_kubeconfig.py
|
aegershman/deployments
|
c2093ea6824fea6ef18198f9420ca36a1f3ec1af
|
[
"Apache-2.0"
] | 1
|
2020-06-22T23:00:32.000Z
|
2020-06-22T23:00:32.000Z
|
#!/usr/bin/python
# Copyright (c) YugaByte, Inc.
# This script generates a kubeconfig for the given service account
# by fetching the cluster information, and adds the service account
# token for authentication purposes.
import argparse
from subprocess import check_output
import json
import base64
import tempfile
def run_command(command_args, namespace=None, as_json=True):
command = ['kubectl']
if namespace:
command.extend(['--namespace', namespace])
command.extend(command_args)
if as_json:
command.extend(['-o', 'json'])
return json.loads(check_output(command))
else:
return check_output(command).decode('utf8')
parser = argparse.ArgumentParser(description='Generate KubeConfig with Token')
parser.add_argument('-s', '--service_account', help='Service Account name', required=True)
parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system')
parser.add_argument('-c', '--context', help='kubectl context')
args = vars(parser.parse_args())
# if the context is not provided we use the current-context
context = args['context']
if context is None:
context = run_command(['config', 'current-context'],
args['namespace'], as_json=False)
cluster_attrs = run_command(['config', 'get-contexts', context.strip(),
'--no-headers'], args['namespace'], as_json=False)
cluster_name = cluster_attrs.strip().split()[2]
endpoint = run_command(['config', 'view', '-o',
'jsonpath="{.clusters[?(@.name =="' +
cluster_name + '")].cluster.server}"'],
args['namespace'], as_json=False)
service_account_info = run_command(['get', 'sa', args['service_account']],
args['namespace'])
sa_secret = service_account_info['secrets'][0]['name']
secret_data = run_command(['get', 'secret', sa_secret], args['namespace'])
context_name = '{}-{}'.format(args['service_account'], cluster_name)
kube_config = '/tmp/{}.conf'.format(args['service_account'])
with tempfile.NamedTemporaryFile() as ca_crt_file:
ca_crt = base64.b64decode(secret_data['data']['ca.crt'])
ca_crt_file.write(ca_crt)
ca_crt_file.flush()
# create kubeconfig entry
set_cluster_cmd = ['config', 'set-cluster', cluster_name,
'--kubeconfig={}'.format(kube_config),
'--server={}'.format(endpoint.strip('"')),
'--embed-certs=true',
'--certificate-authority={}'.format(ca_crt_file.name)]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8')
set_credentials_cmd = ['config', 'set-credentials', context_name,
'--token={}'.format(user_token),
'--kubeconfig={}'.format(kube_config)]
run_command(set_credentials_cmd, as_json=False)
set_context_cmd = ['config', 'set-context', context_name,
'--cluster={}'.format(cluster_name),
'--user={}'.format(context_name),
'--kubeconfig={}'.format(kube_config)]
run_command(set_context_cmd, as_json=False)
use_context_cmd = ['config', 'use-context', context_name,
'--kubeconfig={}'.format(kube_config)]
run_command(use_context_cmd, as_json=False)
print("Generated the kubeconfig file: {}".format(kube_config))
| 41.60241
| 92
| 0.643498
|
import argparse
from subprocess import check_output
import json
import base64
import tempfile
def run_command(command_args, namespace=None, as_json=True):
command = ['kubectl']
if namespace:
command.extend(['--namespace', namespace])
command.extend(command_args)
if as_json:
command.extend(['-o', 'json'])
return json.loads(check_output(command))
else:
return check_output(command).decode('utf8')
parser = argparse.ArgumentParser(description='Generate KubeConfig with Token')
parser.add_argument('-s', '--service_account', help='Service Account name', required=True)
parser.add_argument('-n', '--namespace', help='Kubernetes namespace', default='kube-system')
parser.add_argument('-c', '--context', help='kubectl context')
args = vars(parser.parse_args())
context = args['context']
if context is None:
context = run_command(['config', 'current-context'],
args['namespace'], as_json=False)
cluster_attrs = run_command(['config', 'get-contexts', context.strip(),
'--no-headers'], args['namespace'], as_json=False)
cluster_name = cluster_attrs.strip().split()[2]
endpoint = run_command(['config', 'view', '-o',
'jsonpath="{.clusters[?(@.name =="' +
cluster_name + '")].cluster.server}"'],
args['namespace'], as_json=False)
service_account_info = run_command(['get', 'sa', args['service_account']],
args['namespace'])
sa_secret = service_account_info['secrets'][0]['name']
secret_data = run_command(['get', 'secret', sa_secret], args['namespace'])
context_name = '{}-{}'.format(args['service_account'], cluster_name)
kube_config = '/tmp/{}.conf'.format(args['service_account'])
with tempfile.NamedTemporaryFile() as ca_crt_file:
ca_crt = base64.b64decode(secret_data['data']['ca.crt'])
ca_crt_file.write(ca_crt)
ca_crt_file.flush()
set_cluster_cmd = ['config', 'set-cluster', cluster_name,
'--kubeconfig={}'.format(kube_config),
'--server={}'.format(endpoint.strip('"')),
'--embed-certs=true',
'--certificate-authority={}'.format(ca_crt_file.name)]
run_command(set_cluster_cmd, as_json=False)
user_token = base64.b64decode(secret_data['data']['token']).decode('utf-8')
set_credentials_cmd = ['config', 'set-credentials', context_name,
'--token={}'.format(user_token),
'--kubeconfig={}'.format(kube_config)]
run_command(set_credentials_cmd, as_json=False)
set_context_cmd = ['config', 'set-context', context_name,
'--cluster={}'.format(cluster_name),
'--user={}'.format(context_name),
'--kubeconfig={}'.format(kube_config)]
run_command(set_context_cmd, as_json=False)
use_context_cmd = ['config', 'use-context', context_name,
'--kubeconfig={}'.format(kube_config)]
run_command(use_context_cmd, as_json=False)
print("Generated the kubeconfig file: {}".format(kube_config))
| true
| true
|
f709f44fde235e8c6f38256a50cbfb476ceee8e9
| 373
|
py
|
Python
|
JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | 3
|
2020-12-28T05:09:02.000Z
|
2021-06-23T10:02:03.000Z
|
JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py
|
JIYANG-PLUS/JDjango
|
57cbb13b2b4c07f34d546c0c637c22f60c1e692a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-10-28 17:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('docs', '0002_article_version'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '插件内容', 'verbose_name_plural': '插件内容'},
),
]
| 20.722222
| 76
| 0.603217
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('docs', '0002_article_version'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '插件内容', 'verbose_name_plural': '插件内容'},
),
]
| true
| true
|
f709f4a438cc8457611aa5475ad0795c5fb0526b
| 1,333
|
py
|
Python
|
src/adorn/exception/configuration_error.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | 3
|
2021-12-11T03:52:57.000Z
|
2022-03-22T20:42:56.000Z
|
src/adorn/exception/configuration_error.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | 12
|
2021-12-31T19:22:09.000Z
|
2022-03-21T03:49:13.000Z
|
src/adorn/exception/configuration_error.py
|
pyadorn/adorn
|
a34a9a20c1a80c7bdbee0fa641c2bd17e20e60e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Jacob Baumbach
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic exception for a misconfigured object."""
class ConfigurationError(Exception):
"""The exception raised by any object when it's misconfigured.
(e.g. missing properties, invalid properties, unknown properties).
.. note::
This ``Exception`` should typically be avoided, and instead an
exception that subclasses
:class:`~adorn.exception.type_check_error.TypeCheckError`
or a new custom ``Exception`` should be used. These alternate
``Exception`` objects contain more information, and are therefore
more useful for the caller.
"""
def __init__(self, message: str):
super().__init__()
self.message = message
def __str__(self):
return self.message
| 35.078947
| 74
| 0.711928
|
class ConfigurationError(Exception):
def __init__(self, message: str):
super().__init__()
self.message = message
def __str__(self):
return self.message
| true
| true
|
f709f56e4962f1057d045039872862b374050ce3
| 8,637
|
py
|
Python
|
carla/PythonAPI/examples/tutorial4.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
carla/PythonAPI/examples/tutorial4.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
carla/PythonAPI/examples/tutorial4.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
from collections import deque
from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.models import Model
import tensorflow as tf  # used for get_default_graph() in DQNAgent
'''
Locate the egg file used by the Carla package.
'''
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
SHOW_PREVIEW = False
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 10
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 100
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.95
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 10
'''
Environment class setup.
'''
class CarEnv:
    SHOW_CAM = SHOW_PREVIEW  # whether to show a camera preview
STEER_AMT = 1.0
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
actor_list = []
    collision_hist = []  # list of collision events
def __init__(self):
self.client = carla.Client("localhost", 2000)
self.client.set_timeout(2.0)
        # if the client is up, the world can be retrieved.
        self.world = self.client.get_world()
        # the world holds a list of blueprints we can use to spawn new actors into the simulation.
        self.blueprint_library = self.world.get_blueprint_library()
        # pick the vehicle model
        self.model_3 = self.blueprint_library.filter("model3")[0]
def reset(self):
self.collision_hist = []
self.actor_list = []
        # spawn the vehicle at a random location and add it to the actor list
self.transform = random.choice(self.world.get_map().get_spawn_points())
self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
        # get the blueprint of the RGB camera sensor
        self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
        # set the size of the image captured by the RGB camera sensor
        self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
        self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
        self.rgb_cam.set_attribute("fov", f"110")
        # position the sensor
        transform = carla.Transform(carla.Location(x=2.5, z=0.7))
        # spawn the sensor and add it to the list.
self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
        # use a lambda to consume the data received from the sensor
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
        '''
        When the vehicle is spawned, a collision may occur if the car hits the ground.
        The sensors may also take time to initialize and return values.
        Therefore, wait about 4 seconds.
        '''
time.sleep(4)
        # get the blueprint of the collision sensor
        colsensor = self.blueprint_library.find("sensor.other.collision")
        # spawn the sensor and add it to the list
        self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
        self.actor_list.append(self.colsensor)
        # use a lambda to consume the data received from the sensor
self.colsensor.listen(lambda event: self.collision_data(event))
while self.front_camera is None:
time.sleep(0.01)
        '''
        Record the actual start time of the episode.
        Make sure neither brake nor throttle is applied,
        then return the first observation.
        '''
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
return self.front_camera
    # handle collision data
def collision_data(self, event):
self.collision_hist.append(event)
    # Handle image data
def process_img(self, image):
i = np.array(image.raw_data)
#print(i.shape)
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("", i3)
cv2.waitKey(1)
self.front_camera = i3
    # Manage action, reward, done, any_extra_info
def step(self, action):
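        # Discrete action space: 0 = steer left, 1 = straight, 2 = steer right,
        # always at full throttle; STEER_AMT scales the steering magnitude.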
if action == 0:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT))
elif action == 1:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0))
elif action == 2:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT))
v = self.vehicle.get_velocity()
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
if len(self.collision_hist) != 0:
done = True
reward = -200
elif kmh < 50:
done = False
reward = -1
else:
done = False
reward = 1
if self.episode_start + SECONDS_PER_EPISODE < time.time():
done = True
return self.front_camera, reward, done, None
# Reinforcement learning agent
class DQNAgent:
def __init__(self):
self.model = self.create_model()
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
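        # Two networks: self.model is trained continuously, while self.target_model
        # supplies stable Q-value targets and is only synced to it every
        # UPDATE_TARGET_EVERY logged episodes (see train() below).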
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
        # ModifiedTensorBoard is assumed to be defined elsewhere in the project
        # (a Keras TensorBoard subclass that logs once per episode rather than per fit call).
        self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")
self.target_update_counter = 0
self.graph = tf.get_default_graph()
self.terminate = False
self.last_logged_episode = 0
self.training_initialized = False
    # Build the model
def create_model(self):
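        # Xception backbone trained from scratch (weights=None) with global average
        # pooling and a 3-unit linear head: one Q-value per discrete steering action.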
base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(3, activation="linear")(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=["accuracy"])
return model
def update_replay_memory(self, transition):
# transition = (current_state, action, reward, new_state, done)
self.replay_memory.append(transition)
def train(self):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
with self.graph.as_default():
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
new_current_states = np.array([transition[3] for transition in minibatch])/255
with self.graph.as_default():
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
# x = input / y = output
X = []
y = []
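        # Bellman update for each sampled transition:
        # Q(s, a) <- r + DISCOUNT * max_a' Q_target(s', a') for non-terminal states,
        # and Q(s, a) <- r when the episode ended on this transition.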
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
        '''
        Log per episode rather than per step;
        write to TensorBoard only when log_this_step is True.
        '''
log_this_step = False
if self.tensorboard.step > self.last_logged_episode:
log_this_step = True
            self.last_logged_episode = self.tensorboard.step
with self.graph.as_default():
self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
if log_this_step:
self.target_update_counter += 1
        # Check whether the target_model should be updated
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
    # Run the training loop
def train_in_loop(self):
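        # Warm-up: fit once on random data so the model is fully initialized in this
        # thread before the continuous training loop starts.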
X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
y = np.random.uniform(size=(1, 3)).astype(np.float32)
with self.graph.as_default():
self.model.fit(X,y, verbose=False, batch_size=1)
self.training_initialized = True
while True:
if self.terminate:
return
self.train()
time.sleep(0.01)
| 30.956989
| 171
| 0.631817
|
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
from collections import deque
from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.models import Model
import tensorflow as tf
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
SHOW_PREVIEW = False
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 10
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 100
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.95
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 10
class CarEnv:
    SHOW_CAM = SHOW_PREVIEW
    STEER_AMT = 1.0
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
actor_list = []
collision_hist = []
def __init__(self):
self.client = carla.Client("localhost", 2000)
self.client.set_timeout(2.0)
self.world = self.client.get_world()
self.blueprint_library = self.world.get_blueprint_library()
self.model_3 = self.blueprint_library.filter("model3")[0]
def reset(self):
self.collision_hist = []
self.actor_list = []
self.transform = random.choice(self.world.get_map().get_spawn_points())
self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
self.rgb_cam.set_attribute("fov", f"110")
transform = carla.Transform(carla.Location(x=2.5, z=0.7))
self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
time.sleep(4)
colsensor = self.blueprint_library.find("sensor.other.collision")
self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.colsensor)
self.colsensor.listen(lambda event: self.collision_data(event))
while self.front_camera is None:
time.sleep(0.01)
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
return self.front_camera
def collision_data(self, event):
self.collision_hist.append(event)
def process_img(self, image):
i = np.array(image.raw_data)
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("", i3)
cv2.waitKey(1)
self.front_camera = i3
def step(self, action):
if action == 0:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT))
elif action == 1:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0))
elif action == 2:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT))
v = self.vehicle.get_velocity()
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
if len(self.collision_hist) != 0:
done = True
reward = -200
elif kmh < 50:
done = False
reward = -1
else:
done = False
reward = 1
if self.episode_start + SECONDS_PER_EPISODE < time.time():
done = True
return self.front_camera, reward, done, None
class DQNAgent:
def __init__(self):
self.model = self.create_model()
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")
self.target_update_counter = 0
self.graph = tf.get_default_graph()
self.terminate = False
self.last_logged_episode = 0
self.training_initialized = False
def create_model(self):
base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(3, activation="linear")(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=["accuracy"])
return model
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
def train(self):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
with self.graph.as_default():
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
new_current_states = np.array([transition[3] for transition in minibatch])/255
with self.graph.as_default():
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
X = []
y = []
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
log_this_step = False
if self.tensorboard.step > self.last_logged_episode:
log_this_step = True
            self.last_logged_episode = self.tensorboard.step
with self.graph.as_default():
self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
if log_this_step:
self.target_update_counter += 1
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
def train_in_loop(self):
X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
y = np.random.uniform(size=(1, 3)).astype(np.float32)
with self.graph.as_default():
self.model.fit(X,y, verbose=False, batch_size=1)
self.training_initialized = True
while True:
if self.terminate:
return
self.train()
time.sleep(0.01)
| true
| true
|
f709f6674727313dfa9736c9f5cd0079095048e2
| 1,907
|
py
|
Python
|
from_cpython/Lib/email/errors.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/email/errors.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/email/errors.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
class MessageError(Exception):
"""Base class for errors in the email package."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
# Pyston change: we don't support multiple inheritance yet, so this error class is tricky.
# We could make it so that it only inherits one of the base classes, but I'd rather that
# anyone who tries to use this error gets a loud error message rather than different behavior.
# class MultipartConversionError(MessageError, TypeError):
# """Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
"""An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect:
"""Base class for a message defect."""
def __init__(self, line=None):
self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
"""A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
"""The claimed start boundary was never found."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
"""A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
"""A 'Unix-from' header was found in the middle of a header block."""
class MalformedHeaderDefect(MessageDefect):
"""Found a header that was missing a colon, or was otherwise malformed."""
class MultipartInvariantViolationDefect(MessageDefect):
"""A message claimed to be a multipart but no subparts were found."""
| 31.262295
| 94
| 0.745674
|
class MessageError(Exception):
class MessageParseError(MessageError):
class HeaderParseError(MessageParseError):
class BoundaryError(MessageParseError):
class CharsetError(MessageError):
class MessageDefect:
def __init__(self, line=None):
self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
class StartBoundaryNotFoundDefect(MessageDefect):
class FirstHeaderLineIsContinuationDefect(MessageDefect):
class MisplacedEnvelopeHeaderDefect(MessageDefect):
class MalformedHeaderDefect(MessageDefect):
class MultipartInvariantViolationDefect(MessageDefect):
| true
| true
|
f709f773e6761ca5effc91050a215dab991ab27e
| 588
|
py
|
Python
|
graphql/utils/tests/test_quoted_or_list.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 8
|
2020-03-23T21:34:02.000Z
|
2021-11-12T11:27:45.000Z
|
graphql/utils/tests/test_quoted_or_list.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 17
|
2020-03-14T22:22:29.000Z
|
2022-03-16T19:26:37.000Z
|
graphql/utils/tests/test_quoted_or_list.py
|
ThanksBoomerang/graphql-core-legacy
|
6e2fbccdec655ce9122b84d3808c14242c4e6b96
|
[
"MIT"
] | 17
|
2020-03-23T12:06:23.000Z
|
2022-02-13T05:33:32.000Z
|
from pytest import raises
from ..quoted_or_list import quoted_or_list
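# quoted_or_list renders a sequence as a quoted, comma-separated list whose last
# item is joined with "or", truncated to at most five items, as the tests below show.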
def test_does_not_accept_an_empty_list():
with raises(StopIteration):
quoted_or_list([])
def test_returns_single_quoted_item():
assert quoted_or_list(["A"]) == '"A"'
def test_returns_two_item_list():
assert quoted_or_list(["A", "B"]) == '"A" or "B"'
def test_returns_comma_separated_many_item_list():
assert quoted_or_list(["A", "B", "C"]) == '"A", "B" or "C"'
def test_limits_to_five_items():
assert quoted_or_list(["A", "B", "C", "D", "E", "F"]) == '"A", "B", "C", "D" or "E"'
| 23.52
| 88
| 0.646259
|
from pytest import raises
from ..quoted_or_list import quoted_or_list
def test_does_not_accept_an_empty_list():
with raises(StopIteration):
quoted_or_list([])
def test_returns_single_quoted_item():
assert quoted_or_list(["A"]) == '"A"'
def test_returns_two_item_list():
assert quoted_or_list(["A", "B"]) == '"A" or "B"'
def test_returns_comma_separated_many_item_list():
assert quoted_or_list(["A", "B", "C"]) == '"A", "B" or "C"'
def test_limits_to_five_items():
assert quoted_or_list(["A", "B", "C", "D", "E", "F"]) == '"A", "B", "C", "D" or "E"'
| true
| true
|
f709f7c3584cc274e7b71885cfd3023498cacb9b
| 4,418
|
py
|
Python
|
profiles_api/views.py
|
MatheusAbdias/proj-rest-api
|
5401d6c2bc96f9f30f646a7969cdc2a65cb0de3c
|
[
"MIT"
] | 1
|
2022-02-20T23:16:19.000Z
|
2022-02-20T23:16:19.000Z
|
profiles_api/views.py
|
MatheusAbdias/proj-rest-api
|
5401d6c2bc96f9f30f646a7969cdc2a65cb0de3c
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
MatheusAbdias/proj-rest-api
|
5401d6c2bc96f9f30f646a7969cdc2a65cb0de3c
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloViewSet(viewsets.ViewSet):
"""Test ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self,request):
"""teste"""
a_viewset = [
'Use esta metodo para (list,recuperar,atualizar,atualizar um campo',
'Automaticamente mapeia as urls usando Roters',
'Proporciona mais funcionalidades com menos codigo',
]
return(Response({'message':'Hello','a_viewset':a_viewset}))
def create(self,request):
"""Cria uma nova menssagem"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
messagem=f'Hello {name}'
return Response({'messagem':messagem})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self,request, pk = None):
"""Retorna um objeto pela ID"""
        return Response({'http_method':'GET'})
def update(self, request, pk = None):
"""Atualiza um objeto"""
return Response({'http_method':'PUT'})
def partial_update(self,request,pk = None):
"""Atualiza parte de um objeto"""
return Response({'http_method':'PATCH'})
def destroy(self, request, pk = None):
"""Remove um objeto"""
return Response({'http_method':'DELETE'})
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self,request,format=None):
"""Returna uma lista de funções da APIView"""
an_apiview = [
            'Uses HTTP methods (get, post, put, delete, patch)',
            'Is similar to a traditional Django view',
            'Gives you control over the application logic',
            'And maps the URLs manually',
]
return Response({'message':'hello','an_apiview':an_apiview})
def post(self,request):
"""cria uma messagem de vem vindo com o nome"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def put(self,request,pk = None):
"""Atualizando um objeto"""
return Response({'metodo':'put'})
def patch(self,request, pk = None):
"""Atualizando um campo de um objeto"""
return Response({'metodo':'Patch'})
def delete(self, request, pk = None):
"""Deletando um objeto"""
return Response({'methodo':'Delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Cria e atualiza um usuario """
serializer_class = serializers.UserProfileSerizalizer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
    search_fields = ('name','email',)
class UserLoginApiView(ObtainAuthToken):
"""Cria um token autenticado para o usuario"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Registra e atualiza feed de usuario autenticado"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
    permission_classes = (permissions.UpdateOwnStatus,IsAuthenticated)
def perform_create(self, serializer):
"""seta o usuario do perfil para o usuario logado"""
serializer.save(user_profile=self.request.user)
| 35.918699
| 80
| 0.661838
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloViewSet(viewsets.ViewSet):
serializer_class = serializers.HelloSerializer
def list(self,request):
a_viewset = [
            'Use this method to list, retrieve, update or partially update objects',
            'Automatically maps the URLs using routers',
            'Provides more functionality with less code',
        ]
        return Response({'message':'Hello','a_viewset':a_viewset})
def create(self,request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
messagem=f'Hello {name}'
return Response({'messagem':messagem})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self,request, pk = None):
        return Response({'http_method':'GET'})
def update(self, request, pk = None):
return Response({'http_method':'PUT'})
def partial_update(self,request,pk = None):
return Response({'http_method':'PATCH'})
def destroy(self, request, pk = None):
return Response({'http_method':'DELETE'})
class HelloApiView(APIView):
serializer_class = serializers.HelloSerializer
def get(self,request,format=None):
an_apiview = [
            'Uses HTTP methods (get, post, put, delete, patch)',
            'Is similar to a traditional Django view',
            'Gives you control over the application logic',
            'And maps the URLs manually',
]
return Response({'message':'hello','an_apiview':an_apiview})
def post(self,request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def put(self,request,pk = None):
return Response({'metodo':'put'})
def patch(self,request, pk = None):
return Response({'metodo':'Patch'})
def delete(self, request, pk = None):
return Response({'methodo':'Delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
serializer_class = serializers.UserProfileSerizalizer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
    search_fields = ('name','email',)
class UserLoginApiView(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
    permission_classes = (permissions.UpdateOwnStatus,IsAuthenticated)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
| true
| true
|
f709f855c6208fa8f7d480a10f080584e7ee9b14
| 2,022
|
py
|
Python
|
tests/gold_tests/shutdown/emergency.test.py
|
nozomi1773/trafficserver
|
2ee141137545a84584d8047eee70b171b5254c40
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/shutdown/emergency.test.py
|
nozomi1773/trafficserver
|
2ee141137545a84584d8047eee70b171b5254c40
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/shutdown/emergency.test.py
|
nozomi1773/trafficserver
|
2ee141137545a84584d8047eee70b171b5254c40
|
[
"Apache-2.0"
] | null | null | null |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = 'Test TSEmergency API'
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess('ts')
Test.testName = 'Emergency Shutdown Test'
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 16,
'proxy.config.accept_threads': 1,
'proxy.config.task_threads': 2,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'TSEmergency_test'
})
# Load plugin
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'emergency_shutdown.so'), ts)
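# The plugin is expected to trigger TSEmergency(), making traffic_server exit
# immediately with status 33 (asserted via ts.ReturnCode below) after writing
# "testing emergency shutdown" to diags.log.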
# Define the test run
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'printf "Emergency Shutdown Test"'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
ts.ReturnCode = 33
ts.Ready = 0  # Must be 0 because we are testing shutdown; this keeps autest from treating the ATS exit as a failure.
ts.Streams.All = Testers.ExcludesExpression('failed to shutdown', 'should NOT contain "failed to shutdown"')
ts.Disk.diags_log.Content = Testers.IncludesExpression('testing emergency shutdown', 'should contain "testing emergency shutdown"')
| 38.884615
| 131
| 0.755193
|
import os
Test.Summary = 'Test TSEmergency API'
Test.ContinueOnFail = True
ts = Test.MakeATSProcess('ts')
Test.testName = 'Emergency Shutdown Test'
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 16,
'proxy.config.accept_threads': 1,
'proxy.config.task_threads': 2,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'TSEmergency_test'
})
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'emergency_shutdown.so'), ts)
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'printf "Emergency Shutdown Test"'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
ts.ReturnCode = 33
ts.Ready = 0
ts.Streams.All = Testers.ExcludesExpression('failed to shutdown', 'should NOT contain "failed to shutdown"')
ts.Disk.diags_log.Content = Testers.IncludesExpression('testing emergency shutdown', 'should contain "testing emergency shutdown"')
| true
| true
|
f709f87c4beb8fa85bcf5d596823ca75307a6393
| 29,244
|
py
|
Python
|
test/test_fnetout.py
|
vanderhe/fortnet-python
|
118237f0ce750852d973b213161fc04623fd7f82
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_fnetout.py
|
vanderhe/fortnet-python
|
118237f0ce750852d973b213161fc04623fd7f82
|
[
"BSD-2-Clause"
] | 1
|
2022-03-11T15:21:56.000Z
|
2022-03-11T15:33:46.000Z
|
test/test_fnetout.py
|
vanderhe/fortnet-python
|
118237f0ce750852d973b213161fc04623fd7f82
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
#------------------------------------------------------------------------------#
# fortnet-python: Python Tools for the Fortnet Software Package #
# Copyright (C) 2021 - 2022 T. W. van der Heide #
# #
# See the LICENSE file for terms of usage and distribution. #
#------------------------------------------------------------------------------#
'''
Regression tests covering the Fnetout class of Fortformat.
'''
import os
import pytest
import numpy as np
from common import compare_fnetout_references
REFPATH = os.path.join(os.getcwd(), 'test', 'references', 'Fnetout')
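# compare_fnetout_references (from the local `common` test module) is assumed to
# read the given Fnetout HDF5 file and compare each field against the reference
# dictionary, returning True if everything agrees within tolerance.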
def test_predict_atomic():
'''Test extraction capabilities for a prediction run
with a network that was trained on atomic targets.
'''
fname = 'predict_atomic.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['atomicpredictions'] = [
np.array([[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global():
'''Test extraction capabilities for a prediction run
with a network that was trained on global targets.
'''
fname = 'predict_global.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
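    # Global predictions are additionally stored atom-resolved: each atom carries
    # an equal share, i.e. the total prediction divided by the number of atoms.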
ref['globalpredictions_atomic'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['atomicpredictions'] = None
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_singleforces():
'''Test extraction capabilities for a prediction run with a network
that was trained on global targets and calculates atomic forces.
'''
fname = 'predict_global_singleforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.301790810131604914e-01],
[-4.301790810131604914e-01]], dtype=float) / 2.0,
np.array([[-5.025593389423121948e-01],
[-5.025593389423121948e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.301790810131604914e-01], dtype=float),
np.array([-5.025593389423121948e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_multiforces():
    '''Test extraction capabilities for a prediction run with a network
    that was trained on multiple global targets and calculates atomic
    forces for each of them.
    '''
fname = 'predict_global_multiforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 3
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01],
[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01]], dtype=float) / 2.0,
np.array([[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01],
[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01], dtype=float),
np.array([-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic():
'''Test extraction capabilities for a validation run
with a network that was trained on atomic targets.
'''
fname = 'validate_atomic.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_global():
'''Test extraction capabilities for a validation run
with a network that was trained on global targets.
'''
fname = 'validate_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['atomicpredictions'] = None
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic_global():
'''Test extraction capabilities for a validation run with a
    network that was trained on both atomic and global targets.
'''
fname = 'validate_atomic_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 2
ref['targets'] = True
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float) * 12.0,
np.array([-4.585193773117663341e+02], dtype=float) * 6.0,
np.array([-2.290754290677185736e+02], dtype=float) * 12.0,
np.array([-6.877477714671086915e+02], dtype=float) * 6.0,
np.array([-5.349057545062817098e+02], dtype=float) * 6.0]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float),
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float),
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float),
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
if __name__ == '__main__':
pytest.main()
| 45.622465
| 80
| 0.610792
|
import os
import pytest
import numpy as np
from common import compare_fnetout_references
REFPATH = os.path.join(os.getcwd(), 'test', 'references', 'Fnetout')
def test_predict_atomic():
fname = 'predict_atomic.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['atomicpredictions'] = [
np.array([[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global():
fname = 'predict_global.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions_atomic'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['atomicpredictions'] = None
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_singleforces():
fname = 'predict_global_singleforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.301790810131604914e-01],
[-4.301790810131604914e-01]], dtype=float) / 2.0,
np.array([[-5.025593389423121948e-01],
[-5.025593389423121948e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.301790810131604914e-01], dtype=float),
np.array([-5.025593389423121948e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_multiforces():
fname = 'predict_global_multiforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 3
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01],
[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01]], dtype=float) / 2.0,
np.array([[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01],
[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01], dtype=float),
np.array([-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
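

def _check_partition_sums(ref):
    '''Hedged helper, not part of the original suite: the per-atom rows in
    'globalpredictions_atomic' above carry the molecular prediction divided
    evenly over the atoms, so summing each array over its atom axis should
    recover the matching entry of 'globalpredictions'.'''
    for atomic, total in zip(ref['globalpredictions_atomic'],
                             ref['globalpredictions']):
        assert np.allclose(atomic.sum(axis=0), total)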
def test_validate_atomic():
fname = 'validate_atomic.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_global():
fname = 'validate_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['atomicpredictions'] = None
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic_global():
fname = 'validate_atomic_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 2
ref['targets'] = True
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float) * 12.0,
np.array([-4.585193773117663341e+02], dtype=float) * 6.0,
np.array([-2.290754290677185736e+02], dtype=float) * 12.0,
np.array([-6.877477714671086915e+02], dtype=float) * 6.0,
np.array([-5.349057545062817098e+02], dtype=float) * 6.0]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float),
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float),
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float),
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
if __name__ == '__main__':
pytest.main()
| true
| true
|
f709f98cce7a40c942eef0665a243189284ff7ed
| 5,240
|
py
|
Python
|
bumblebee/buzzes/api/serializers/buzz_serializers.py
|
sthasam2/bumblebee-backend
|
22057399f34cdc1edb0ef04e622c97df46532de3
|
[
"Linux-OpenIB"
] | null | null | null |
bumblebee/buzzes/api/serializers/buzz_serializers.py
|
sthasam2/bumblebee-backend
|
22057399f34cdc1edb0ef04e622c97df46532de3
|
[
"Linux-OpenIB"
] | null | null | null |
bumblebee/buzzes/api/serializers/buzz_serializers.py
|
sthasam2/bumblebee-backend
|
22057399f34cdc1edb0ef04e622c97df46532de3
|
[
"Linux-OpenIB"
] | null | null | null |
from rest_framework import serializers
from bumblebee.buzzes.api.serializers.interaction_serializers import (
BuzzInteractionsSerializer,
)
from bumblebee.buzzes.models import Buzz, BuzzImage
from bumblebee.core.exceptions import UnknownModelFieldsError
from .user_serializers import BuzzUserSerializer
######################################
## RETRIEVE
######################################
class BuzzImageSerializer(serializers.ModelSerializer):
""" """
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class ListBuzzImageSerializer(serializers.ModelSerializer):
""" """
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class BuzzDetailSerializer(serializers.ModelSerializer):
""" """
buzzid = serializers.IntegerField(source="id")
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.ChoiceField(choices=Buzz.PrivacyChoices.choices)
content = serializers.CharField(help_text="Something in your mind? Post a buzz")
location = serializers.CharField()
flair = serializers.ListField(child=serializers.CharField())
author = BuzzUserSerializer(many=False)
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
model = Buzz
fields = [
"buzzid",
"created_date",
"edited_date",
"edited",
"privacy",
"content",
"location",
"flair",
"author",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
######################################
## CREATE
######################################
class CreateBuzzSerializer(serializers.ModelSerializer):
""" """
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
flair = serializers.ListField(child=serializers.CharField(), required=False)
location = serializers.CharField(required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
class EditBuzzSerializer(serializers.ModelSerializer):
""" """
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
location = serializers.CharField(required=False)
flair = serializers.ListField(child=serializers.CharField(), required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
def update_buzz(self, buzz_instance, **validated_data):
""" """
try:
for key, value in validated_data.items():
                if key in buzz_instance.__dict__:
                    if getattr(buzz_instance, key) != value:
                        setattr(buzz_instance, key, value)
else:
raise UnknownModelFieldsError(
key,
f"'{buzz_instance.__class__.__name__}' object has no model field called {key}",
)
            if not buzz_instance.edited:
                buzz_instance.edited = True
buzz_instance.save()
except UnknownModelFieldsError as error:
print(error)
raise error
except Exception as error:
print("ERROR @update_buzz\n", error)
raise error
class BuzzListSerializer(serializers.Serializer):
""" """
buzzid = serializers.IntegerField(source="id")
author = BuzzUserSerializer()
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.CharField()
content = serializers.CharField()
location = serializers.CharField()
flair = serializers.ListField()
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
""" """
model = Buzz
fields = [
"buzzid",
"author",
"created_date",
"edited_date",
"privacy",
"content",
"location",
"flair",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
# depth = 1
| 29.111111
| 103
| 0.608206
|
from rest_framework import serializers
from bumblebee.buzzes.api.serializers.interaction_serializers import (
BuzzInteractionsSerializer,
)
from bumblebee.buzzes.models import Buzz, BuzzImage
from bumblebee.core.exceptions import UnknownModelFieldsError
from .user_serializers import BuzzUserSerializer
class BuzzImageSerializer(serializers.ModelSerializer):
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class ListBuzzImageSerializer(serializers.ModelSerializer):
image = serializers.ImageField(required=False, use_url=True)
class Meta:
model = BuzzImage
fields = ["image"]
class BuzzDetailSerializer(serializers.ModelSerializer):
buzzid = serializers.IntegerField(source="id")
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.ChoiceField(choices=Buzz.PrivacyChoices.choices)
content = serializers.CharField(help_text="Something in your mind? Post a buzz")
location = serializers.CharField()
flair = serializers.ListField(child=serializers.CharField())
author = BuzzUserSerializer(many=False)
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
model = Buzz
fields = [
"buzzid",
"created_date",
"edited_date",
"edited",
"privacy",
"content",
"location",
"flair",
"author",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
class CreateBuzzSerializer(serializers.ModelSerializer):
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
flair = serializers.ListField(child=serializers.CharField(), required=False)
location = serializers.CharField(required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
class EditBuzzSerializer(serializers.ModelSerializer):
privacy = serializers.ChoiceField(
required=False, choices=Buzz.PrivacyChoices.choices
)
content = serializers.CharField(
required=False, help_text="Something in your mind? Post a buzz"
)
location = serializers.CharField(required=False)
flair = serializers.ListField(child=serializers.CharField(), required=False)
class Meta:
model = Buzz
fields = ["privacy", "content", "location", "flair"]
def update_buzz(self, buzz_instance, **validated_data):
try:
for key, value in validated_data.items():
                if key in buzz_instance.__dict__:
                    if getattr(buzz_instance, key) != value:
                        setattr(buzz_instance, key, value)
else:
raise UnknownModelFieldsError(
key,
f"'{buzz_instance.__class__.__name__}' object has no model field called {key}",
)
            if not buzz_instance.edited:
                buzz_instance.edited = True
buzz_instance.save()
except UnknownModelFieldsError as error:
print(error)
raise error
except Exception as error:
print("ERROR @update_buzz\n", error)
raise error
class BuzzListSerializer(serializers.Serializer):
buzzid = serializers.IntegerField(source="id")
author = BuzzUserSerializer()
created_date = serializers.DateTimeField()
edited_date = serializers.DateTimeField()
edited = serializers.BooleanField()
privacy = serializers.CharField()
content = serializers.CharField()
location = serializers.CharField()
flair = serializers.ListField()
images = ListBuzzImageSerializer(source="buzz_image", many=True, read_only=True)
interaction = BuzzInteractionsSerializer(source="buzz_interaction", read_only=True)
sentiment_value = serializers.FloatField()
textblob_value = serializers.FloatField()
class Meta:
model = Buzz
fields = [
"buzzid",
"author",
"created_date",
"edited_date",
"privacy",
"content",
"location",
"flair",
"images",
"interaction",
"sentiment_value",
"textblob_value",
]
| true
| true
|
f709f9ddbd9a5f676f6351489bf105cf4c645a2a
| 2,382
|
py
|
Python
|
gazoo_device/tests/functional_tests/device_power_test_suite.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | null | null | null |
gazoo_device/tests/functional_tests/device_power_test_suite.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | 1
|
2021-06-24T19:20:50.000Z
|
2021-06-24T19:20:50.000Z
|
gazoo_device/tests/functional_tests/device_power_test_suite.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test suite verifies device_power capability."""
from typing import Type
from gazoo_device.tests.functional_tests.utils import gdm_test_base
import retry
class DevicePowerTestSuite(gdm_test_base.GDMTestBase):
"""Test suite for the device_power capability."""
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
"""Determine if this test suite can run on the given device."""
if not device_class.has_capabilities(["device_power"]):
return False
props = ["device_power.hub_name", "device_power.port_number"]
return cls.check_properties_set(device_name, props)
@classmethod
def requires_pairing(cls) -> bool:
"""Returns True if the device must be paired to run this test suite."""
return False
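
  # Note (hedged): the `retry` package decorator below re-runs the test once
  # more after a 30 s delay if any exception escapes, absorbing transient
  # power-switch flakiness.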
@retry.retry(tries=2, delay=30)
def test_device_power_on_off(self):
"""Verifies on() and off() methods work."""
original_mode = self.device.device_power.port_mode
try:
self.device.device_power.off()
self.assertEqual(
self.device.device_power.port_mode, "off",
f"{self.device.name} port {self.device.device_power.port_number} "
"should have been set to off")
self.device.device_power.on()
on_modes = ["on", "charge", "sync"]
self.assertIn(
self.device.device_power.port_mode, on_modes,
f"{self.device.name} port {self.device.device_power.port_number} "
f"should have been set to one of {on_modes}")
finally:
if original_mode == "off":
self.logger.info(
"Restoring device power back to its original mode 'off'")
self.device.device_power.off()
if __name__ == "__main__":
gdm_test_base.main()
| 36.090909
| 76
| 0.695634
|
from typing import Type
from gazoo_device.tests.functional_tests.utils import gdm_test_base
import retry
class DevicePowerTestSuite(gdm_test_base.GDMTestBase):
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
if not device_class.has_capabilities(["device_power"]):
return False
props = ["device_power.hub_name", "device_power.port_number"]
return cls.check_properties_set(device_name, props)
@classmethod
def requires_pairing(cls) -> bool:
return False
@retry.retry(tries=2, delay=30)
def test_device_power_on_off(self):
original_mode = self.device.device_power.port_mode
try:
self.device.device_power.off()
self.assertEqual(
self.device.device_power.port_mode, "off",
f"{self.device.name} port {self.device.device_power.port_number} "
"should have been set to off")
self.device.device_power.on()
on_modes = ["on", "charge", "sync"]
self.assertIn(
self.device.device_power.port_mode, on_modes,
f"{self.device.name} port {self.device.device_power.port_number} "
f"should have been set to one of {on_modes}")
finally:
if original_mode == "off":
self.logger.info(
"Restoring device power back to its original mode 'off'")
self.device.device_power.off()
if __name__ == "__main__":
gdm_test_base.main()
| true
| true
|
f709fa3e67ee8095268931a28d7f66ac738f26b7
| 1,023
|
py
|
Python
|
books/PRML/PRML-master-Python/prml/nn/optimizer/ada_delta.py
|
iamfaith/DeepLearning
|
80ce429d0e9e448cf84e7d51129ef4e0077048a2
|
[
"Apache-2.0"
] | 7,581
|
2018-04-26T04:29:30.000Z
|
2022-03-31T15:35:39.000Z
|
books/PRML/PRML-master-Python/prml/nn/optimizer/ada_delta.py
|
lizhenchen2019/DeepLearning
|
467c73e2d0435f0a05255e5b5e00454260d01f27
|
[
"Apache-2.0"
] | 8
|
2019-05-22T02:27:35.000Z
|
2022-03-03T03:53:05.000Z
|
books/PRML/PRML-master-Python/prml/nn/optimizer/ada_delta.py
|
lizhenchen2019/DeepLearning
|
467c73e2d0435f0a05255e5b5e00454260d01f27
|
[
"Apache-2.0"
] | 2,340
|
2018-04-26T04:28:11.000Z
|
2022-03-31T02:28:25.000Z
|
import numpy as np
from prml.nn.optimizer.optimizer import Optimizer
class AdaDelta(Optimizer):
"""
AdaDelta optimizer
"""
def __init__(self, parameter, rho=0.95, epsilon=1e-8):
super().__init__(parameter, None)
self.rho = rho
self.epsilon = epsilon
self.mean_squared_deriv = []
self.mean_squared_update = []
for p in self.parameter:
self.mean_squared_deriv.append(np.zeros(p.shape))
self.mean_squared_update.append(np.zeros(p.shape))
def update(self):
self.increment_iteration()
for p, msd, msu in zip(self.parameter, self.mean_squared_deriv, self.mean_squared_update):
if p.grad is None:
continue
grad = p.grad
msd *= self.rho
msd += (1 - self.rho) * grad ** 2
delta = np.sqrt((msu + self.epsilon) / (msd + self.epsilon)) * grad
msu *= self.rho
            msu += (1 - self.rho) * delta ** 2  # exponential moving average of squared updates
p.value += delta
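
# Hedged usage sketch (names illustrative; a prml parameter exposes .shape,
# .grad and .value, which is all this optimizer touches):
#
#     opt = AdaDelta(model.parameter, rho=0.95)
#     for _ in range(100):
#         loss = model.forward(x, t)   # backward pass fills each p.grad
#         opt.update()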
| 31.96875
| 98
| 0.57087
|
import numpy as np
from prml.nn.optimizer.optimizer import Optimizer
class AdaDelta(Optimizer):
def __init__(self, parameter, rho=0.95, epsilon=1e-8):
super().__init__(parameter, None)
self.rho = rho
self.epsilon = epsilon
self.mean_squared_deriv = []
self.mean_squared_update = []
for p in self.parameter:
self.mean_squared_deriv.append(np.zeros(p.shape))
self.mean_squared_update.append(np.zeros(p.shape))
def update(self):
self.increment_iteration()
for p, msd, msu in zip(self.parameter, self.mean_squared_deriv, self.mean_squared_update):
if p.grad is None:
continue
grad = p.grad
msd *= self.rho
msd += (1 - self.rho) * grad ** 2
delta = np.sqrt((msu + self.epsilon) / (msd + self.epsilon)) * grad
msu *= self.rho
            msu += (1 - self.rho) * delta ** 2
p.value += delta
| true
| true
|
f709fba769a5a08997b1bd7938d44d6037a9f1df
| 8,111
|
py
|
Python
|
cloudmersive_barcode_api_client/configuration.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Barcode
|
e584de80304ebddbcce99ee6ff42196d46486421
|
[
"Apache-2.0"
] | 1
|
2018-06-24T04:50:28.000Z
|
2018-06-24T04:50:28.000Z
|
cloudmersive_barcode_api_client/configuration.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Barcode
|
e584de80304ebddbcce99ee6ff42196d46486421
|
[
"Apache-2.0"
] | 1
|
2019-02-25T18:23:23.000Z
|
2019-02-25T18:23:23.000Z
|
cloudmersive_barcode_api_client/configuration.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Barcode
|
e584de80304ebddbcce99ee6ff42196d46486421
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
barcodeapi
Barcode APIs let you generate barcode images, and recognize values from images of barcodes. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
_default = None
def __init__(self):
"""Constructor"""
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
# Default Base url
self.host = "https://api.cloudmersive.com"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# function to refresh API key if expired
self.refresh_api_key_hook = None
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("cloudmersive_barcode_api_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
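
    # Note (hedged): http.client/httplib's debuglevel=1 makes every request
    # and response line print to stdout, so toggling `debug` on a
    # Configuration instance is a quick way to trace raw API traffic.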
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
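
    # Hedged usage sketch (standalone; urllib3's make_headers is public API):
    #
    #     >>> import urllib3
    #     >>> urllib3.util.make_headers(basic_auth='user:pass')['authorization']
    #     'Basic dXNlcjpwYXNz'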
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'Apikey':
{
'type': 'api_key',
'in': 'header',
'key': 'Apikey',
'value': self.get_api_key_with_prefix('Apikey')
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1\n"\
"SDK Package Version: 3.0.2".\
format(env=sys.platform, pyversion=sys.version)
| 32.059289
| 109
| 0.604981
|
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
_default = None
def __init__(self):
if self._default:
for key in self._default.__dict__.keys():
self.__dict__[key] = copy.copy(self._default.__dict__[key])
return
self.host = "https://api.cloudmersive.com"
self.temp_folder_path = None
self.api_key = {}
self.api_key_prefix = {}
self.refresh_api_key_hook = None
self.username = ""
self.password = ""
self.logger = {}
self.logger["package_logger"] = logging.getLogger("cloudmersive_barcode_api_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
self.logger_stream_handler = None
self.logger_file_handler = None
self.logger_file = None
self.debug = False
self.verify_ssl = True
self.ssl_ca_cert = None
self.cert_file = None
self.key_file = None
self.assert_hostname = None
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@classmethod
def set_default(cls, default):
cls._default = default
@property
def logger_file(self):
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
return self.__debug
@debug.setter
def debug(self, value):
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
return {
'Apikey':
{
'type': 'api_key',
'in': 'header',
'key': 'Apikey',
'value': self.get_api_key_with_prefix('Apikey')
},
}
def to_debug_report(self):
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1\n"\
"SDK Package Version: 3.0.2".\
format(env=sys.platform, pyversion=sys.version)
| true
| true
|
f709fd08e353fb40d7b89a80816394ef307e0dcb
| 2,633
|
py
|
Python
|
bilby_pipe/job_creation/nodes/generation_node.py
|
Samanwaya1301/tidal-heating-bilby-pipe
|
b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1
|
[
"MIT"
] | 4
|
2019-02-28T00:54:28.000Z
|
2021-08-24T14:52:16.000Z
|
bilby_pipe/job_creation/nodes/generation_node.py
|
Samanwaya1301/tidal-heating-bilby-pipe
|
b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1
|
[
"MIT"
] | 2
|
2020-10-05T02:11:03.000Z
|
2021-02-19T06:31:42.000Z
|
bilby_pipe/job_creation/nodes/generation_node.py
|
Samanwaya1301/tidal-heating-bilby-pipe
|
b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1
|
[
"MIT"
] | 3
|
2020-08-26T09:45:54.000Z
|
2021-07-07T09:24:52.000Z
|
from ...utils import DataDump, logger
from ..node import Node
class GenerationNode(Node):
def __init__(self, inputs, trigger_time, idx, dag, parent=None):
"""
Node for data generation jobs
Parameters:
-----------
inputs: bilby_pipe.main.MainInput
The user-defined inputs
trigger_time: float
The trigger time to use in generating analysis data
idx: int
The index of the data-generation job, used to label data products
dag: bilby_pipe.dag.Dag
The dag structure
parent: bilby_pipe.job_creation.node.Node (optional)
Any job to set as the parent to this job - used to enforce
dependencies
"""
super().__init__(inputs)
self.inputs = inputs
self.trigger_time = trigger_time
self.idx = idx
self.dag = dag
self.request_cpus = 1
self.setup_arguments()
self.arguments.add("label", self.label)
self.arguments.add("idx", self.idx)
self.arguments.add("trigger-time", self.trigger_time)
if self.inputs.injection_file is not None:
self.arguments.add("injection-file", self.inputs.injection_file)
if self.inputs.timeslide_file is not None:
self.arguments.add("timeslide-file", self.inputs.timeslide_file)
self.process_node()
if parent:
self.job.add_parent(parent.job)
@property
def executable(self):
return self._get_executable_path("bilby_pipe_generation")
@property
def request_memory(self):
return self.inputs.request_memory_generation
@property
def log_directory(self):
return self.inputs.data_generation_log_directory
@property
def universe(self):
if self.inputs.local_generation:
logger.debug(
"Data generation done locally: please do not use this when "
"submitting a large number of jobs"
)
universe = "local"
else:
logger.debug(f"All data will be grabbed in the {self._universe} universe")
universe = self._universe
return universe
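
    # Note (hedged): "universe" refers to an HTCondor job universe; the
    # "local" universe runs data generation on the submit host itself, which
    # is why it is discouraged for large numbers of jobs.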
@property
def job_name(self):
job_name = "{}_data{}_{}_generation".format(
self.inputs.label, str(self.idx), self.trigger_time
)
job_name = job_name.replace(".", "-")
return job_name
@property
def label(self):
return self.job_name
@property
def data_dump_file(self):
return DataDump.get_filename(self.inputs.data_directory, self.label)
| 31.345238
| 86
| 0.616787
|
from ...utils import DataDump, logger
from ..node import Node
class GenerationNode(Node):
def __init__(self, inputs, trigger_time, idx, dag, parent=None):
super().__init__(inputs)
self.inputs = inputs
self.trigger_time = trigger_time
self.idx = idx
self.dag = dag
self.request_cpus = 1
self.setup_arguments()
self.arguments.add("label", self.label)
self.arguments.add("idx", self.idx)
self.arguments.add("trigger-time", self.trigger_time)
if self.inputs.injection_file is not None:
self.arguments.add("injection-file", self.inputs.injection_file)
if self.inputs.timeslide_file is not None:
self.arguments.add("timeslide-file", self.inputs.timeslide_file)
self.process_node()
if parent:
self.job.add_parent(parent.job)
@property
def executable(self):
return self._get_executable_path("bilby_pipe_generation")
@property
def request_memory(self):
return self.inputs.request_memory_generation
@property
def log_directory(self):
return self.inputs.data_generation_log_directory
@property
def universe(self):
if self.inputs.local_generation:
logger.debug(
"Data generation done locally: please do not use this when "
"submitting a large number of jobs"
)
universe = "local"
else:
logger.debug(f"All data will be grabbed in the {self._universe} universe")
universe = self._universe
return universe
@property
def job_name(self):
job_name = "{}_data{}_{}_generation".format(
self.inputs.label, str(self.idx), self.trigger_time
)
job_name = job_name.replace(".", "-")
return job_name
@property
def label(self):
return self.job_name
@property
def data_dump_file(self):
return DataDump.get_filename(self.inputs.data_directory, self.label)
| true
| true
|
f709fdad8e04c0a5e71d2d9204dc5fa6943d9a3d
| 536
|
py
|
Python
|
append_to_array_with_comp.py
|
tsuyukimakoto/chore_python_sequence_performance
|
d984675f7af4d47b256b341f123b07f8827d95ed
|
[
"MIT"
] | null | null | null |
append_to_array_with_comp.py
|
tsuyukimakoto/chore_python_sequence_performance
|
d984675f7af4d47b256b341f123b07f8827d95ed
|
[
"MIT"
] | null | null | null |
append_to_array_with_comp.py
|
tsuyukimakoto/chore_python_sequence_performance
|
d984675f7af4d47b256b341f123b07f8827d95ed
|
[
"MIT"
] | null | null | null |
from array import array
from random import randint
import sys
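
# Note: `@profile` is injected at run time by line_profiler (kernprof -l) or
# memory_profiler; running this module directly would raise NameError.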
@profile
def create_data():
return array('i', [randint(1, 10000000) for i in range(100000)])
def proc():
cnt = 0
data = create_data()
for i in range(100000):
if randint(1, 10000000) in data:
cnt += 1
if __name__ == '__main__':
print(sys.argv[0])
# print(sys.version_info)
# import timeit
# print(timeit.timeit("proc()", setup="from __main__ import proc", number=3))
# [proc() for i in range(3)]
create_data()
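
# Hedged note: `value in data` on an array('i') is a linear scan, so the loop
# in proc() is O(n) per lookup. A one-off set conversion makes each lookup
# O(1) on average, e.g.:
#
#     lookup = set(data)
#     hits = sum(randint(1, 10000000) in lookup for _ in range(100000))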
| 21.44
| 81
| 0.623134
|
from array import array
from random import randint
import sys
@profile
def create_data():
return array('i', [randint(1, 10000000) for i in range(100000)])
def proc():
cnt = 0
data = create_data()
for i in range(100000):
if randint(1, 10000000) in data:
cnt += 1
if __name__ == '__main__':
print(sys.argv[0])
create_data()
| true
| true
|
f709fe65c70fa2031de7bd284fbe943cd20a10e6
| 5,293
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/unittest_pyreverse_inspector.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/unittest_pyreverse_inspector.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 1
|
2016-03-22T13:36:30.000Z
|
2016-03-22T13:36:30.000Z
|
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/unittest_pyreverse_inspector.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2003-2015 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Unit tests for the visitors.diadefs module
"""
import os
import unittest
import astroid
from astroid import nodes
from astroid import bases
from astroid import manager
from astroid import test_utils
from pylint.pyreverse import inspector
from unittest_pyreverse_writer import get_project
MANAGER = manager.AstroidManager()
def astroid_wrapper(func, modname):
return func(modname)
class LinkerTest(unittest.TestCase):
def setUp(self):
super(LinkerTest, self).setUp()
self.project = get_project('data', 'data')
self.linker = inspector.Linker(self.project)
self.linker.visit(self.project)
def test_class_implements(self):
klass = self.project.get_module('data.clientmodule_test')['Ancestor']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 1)
self.assertTrue(isinstance(klass.implements[0], nodes.ClassDef))
self.assertEqual(klass.implements[0].name, "Interface")
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 0)
def test_locals_assignment_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'locals_type'))
type_dict = klass.locals_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['TYPE', 'top'])
self.assertEqual(len(type_dict['TYPE']), 1)
self.assertEqual(type_dict['TYPE'][0].value, 'final class')
self.assertEqual(len(type_dict['top']), 1)
self.assertEqual(type_dict['top'][0].value, 'class')
def test_instance_attrs_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'instance_attrs_type'))
type_dict = klass.instance_attrs_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['_id', 'relation'])
self.assertTrue(isinstance(type_dict['relation'][0], bases.Instance),
type_dict['relation'])
self.assertEqual(type_dict['relation'][0].name, 'DoNothing')
self.assertIs(type_dict['_id'][0], astroid.YES)
def test_concat_interfaces(self):
cls = test_utils.extract_node('''
class IMachin: pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
class InterfaceCanNowBeFound: #@
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
''')
interfaces = inspector.interfaces(cls)
self.assertEqual([i.name for i in interfaces], ['IMachin'])
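
    # Note: the trailing `#@` in the snippet above is astroid's marker for
    # test_utils.extract_node -- it selects the class definition on that line
    # as the node to return.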
def test_interfaces(self):
module = astroid.parse('''
class Interface(object): pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class Concrete0(object):
__implements__ = MyIFace
class Concrete1:
__implements__ = (MyIFace, AnotherIFace)
class Concrete2:
__implements__ = (MyIFace, AnotherIFace)
class Concrete23(Concrete1): pass
''')
for klass, interfaces in (('Concrete0', ['MyIFace']),
('Concrete1', ['MyIFace', 'AnotherIFace']),
('Concrete2', ['MyIFace', 'AnotherIFace']),
('Concrete23', ['MyIFace', 'AnotherIFace'])):
klass = module[klass]
self.assertEqual([i.name for i in inspector.interfaces(klass)],
interfaces)
def test_from_directory(self):
expected = os.path.join('pylint', 'test', 'data', '__init__.py')
self.assertEqual(self.project.name, 'data')
self.assertTrue(self.project.path.endswith(expected), self.project.path)
def test_project_node(self):
expected = [
'data', 'data.clientmodule_test',
'data.suppliermodule_test',
]
self.assertListEqual(sorted(self.project.keys()), expected)
if __name__ == '__main__':
unittest.main()
| 38.635036
| 87
| 0.643869
|
import os
import unittest
import astroid
from astroid import nodes
from astroid import bases
from astroid import manager
from astroid import test_utils
from pylint.pyreverse import inspector
from unittest_pyreverse_writer import get_project
MANAGER = manager.AstroidManager()
def astroid_wrapper(func, modname):
return func(modname)
class LinkerTest(unittest.TestCase):
def setUp(self):
super(LinkerTest, self).setUp()
self.project = get_project('data', 'data')
self.linker = inspector.Linker(self.project)
self.linker.visit(self.project)
def test_class_implements(self):
klass = self.project.get_module('data.clientmodule_test')['Ancestor']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 1)
self.assertTrue(isinstance(klass.implements[0], nodes.ClassDef))
self.assertEqual(klass.implements[0].name, "Interface")
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 0)
def test_locals_assignment_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'locals_type'))
type_dict = klass.locals_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['TYPE', 'top'])
self.assertEqual(len(type_dict['TYPE']), 1)
self.assertEqual(type_dict['TYPE'][0].value, 'final class')
self.assertEqual(len(type_dict['top']), 1)
self.assertEqual(type_dict['top'][0].value, 'class')
def test_instance_attrs_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'instance_attrs_type'))
type_dict = klass.instance_attrs_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['_id', 'relation'])
self.assertTrue(isinstance(type_dict['relation'][0], bases.Instance),
type_dict['relation'])
self.assertEqual(type_dict['relation'][0].name, 'DoNothing')
self.assertIs(type_dict['_id'][0], astroid.YES)
def test_concat_interfaces(self):
cls = test_utils.extract_node('''
class IMachin: pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
class InterfaceCanNowBeFound: #@
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
''')
interfaces = inspector.interfaces(cls)
self.assertEqual([i.name for i in interfaces], ['IMachin'])
def test_interfaces(self):
module = astroid.parse('''
class Interface(object): pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class Concrete0(object):
__implements__ = MyIFace
class Concrete1:
__implements__ = (MyIFace, AnotherIFace)
class Concrete2:
__implements__ = (MyIFace, AnotherIFace)
class Concrete23(Concrete1): pass
''')
for klass, interfaces in (('Concrete0', ['MyIFace']),
('Concrete1', ['MyIFace', 'AnotherIFace']),
('Concrete2', ['MyIFace', 'AnotherIFace']),
('Concrete23', ['MyIFace', 'AnotherIFace'])):
klass = module[klass]
self.assertEqual([i.name for i in inspector.interfaces(klass)],
interfaces)
def test_from_directory(self):
expected = os.path.join('pylint', 'test', 'data', '__init__.py')
self.assertEqual(self.project.name, 'data')
self.assertTrue(self.project.path.endswith(expected), self.project.path)
def test_project_node(self):
expected = [
'data', 'data.clientmodule_test',
'data.suppliermodule_test',
]
self.assertListEqual(sorted(self.project.keys()), expected)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f709ff1bce098437679e5a803075ed18ece003f9
| 31,264
|
py
|
Python
|
Source/Python/UPT/Library/String.py
|
Mokwang/basetools
|
c190b1ccb9f388bec7d19a7cc281aeabb94b8d75
|
[
"BSD-2-Clause"
] | 8
|
2015-05-04T22:34:40.000Z
|
2021-07-07T06:10:40.000Z
|
Source/Python/UPT/Library/String.py
|
Mokwang/basetools
|
c190b1ccb9f388bec7d19a7cc281aeabb94b8d75
|
[
"BSD-2-Clause"
] | null | null | null |
Source/Python/UPT/Library/String.py
|
Mokwang/basetools
|
c190b1ccb9f388bec7d19a7cc281aeabb94b8d75
|
[
"BSD-2-Clause"
] | 8
|
2015-06-29T13:37:05.000Z
|
2021-03-22T16:05:16.000Z
|
## @file
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
String
'''
##
# Import Modules
#
import re
import os.path
from string import strip
import Logger.Log as Logger
import Library.DataType as DataType
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import PARSER_ERROR
from Logger import StringTable as ST
#
# Regular expression for matching macro used in DSC/DEC/INF file inclusion
#
gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
## GetSplitValueList
#
# Get a value list from a string with multiple values split by SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
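#
# Illustrative example (not part of the original source; assumes
# DataType.TAB_VALUE_SPLIT is '|'; under Python 2, map() returns a list):
#   GetSplitValueList('AAA | BBB|CCC')   -> ['AAA', 'BBB', 'CCC']
#   GetSplitValueList('A|B|C', '|', 1)   -> ['A', 'B|C']
#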
## MergeArches
#
# Find all arches of a key in the dict and append the new arch to the list
# If no arch exists yet for the key, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict.keys():
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
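#
# Illustrative example (not part of the original source):
#   Dict = {}
#   MergeArches(Dict, 'PcdFoo', 'IA32')   # Dict == {'PcdFoo': ['IA32']}
#   MergeArches(Dict, 'PcdFoo', 'X64')    # Dict == {'PcdFoo': ['IA32', 'X64']}
#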
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return 0 on success, -1 if the format is invalid, 1 if no DEFINE statement is present
#
# @param String: String with DEFINE statement
# @param Arch: Supported Arch
# @param Defines: DEFINE statement to be parsed
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').\
split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
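#
# Illustrative example (not part of the original source; assumes
# DataType.TAB_DEFINE is 'DEFINE' and DataType.TAB_EQUAL_SPLIT is '='):
#   Defines = {}
#   GenDefines('DEFINE MY_PATH = /tmp/pkg', 'COMMON', Defines)
#   # returns 0 and sets Defines[('MY_PATH', 'COMMON')] = '/tmp/pkg'
#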
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
NewKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), NewKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of the section definition to get the key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
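#
# Illustrative example (not part of the original source; assumes
# DataType.TAB_SPLIT is '.' and DataType.TAB_VALUE_SPLIT is '|'):
#   SplitModuleType('LibraryClass.common.PEIM|DXE_DRIVER')
#   # -> ['LibraryClass.common', ['PEIM', 'DXE_DRIVER']]
#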
## Replace macro in string
#
# This method replaces macros used in a given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of a dictionary
# @param SelfReplacement Whether to replace undefined macros with ''
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
LastString = String
if MacroDefinitions == None:
MacroDefinitions = {}
while MacroDefinitions:
QuotedStringList = []
HaveQuotedMacroFlag = False
if not Flag:
MacroUsed = gMACRO_PATTERN.findall(String)
else:
ReQuotedString = re.compile('\"')
QuotedStringList = ReQuotedString.split(String)
if len(QuotedStringList) >= 3:
HaveQuotedMacroFlag = True
Count = 0
MacroString = ""
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
MacroString += QuotedStringItem
if Count == len(QuotedStringList) and Count%2 == 0:
MacroString += QuotedStringItem
MacroUsed = gMACRO_PATTERN.findall(MacroString)
#
# no macro found in String, stop replacing
#
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
continue
if not HaveQuotedMacroFlag:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
else:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
elif Count == len(QuotedStringList) and Count%2 == 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
RetString = ''
if HaveQuotedMacroFlag:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
if Count != len(QuotedStringList):
RetString += QuotedStringList[Count-1] + "\""
else:
RetString += QuotedStringList[Count-1]
String = RetString
#
# in case there's macro not defined
#
if String == LastString:
break
LastString = String
return String
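#
# Illustrative example (not part of the original source):
#   ReplaceMacro('$(ARCH)/bin', {'ARCH': 'X64'})    -> 'X64/bin'
#   ReplaceMacro('$(A)$(B)', {'A': '1', 'B': '2'})  -> '12'
#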
## NormPath
#
# Create a normalized path
# and replace DEFINE macros in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines = None):
IsRelativePath = False
if Defines == None:
Defines = {}
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
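#
# Illustrative example (not part of the original source; results shown for a
# POSIX-style os.path):
#   NormPath('./Foo//Bar')                                -> './Foo/Bar'
#   NormPath('$(WORKSPACE)/Build', {'WORKSPACE': '/ws'})  -> '/ws/Build'
#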
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# remove comments, but we should escape comment character in string
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and not InString:
Line = Line[0: Index]
break
#
# remove whitespace again
#
Line = Line.strip()
return Line
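#
# Illustrative example (not part of the original source):
#   CleanString('  Value  # trailing comment', '#')  -> 'Value'
#   CleanString('Path = "a#b" # comment', '#')       -> 'Path = "a#b"'
#   # the '#' inside the quoted string is protected by the InString flag
#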
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK1_SPLIT, CommentCharacter)
#
# separate comments and statements
#
LineParts = Line.split(CommentCharacter, 1)
#
# remove whitespace again
#
Line = LineParts[0].strip()
if len(LineParts) > 1:
Comment = LineParts[1].strip()
#
# Remove prefixed and trailing comment characters
#
Start = 0
End = len(Comment)
while Start < End and Comment.startswith(CommentCharacter, Start, End):
Start += 1
while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
End -= 1
Comment = Comment[Start:End]
Comment = Comment.strip()
else:
Comment = ''
return Line, Comment
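#
# Illustrative example (not part of the original source):
#   CleanString2('Value # a comment', '#')  -> ('Value', 'a comment')
#   CleanString2('Value only', '#')         -> ('Value only', '')
#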
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
if Key:
pass
if KeyValues:
pass
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues += [Line]
return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
def GetDefineValue(String, Key, CommentCharacter):
if CommentCharacter:
pass
String = CleanString(String)
return String[String.find(Key + ' ') + len(Key + ' ') : ]
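#
# Illustrative example (not part of the original source):
#   GetDefineValue('DEFINE MY_VAR some value', 'DEFINE', '#')
#   # -> 'MY_VAR some value'
#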
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
#                           Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, used to decide if there are
#                        multiple values
# @param ValueSplitCharacter: Value split char, used to split multiple
#                             values. Key1 = Value1|Value2, '|' is the value
#                             split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
ValueSplitFlag, ValueSplitCharacter):
Lines = Lines.split('\n')
Keys = []
Value = ''
DefineValues = ['']
SpecValues = ['']
for Line in Lines:
#
# Handle DEFINE and SPEC
#
if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
if '' in DefineValues:
DefineValues.remove('')
DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
continue
if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
if '' in SpecValues:
SpecValues.remove('')
SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
continue
#
# Handle Others
#
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter:
#
# Remove comments and white spaces
#
LineList[1] = CleanString(LineList[1], CommentCharacter)
if ValueSplitFlag:
Value = map(strip, LineList[1].split(ValueSplitCharacter))
else:
Value = CleanString(LineList[1], CommentCharacter).splitlines()
if Key[0] in Dictionary:
if Key[0] not in Keys:
Dictionary[Key[0]] = Value
Keys.append(Key[0])
else:
Dictionary[Key[0]].extend(Value)
else:
Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
if DefineValues == []:
DefineValues = ['']
if SpecValues == []:
SpecValues = ['']
Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
return True
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
if SupSectionTag:
pass
LineNo = 0
IsFailed = False
NewFileContent = ''
for Line in FileContent.splitlines():
LineNo = LineNo + 1
#
# Clean current line
#
Line = CleanString(Line)
#
# Remove commented line
#
if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
Line = ''
#
# Check $()
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
#
# Check []
#
if Line.find('[') > -1 or Line.find(']') > -1:
#
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\r\n'
if IsFailed:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
return NewFileContent
## CheckFileType
#
# Check if the Filename is including ExtName
# Return True if it exists
# Raise a error message if it not exists
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be
# checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
if CheckFilename != '' and CheckFilename != None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper() and Root:
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_SECTIONNAME_INVALID % (SectionName, CheckFilename, ExtName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo, \
File=ContainerFilename, RaiseError=Logger.IS_RAISE_ERROR)
return True
## CheckFileExist
#
# Check if the file exists
# Return the full path if it exists
# Raise an error message if it does not
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
CheckFile = ''
if CheckFilename != '' and CheckFilename != None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
if not os.path.isfile(CheckFile):
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
LineList = FileContent.splitlines()
for Index in range(len(LineList)):
if LineList[Index].find(Line) > -1:
#
# Ignore statement in comment
#
if IsIgnoreComment:
if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
continue
return Index + 1
return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
if Format != '':
Format = "Correct format is " + Format
Logger.Error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, \
ExtraData=Format, RaiseError=Logger.IS_RAISE_ERROR)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
def WorkspaceFile(WorkspaceDir, Filename):
return os.path.join(NormPath(WorkspaceDir), NormPath(Filename))
## Split string
#
# Remove the '"' that the string starts and ends with
#
# @param String: The string to be stripped of quotes
#
def SplitString(String):
if String.startswith('\"'):
String = String[1:]
if String.endswith('\"'):
String = String[:-1]
return String
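#
# Illustrative example (not part of the original source):
#   SplitString('"quoted"')  -> 'quoted'
#   SplitString('"half')     -> 'half'
#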
## Convert To Sql String
#
# Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
return map(lambda s: s.replace("'", "''") , StringList)
## Convert To Sql String
#
# Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
return String.replace("'", "''")
## GetStringOfList
#
# Get String of a List
#
# @param Lines: string list
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
if type(List) != type([]):
return List
Str = ''
for Item in List:
Str = Str + Item + Split
return Str.strip()
## Get HelpTextList
#
# Get HelpTextList from HelpTextClassList
#
# @param HelpTextClassList: Help Text Class List
#
def GetHelpTextList(HelpTextClassList):
List = []
if HelpTextClassList:
for HelpText in HelpTextClassList:
if HelpText.String.endswith('\n'):
HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
List.extend(HelpText.String.split('\n'))
return List
## Get String Array Length
#
# Get String Array Length
#
# @param String: the source string
#
def StringArrayLength(String):
if isinstance(String, unicode):
return (len(String) + 1) * 2 + 1
elif String.startswith('L"'):
return (len(String) - 3 + 1) * 2
elif String.startswith('"'):
return (len(String) - 2 + 1)
else:
return len(String.split()) + 1
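#
# Illustrative example (not part of the original source; Python 2 semantics,
# where unicode is a distinct type):
#   StringArrayLength(u'ab')    -> 7    # (2 + 1) * 2 + 1
#   StringArrayLength('L"ab"')  -> 6    # (5 - 3 + 1) * 2
#   StringArrayLength('"ab"')   -> 3    # (4 - 2 + 1)
#   StringArrayLength('1 2 3')  -> 4    # token count + 1
#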
## RemoveDupOption
#
# Remove Dup Option
#
# @param OptionString: the option string
# @param Which: Which flag
# @param Against: Against flag
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
if Against:
ValueList += Against
for Index in range(len(OptionList)):
Opt = OptionList[Index]
if not Opt.startswith(Which):
continue
if len(Opt) > len(Which):
Val = Opt[len(Which):]
else:
Val = ""
if Val in ValueList:
OptionList[Index] = ""
else:
ValueList.append(Val)
return " ".join(OptionList)
## Check if the string is a HexDigit
#
# Return True if the string is a decimal integer with at least one digit,
# or a valid hex number (starting with 0x, followed by hex digits);
# False otherwise.
# @param string: input string
#
def IsHexDigit(Str):
try:
int(Str, 10)
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
int(Str, 16)
return True
except ValueError:
return False
return False
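#
# Illustrative example (not part of the original source):
#   IsHexDigit('123')   -> True
#   IsHexDigit('0x1F')  -> True
#   IsHexDigit('0x')    -> False   # no hex digits after the prefix
#   IsHexDigit('12G')   -> False
#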
## Check if the string is a HexDigit and its integer value is within the limit of UINT32
#
# Return True if the string is a decimal integer or a valid hex number
# (starting with 0x, followed by hex digits) whose value fits in a UINT32;
# False otherwise.
# @param string: input string
#
def IsHexDigitUINT32(Str):
try:
Value = int(Str, 10)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
if len(Str) > 2 and Str.upper().startswith('0X'):
try:
Value = int(Str, 16)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
except ValueError:
return False
return False
## CleanSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
RetLines = []
for line in Lines:
ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
RetLines.append(ReMatchSpecialChar.sub(' ', line))
return RetLines
## __GetTokenList
#
# Assume Str is a valid feature flag expression.
# Return a list which contains tokens: alphanumeric tokens and other tokens
# Whitespace is not stripped
#
def __GetTokenList(Str):
InQuote = False
Token = ''
TokenOP = ''
PreChar = ''
List = []
for Char in Str:
if InQuote:
Token += Char
if Char == '"' and PreChar != '\\':
InQuote = not InQuote
List.append(Token)
Token = ''
continue
if Char == '"':
if Token and Token != 'L':
List.append(Token)
Token = ''
if TokenOP:
List.append(TokenOP)
TokenOP = ''
InQuote = not InQuote
Token += Char
continue
if not (Char.isalnum() or Char in '_'):
TokenOP += Char
if Token:
List.append(Token)
Token = ''
else:
Token += Char
if TokenOP:
List.append(TokenOP)
TokenOP = ''
if PreChar == '\\' and Char == '\\':
PreChar = ''
else:
PreChar = Char
if Token:
List.append(Token)
if TokenOP:
List.append(TokenOP)
return List
## ConvertNEToNOTEQ
#
# Convert NE operator to NOT EQ
# For example: 1 NE 2 -> 1 NOT EQ 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNEToNOTEQ(Expr):
List = __GetTokenList(Expr)
for Index in range(len(List)):
if List[Index] == 'NE':
List[Index] = 'NOT EQ'
return ''.join(List)
## ConvertNOTEQToNE
#
# Convert NOT EQ operator to NE
# For example: 1 NOT EQ 2 -> 1 NE 2
#
# @param Expr: Feature flag expression to be converted
#
def ConvertNOTEQToNE(Expr):
List = __GetTokenList(Expr)
HasNOT = False
RetList = []
for Token in List:
if HasNOT and Token == 'EQ':
# At least, 'NOT' is in the list
while not RetList[-1].strip():
RetList.pop()
RetList[-1] = 'NE'
HasNOT = False
continue
if Token == 'NOT':
HasNOT = True
elif Token.strip():
HasNOT = False
RetList.append(Token)
return ''.join(RetList)
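#
# Illustrative example (not part of the original source):
#   ConvertNEToNOTEQ('FLAG NE 0')      -> 'FLAG NOT EQ 0'
#   ConvertNOTEQToNE('FLAG NOT EQ 0')  -> 'FLAG NE 0'
#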
## SplitPcdEntry
#
# Split a PCD entry string into TokenCName, PCD value and FFE.
# NOTE: PCD value and FFE can contain "|" in their expressions, and the INF specification has the rule below.
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
# @param String    A PCD entry string that needs to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
if not String:
return ['', '',''], False
PcdTokenCName = ''
PcdValue = ''
PcdFeatureFlagExp = ''
ValueList = GetSplitValueList(String, "|", 1)
#
# Only contain TokenCName
#
if len(ValueList) == 1:
return [ValueList[0]], True
NewValueList = []
if len(ValueList) == 2:
PcdTokenCName = ValueList[0]
ValueList = GetSplitValueList(ValueList[1], "|")
RemainCount = 0
for Item in ValueList:
ParenthesisCount = 0
for Char in Item:
if Char == "(":
ParenthesisCount += 1
if Char == ")":
ParenthesisCount -= 1
#
# An individual item
#
if RemainCount == 0 and ParenthesisCount >= 0:
NewValueList.append(Item)
RemainCount = ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
NewValueList[-1] = NewValueList[-1] + '|' + Item
RemainCount = RemainCount + ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
#
# ERROR, return
#
return ['', '', ''], False
if len(NewValueList) == 1:
PcdValue = NewValueList[0]
return [PcdTokenCName, PcdValue], True
elif len(NewValueList) == 2:
PcdValue = NewValueList[0]
PcdFeatureFlagExp = NewValueList[1]
return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
else:
return ['', '', ''], False
return ['', '', ''], False
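#
# Illustrative example (not part of the original source):
#   SplitPcdEntry('MyPcd|0x10|TRUE')  -> (['MyPcd', '0x10', 'TRUE'], True)
#   SplitPcdEntry('MyPcd|(1|2)')      -> (['MyPcd', '(1|2)'], True)
#   # the "|" inside the parenthesized expression is kept together
#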
## Check if two arches matched?
#
# @param Arch1
# @param Arch2
#
def IsMatchArch(Arch1, Arch2):
if 'COMMON' in Arch1 or 'COMMON' in Arch2:
return True
if isinstance(Arch1, basestring) and isinstance(Arch2, basestring):
if Arch1 == Arch2:
return True
if isinstance(Arch1, basestring) and isinstance(Arch2, list):
return Arch1 in Arch2
if isinstance(Arch2, basestring) and isinstance(Arch1, list):
return Arch2 in Arch1
if isinstance(Arch1, list) and isinstance(Arch2, list):
for Item1 in Arch1:
for Item2 in Arch2:
if Item1 == Item2:
return True
return False
avg_line_length: 32.364389 | max_line_length: 121 | alphanum_fraction: 0.576766
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f709ff30745f048a9069257f0e215166f25c956b | size: 1509 | ext: py | lang: Python
path: msdsl/assignment.py | repo: sgherbst/msdsl | head_hexsha: e38d5ecdb88b3574bda62f22a4f91ce3e4173d12
licenses: ["MIT"]
stars: 15 (2019-05-14T10:12:23.000Z to 2022-03-29T15:29:52.000Z) | issues: 19 (2020-01-22T21:44:33.000Z to 2021-06-05T02:10:41.000Z) | forks: 5 (2019-10-21T09:53:17.000Z to 2021-08-10T17:32:20.000Z)
from msdsl.expr.expr import ModelExpr
from msdsl.expr.signals import Signal, DigitalSignal, AnalogSignal
from msdsl.expr.format import RealFormat
from msdsl.expr.table import Table
class Assignment:
def __init__(self, signal: Signal, expr: ModelExpr, check_format=True):
self.signal = signal
self.expr = expr
self.check_format = check_format
class BindingAssignment(Assignment):
pass
class ThisCycleAssignment(Assignment):
pass
class NextCycleAssignment(Assignment):
def __init__(self, *args, clk=None, rst=None, ce=None, **kwargs):
self.clk = clk
self.rst = rst
self.ce = ce
super().__init__(*args, **kwargs)
class SyncRomAssignment(Assignment):
def __init__(self, signal: Signal, table: Table, addr: ModelExpr,
clk=None, ce=None, should_bind=False):
self.table = table
self.clk = clk
self.ce = ce
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
class SyncRamAssignment(Assignment):
def __init__(self, signal: AnalogSignal, format_: RealFormat, addr: ModelExpr,
clk: Signal=None, ce: Signal=None, we: Signal=None,
din: Signal=None, should_bind=False):
self.format_ = format_
self.clk = clk
self.ce = ce
self.we = we
self.din = din
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
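# Illustrative usage sketch (not part of the original source; the Signal and
# ModelExpr constructor arguments shown here are assumptions):
#   sig = AnalogSignal('out')                             # hypothetical signal
#   a = NextCycleAssignment(sig, expr, clk=clk, rst=rst)  # expr: some ModelExpr
#   # a.signal, a.expr, a.clk, a.rst are later consumed by a model generator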
avg_line_length: 33.533333 | max_line_length: 83 | alphanum_fraction: 0.636183
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f709ffc9a9096e68c6eafa3940ad5daafbe4f454 | size: 15511 | ext: py | lang: Python
path: deerlab/utils/utils.py | repo: laenan8466/DeerLab | head_hexsha: 94f1942da1b506e0661a8e7e4901bb5ba6d69143
licenses: ["MIT"]
stars: null | issues: null | forks: null
import warnings
import numpy as np
import cmath as math
import scipy as scp
import scipy.optimize as opt
from types import FunctionType
def parse_multidatasets(V,K,weights,precondition=False):
#===============================================================================
# Identify if the signals have already been processed by this function
if type(V) is not list:
if V.size == np.atleast_1d(weights).size:
# If so, just return without doing anything
if precondition:
return V,K,weights,[np.arange(0,len(V))],[1]
else:
return V,K,weights,[np.arange(0,len(V))]
# If multiple signals are specified as a list...
if type(V) is list and all([type(Vs) is np.ndarray for Vs in V]):
nSignals = len(V)
prescales = np.zeros(nSignals)
Vlist = []
# Pre-scale the signals, important for fitregmodel when using global fits with arbitrary scales
for i in range(nSignals):
if precondition:
prescales[i] = max(V[i])
Vlist.append(V[i]/prescales[i])
else:
Vlist.append(V[i])
V = np.concatenate(Vlist, axis=0) # ...concatenate them along the list
elif type(V) is np.ndarray:
nSignals = 1
prescales = [1]
Vlist = [V]
else:
raise TypeError('The input signal(s) must be numpy array or a list of numpy arrays.')
def prepareKernel(K,nSignals):
# If multiple kernels are specified as a list...
if type(K) is tuple:
K = [Ks for Ks in K]
if type(K) is list and all([type(Ks) is np.ndarray for Ks in K]):
nKernels = len(K)
K = np.concatenate(K, axis=0) # ...concatenate them along the list
elif type(K) is np.ndarray:
nKernels = 1
else:
raise TypeError('The input kernel(s) must be numpy array or a list of numpy arrays.')
# Check that the same number of signals and kernel have been passed
if nSignals!=nKernels:
raise KeyError('The same number of kernels and signals must be specified as lists.')
return K
if type(K) is FunctionType:
Kmulti = lambda p: prepareKernel(K(p),nSignals)
else:
Kmulti = prepareKernel(K,nSignals)
# If multiple weights are specified as a list...
if type(weights) is list or not hasattr(weights, "__len__"):
weights = np.atleast_1d(weights)
if len(weights)==1:
weights = np.repeat(weights,nSignals)
weights = weights/sum(weights)
if len(weights)!=nSignals:
raise KeyError('If multiple signals are passed, the same number of weights are required.')
weights_ = []
for i in range(len(weights)):
weights_ = np.concatenate((weights_,weights[i]*np.ones(len(Vlist[i]))))
weights = weights_
else:
raise TypeError('The input weights(s) must be numpy array or a list of numpy arrays.')
# Get the indices to extract the subsets again
Ns = [len(V) for V in Vlist]
subset = [None]*nSignals
for i in range(nSignals):
if i==0:
prev = 0
else:
prev = subset[i-1][-1]+1
subset[i] = np.arange(prev,prev+Ns[i])
if precondition:
return V,Kmulti,weights,subset,prescales
else:
return V,Kmulti,weights,subset
#===============================================================================
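# Illustrative usage (not part of the original module):
#   V, K, weights, subset = parse_multidatasets([V1, V2], [K1, K2], [1, 1])
# concatenates both signals/kernels into one global problem; each original
# signal can be recovered afterwards as V[subset[0]] and V[subset[1]].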
def hccm(J,*args):
"""
Heteroscedasticity Consistent Covariance Matrix (HCCM)
======================================================
Computes the heteroscedasticity consistent covariance matrix (HCCM) of
a given LSQ problem given by the Jacobian matrix (J) and the covariance
matrix of the data (V). If the residual (res) is specified, the
covariance matrix is estimated using some of the methods specified in
    (mode). The HCCM is valid for both heteroscedastic and
    homoscedastic residual vectors.
Usage:
------
C = hccm(J,V)
C = hccm(J,res,mode)
Arguments:
----------
J (NxM-element array)
Jacobian matrix of the residual vector
res (N-element array)
Vector of residuals
mode (string)
HCCM estimator, options are:
'HC0' - White, H. (1980)
'HC1' - MacKinnon and White, (1985)
'HC2' - MacKinnon and White, (1985)
'HC3' - Davidson and MacKinnon, (1993)
'HC4' - Cribari-Neto, (2004)
'HC5' - Cribari-Neto, (2007)
Returns:
--------
C (MxM-element array)
Heteroscedasticity consistent covariance matrix
References:
------------
[1]
White, H. (1980). A heteroskedasticity-consistent covariance matrix
estimator and a direct test for heteroskedasticity. Econometrica, 48(4), 817-838
DOI: 10.2307/1912934
[2]
MacKinnon and White, (1985). Some heteroskedasticity-consistent covariance
matrix estimators with improved finite sample properties. Journal of Econometrics, 29 (1985),
pp. 305-325. DOI: 10.1016/0304-4076(85)90158-7
[3]
Davidson and MacKinnon, (1993). Estimation and Inference in Econometrics
Oxford University Press, New York.
[4]
Cribari-Neto, F. (2004). Asymptotic inference under heteroskedasticity of
unknown form. Computational Statistics & Data Analysis, 45(1), 215-233
DOI: 10.1016/s0167-9473(02)00366-3
[5]
Cribari-Neto, F., Souza, T. C., & Vasconcellos, K. L. P. (2007). Inference
under heteroskedasticity and leveraged data. Communications in Statistics –
Theory and Methods, 36(10), 1877-1888. DOI: 10.1080/03610920601126589
"""
# Unpack inputs
if len(args)==2:
res,mode = args
V = []
elif len(args)==1:
V = args[0]
# Hat matrix
H = J@np.linalg.pinv(J.T@J)@J.T
# Get leverage
h = np.diag(H)
# Number of parameters (k) & Number of variables (n)
n,k = np.shape(J)
if isempty(V):
# Select estimation method using established nomenclature
if mode.upper() == 'HC0': # White,(1980),[1]
# Estimate the data covariance matrix
V = np.diag(res**2)
elif mode.upper() == 'HC1': # MacKinnon and White,(1985),[2]
# Estimate the data covariance matrix
V = n/(n-k)*np.diag(res**2)
elif mode.upper() == 'HC2': # MacKinnon and White,(1985),[2]
# Estimate the data covariance matrix
V = np.diag(res**2/(1-h))
elif mode.upper() == 'HC3': # Davidson and MacKinnon,(1993),[3]
# Estimate the data covariance matrix
V = np.diag(res/(1-h))**2
elif mode.upper() == 'HC4': # Cribari-Neto,(2004),[4]
# Compute discount factor
delta = np.minimum(4,n*h/k)
# Estimate the data covariance matrix
V = np.diag(res**2./((1 - h)**delta))
elif mode.upper() == 'HC5': # Cribari-Neto,(2007),[5]
# Compute inflation factor
k = 0.7
alpha = np.minimum(np.maximum(4,k*max(h)/np.mean(h)),h/np.mean(h))
# Estimate the data covariance matrix
V = np.diag(res**2./(np.sqrt((1 - h)**alpha)))
else:
raise KeyError('HCCM estimation mode not found.')
# Heteroscedasticity Consistent Covariance Matrix (HCCM) estimator
C = np.linalg.pinv(J.T@J)@J.T@V@J@np.linalg.pinv(J.T@J)
return C
#===============================================================================
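# Illustrative usage (not part of the original module): given the Jacobian J
# and residual vector res of a least-squares fit,
#   C = hccm(J, res, 'HC1')
# returns the HC1 covariance estimate, and np.sqrt(np.diag(C)) gives the
# standard errors of the fitted parameters.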
# =================================================================
def metadata(**kwargs):
"""
Decorator: Set model metadata as function attributes
"""
attributes = list(kwargs.keys())
metadata = list(kwargs.values())
def _setmetadata(func):
for attribute,data in zip(attributes,metadata):
setattr(func,attribute,data)
return func
return _setmetadata
# =================================================================
def gsvd(A,B):
#===============================================================================
m,p = A.shape
n = B.shape[0]
# Economy-sized.
useQA = m > p
useQB = n > p
if useQA:
QA,A = scp.linalg.qr(A)
A = A[0:p,0:p]
QA = QA[:,0:p]
m = p
if useQB:
QB,B = scp.linalg.qr(B)
B = B[0:p,0:p]
QB = QB[:,0:p]
n = p
Q,_ = np.linalg.qr(np.vstack((A,B)), mode='reduced')
Q1 = Q[0:m,0:p]
Q2 = Q[m:m+n,0:p]
C,S = csd(Q1,Q2)
# Vector of generalized singular values.
q = min(m+n,p)
    # Suppress divide-by-0 warning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
U = np.vstack((np.zeros((q-m,1),'double'), np.diag(C,max(0,q-m)).reshape(len(np.diag(C,max(0,q-m))),1)))/np.vstack((np.diag(S,0).reshape(len(np.diag(S,0)),1), np.zeros((q-n,1),'double') ))
return U
#===============================================================================
def csd(Q1,Q2):
#===============================================================================
"""
Cosine-Sine Decomposition
-------------------------
Given Q1 and Q2 such that Q1'* Q1 + Q2'* Q2 = I, the
C-S Decomposition is a joint factorization of the form
Q1 = U1*C*V' and Q2=U2*S*V'
where U1,U2,V are orthogonal matrices and C and S are diagonal
matrices (not necessarily square) satisfying
C'* C + S'* S = I
The diagonal entries of C and S are nonnegative and the
diagonal elements of C are in nondecreasing order.
The matrix Q1 cannot have more columns than rows.
Based on the Octave code by Artiste (submitted by S.J.Leon):
http://www.ar-tiste.com/m-fun/m-fun-index.html
"""
m,n = Q1.shape
p,_ = Q2.shape
if m < p:
s,c = csd(Q2,Q1)
j = np.flip(np.arange(n))
c = c[:,j]
s = s[:,j]
m = np.minimum(m,p)
i = np.flip(np.arange(m))
c[np.arange(m),:] = c[i,:]
n = np.minimum(n,p)
i = np.flip(np.arange(n))
s[np.arange(n),:] = s[i,:]
return c,s
_,sdiag,v = np.linalg.svd(Q1)
c = np.zeros((m, n))
np.fill_diagonal(c, sdiag)
v = v.T.conj()
z = np.eye(n,n)
z = scp.linalg.hankel(z[:,n-1])
c[0:n,:] = z@c[0:n,:]@z
v = v@z
Q2 = Q2@v
k=0
for j in range(1,n):
if c[j,j] <= 1/np.sqrt(2): k=j
b = Q2[:,0:k]
u2,r = np.linalg.qr(b,mode='complete')
s = u2.T@Q2
t = np.minimum(p,n)
tt = np.minimum(m,p)
if k<t:
r2 = s[np.ix_(range(k,p),range(k,t))]
_,sdiag,vt = np.linalg.svd(r2)
ss= np.zeros(r2.shape)
np.fill_diagonal(ss, sdiag)
vt = vt.T.conj()
s[k:p,k:t] = ss
c[:,k:t] = c[:,k:t]@vt
w = c[k:tt,k:t]
z,r = np.linalg.qr(w,mode='complete')
c[k:tt,k:t] = r
for j in range(n):
if c[j,j]<0:
c[j,j] = -c[j,j]
for j in range(t):
if s[j,j]<0:
s[j,j] = -s[j,j]
return c,s
#===============================================================================
#===============================================================================
def diagf(X):
"""
Diagonal force
X = diagf(X) zeros all the elements off the main diagonal of X.
"""
X = np.triu(np.tril(X))
return X
#===============================================================================
#===============================================================================
def diagp(Y,X,k):
"""
DIAGP Diagonal positive.
Y,X = diagp(Y,X,k) scales the columns of Y and the rows of X by
unimodular factors to make the k-th diagonal of X real and positive.
"""
D = np.diag(X,k)
j = np.where((D.real < 0) | (D.imag != 0))
D = np.diag(np.conj(D[j])/abs(D[j]))
Y[:,j] = Y[:,j]@D.T
X[j,:] = D@X[j,:]
X = X+0 # use "+0" to set possible -0 elements to 0
return Y,X
#===============================================================================
#===============================================================================
def Jacobian(fcn, x0, lb, ub):
"""
Finite difference Jacobian estimation
Estimates the Jacobian matrix of a vector-valued function ``fcn`` at the
point ``x0`` taking into consideration box-constraints defined by the lower
and upper bounds ``lb`` and ``ub``.
This is a wrapper around the ``scipy.optimize._numdiff.approx_derivative`` function.
"""
J = opt._numdiff.approx_derivative(fcn,x0,method='2-point',bounds=(lb,ub))
J = np.atleast_2d(J)
return J
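# Minimal usage sketch (illustrative only): finite-difference Jacobian of a
# simple vector-valued function at x0 = [1, 2] within the box [0, 5]^2.
def _example_jacobian():
    fcn = lambda x: np.array([x[0]**2, x[0]*x[1]])
    x0 = np.array([1.0, 2.0])
    lb, ub = np.zeros(2), 5*np.ones(2)
    return Jacobian(fcn, x0, lb, ub)          # approximately [[2, 0], [2, 1]]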
#===============================================================================
#===============================================================================
def movmean(x, N):
"""
Moving mean
===========
Returns an array of local N-point mean values, where each mean is calculated over a sliding window of length k across neighboring elements of x.
Usage:
------
xfilt = movmean(x,N)
Arguments:
----------
x (array)
Array to be filtered
N (scalar)
Window size
Returns:
--------
xfilt (array)
Filtered array
"""
xfilt = np.convolve(x, np.ones(N)/N, mode='same')
return xfilt
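# Minimal usage sketch (illustrative only): a centered 3-point moving mean;
# the edges are averaged against the implicit zero-padding of np.convolve.
def _example_movmean():
    x = np.array([1., 2., 3., 4., 5.])
    return movmean(x, 3)                      # array([1., 2., 3., 4., 3.])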
#===============================================================================
#===============================================================================
def ovl(A,B):
"""
Overlap metric
==============
Returns the overlap between two vectors A and B.
Usage:
------
metric = ovl(A,B)
Arguments:
----------
A (N-element array)
First vector
B (N-element array)
Second vector
Returns:
--------
metric (array)
Overlap metric
"""
A /= np.sum(A)
B /= np.sum(B)
metric = np.sum(np.minimum(A,B))
return metric
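# Minimal usage sketch (illustrative only): the overlap of two normalized
# distributions; identical inputs give 1. Note ovl() normalizes in place.
def _example_ovl():
    A = np.array([0.2, 0.5, 0.3])
    B = np.array([0.1, 0.6, 0.3])
    return ovl(A, B)                          # 0.1 + 0.5 + 0.3 = 0.9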
#===============================================================================
def isempty(A):
#===============================================================================
A = np.atleast_1d(A)
boolean = np.size(A)==0
return boolean
#===============================================================================
def multistarts(n,x0,lb,ub):
#===============================================================================
    if n<1:
raise ValueError('The number of requested starting points must be n>0.')
if len(x0) != len(lb) or len(x0) != len(ub):
raise ValueError('The lower/upper bound size(s) are not compatible with the initial guess vector x0.')
# Generate n-1 new starting points within the bounds
if n>1:
x0 = np.linspace(lb,ub,n-1)
else:
x0 = [x0]
return x0
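# Minimal usage sketch (illustrative only): requesting n=3 starts inside the
# box [0,1]^2 replaces x0 with n-1 points spread linearly between the bounds.
def _example_multistarts():
    x0 = np.array([0.5, 0.5])
    lb, ub = np.zeros(2), np.ones(2)
    return multistarts(3, x0, lb, ub)         # rows: [0, 0] and [1, 1]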
#===============================================================================
| 33.143162
| 197
| 0.479402
|
import warnings
import numpy as np
import cmath as math
import scipy as scp
import scipy.optimize as opt
from types import FunctionType
def parse_multidatasets(V,K,weights,precondition=False):
if type(V) is not list:
if V.size == np.atleast_1d(weights).size:
if precondition:
return V,K,weights,[np.arange(0,len(V))],[1]
else:
return V,K,weights,[np.arange(0,len(V))]
if type(V) is list and all([type(Vs) is np.ndarray for Vs in V]):
nSignals = len(V)
prescales = np.zeros(nSignals)
Vlist = []
for i in range(nSignals):
if precondition:
prescales[i] = max(V[i])
Vlist.append(V[i]/prescales[i])
else:
Vlist.append(V[i])
        V = np.concatenate(Vlist, axis=0)
    elif type(V) is np.ndarray:
nSignals = 1
prescales = [1]
Vlist = [V]
else:
raise TypeError('The input signal(s) must be numpy array or a list of numpy arrays.')
def prepareKernel(K,nSignals):
if type(K) is tuple:
K = [Ks for Ks in K]
if type(K) is list and all([type(Ks) is np.ndarray for Ks in K]):
nKernels = len(K)
            K = np.concatenate(K, axis=0)
        elif type(K) is np.ndarray:
nKernels = 1
else:
raise TypeError('The input kernel(s) must be numpy array or a list of numpy arrays.')
if nSignals!=nKernels:
raise KeyError('The same number of kernels and signals must be specified as lists.')
return K
if type(K) is FunctionType:
Kmulti = lambda p: prepareKernel(K(p),nSignals)
else:
Kmulti = prepareKernel(K,nSignals)
if type(weights) is list or not hasattr(weights, "__len__"):
weights = np.atleast_1d(weights)
if len(weights)==1:
weights = np.repeat(weights,nSignals)
weights = weights/sum(weights)
if len(weights)!=nSignals:
raise KeyError('If multiple signals are passed, the same number of weights are required.')
weights_ = []
for i in range(len(weights)):
weights_ = np.concatenate((weights_,weights[i]*np.ones(len(Vlist[i]))))
weights = weights_
else:
raise TypeError('The input weights(s) must be numpy array or a list of numpy arrays.')
Ns = [len(V) for V in Vlist]
subset = [None]*nSignals
for i in range(nSignals):
if i==0:
prev = 0
else:
prev = subset[i-1][-1]+1
subset[i] = np.arange(prev,prev+Ns[i])
if precondition:
return V,Kmulti,weights,subset,prescales
else:
return V,Kmulti,weights,subset
def hccm(J,*args):
if len(args)==2:
res,mode = args
V = []
elif len(args)==1:
V = args[0]
H = J@np.linalg.pinv(J.T@J)@J.T
h = np.diag(H)
n,k = np.shape(J)
if isempty(V):
if mode.upper() == 'HC0': V = np.diag(res**2)
elif mode.upper() == 'HC1': V = n/(n-k)*np.diag(res**2)
elif mode.upper() == 'HC2': V = np.diag(res**2/(1-h))
elif mode.upper() == 'HC3': V = np.diag(res/(1-h))**2
        elif mode.upper() == 'HC4':
            delta = np.minimum(4,n*h/k)
            V = np.diag(res**2./((1 - h)**delta))
        elif mode.upper() == 'HC5':
            k = 0.7
alpha = np.minimum(np.maximum(4,k*max(h)/np.mean(h)),h/np.mean(h))
V = np.diag(res**2./(np.sqrt((1 - h)**alpha)))
else:
raise KeyError('HCCM estimation mode not found.')
C = np.linalg.pinv(J.T@J)@J.T@V@J@np.linalg.pinv(J.T@J)
return C
def metadata(**kwargs):
attributes = list(kwargs.keys())
metadata = list(kwargs.values())
def _setmetadata(func):
for attribute,data in zip(attributes,metadata):
setattr(func,attribute,data)
return func
return _setmetadata
def gsvd(A,B):
m,p = A.shape
n = B.shape[0]
useQA = m > p
useQB = n > p
if useQA:
QA,A = scp.linalg.qr(A)
A = A[0:p,0:p]
QA = QA[:,0:p]
m = p
if useQB:
QB,B = scp.linalg.qr(B)
B = B[0:p,0:p]
QB = QB[:,0:p]
n = p
Q,_ = np.linalg.qr(np.vstack((A,B)), mode='reduced')
Q1 = Q[0:m,0:p]
Q2 = Q[m:m+n,0:p]
C,S = csd(Q1,Q2)
q = min(m+n,p)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
U = np.vstack((np.zeros((q-m,1),'double'), np.diag(C,max(0,q-m)).reshape(len(np.diag(C,max(0,q-m))),1)))/np.vstack((np.diag(S,0).reshape(len(np.diag(S,0)),1), np.zeros((q-n,1),'double') ))
return U
def csd(Q1,Q2):
m,n = Q1.shape
p,_ = Q2.shape
if m < p:
s,c = csd(Q2,Q1)
j = np.flip(np.arange(n))
c = c[:,j]
s = s[:,j]
m = np.minimum(m,p)
i = np.flip(np.arange(m))
c[np.arange(m),:] = c[i,:]
n = np.minimum(n,p)
i = np.flip(np.arange(n))
s[np.arange(n),:] = s[i,:]
return c,s
_,sdiag,v = np.linalg.svd(Q1)
c = np.zeros((m, n))
np.fill_diagonal(c, sdiag)
v = v.T.conj()
z = np.eye(n,n)
z = scp.linalg.hankel(z[:,n-1])
c[0:n,:] = z@c[0:n,:]@z
v = v@z
Q2 = Q2@v
k=0
for j in range(1,n):
if c[j,j] <= 1/np.sqrt(2): k=j
b = Q2[:,0:k]
u2,r = np.linalg.qr(b,mode='complete')
s = u2.T@Q2
t = np.minimum(p,n)
tt = np.minimum(m,p)
if k<t:
r2 = s[np.ix_(range(k,p),range(k,t))]
_,sdiag,vt = np.linalg.svd(r2)
ss= np.zeros(r2.shape)
np.fill_diagonal(ss, sdiag)
vt = vt.T.conj()
s[k:p,k:t] = ss
c[:,k:t] = c[:,k:t]@vt
w = c[k:tt,k:t]
z,r = np.linalg.qr(w,mode='complete')
c[k:tt,k:t] = r
for j in range(n):
if c[j,j]<0:
c[j,j] = -c[j,j]
for j in range(t):
if s[j,j]<0:
s[j,j] = -s[j,j]
return c,s
def diagf(X):
X = np.triu(np.tril(X))
return X
def diagp(Y,X,k):
D = np.diag(X,k)
j = np.where((D.real < 0) | (D.imag != 0))
D = np.diag(np.conj(D[j])/abs(D[j]))
Y[:,j] = Y[:,j]@D.T
X[j,:] = D@X[j,:]
    X = X+0
    return Y,X
def Jacobian(fcn, x0, lb, ub):
J = opt._numdiff.approx_derivative(fcn,x0,method='2-point',bounds=(lb,ub))
J = np.atleast_2d(J)
return J
def movmean(x, N):
xfilt = np.convolve(x, np.ones(N)/N, mode='same')
return xfilt
def ovl(A,B):
A /= np.sum(A)
B /= np.sum(B)
metric = np.sum(np.minimum(A,B))
return metric
def isempty(A):
A = np.atleast_1d(A)
boolean = np.size(A)==0
return boolean
def multistarts(n,x0,lb,ub):
    if n<1:
raise ValueError('The number of requested starting points must be n>0.')
if len(x0) != len(lb) or len(x0) != len(ub):
raise ValueError('The lower/upper bound size(s) are not compatible with the initial guess vector x0.')
if n>1:
x0 = np.linspace(lb,ub,n-1)
else:
x0 = [x0]
return x0
| true
| true
|
f709ffd40dd8a38cba8ea8ba1325e17e8c739d17
| 11,448
|
py
|
Python
|
Lib/test/support/socket_helper.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 1,318
|
2019-07-11T10:34:39.000Z
|
2022-03-29T15:05:19.000Z
|
Lib/test/support/socket_helper.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 387
|
2020-12-15T14:54:04.000Z
|
2022-03-31T07:00:21.000Z
|
Lib/test/support/socket_helper.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 66
|
2019-11-11T15:33:12.000Z
|
2022-03-01T07:55:55.000Z
|
import contextlib
import errno
import socket
import unittest
import sys
from .. import support
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
with socket.socket(family, socktype) as tempsock:
port = bind_port(tempsock)
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEADDR socket option on "
"TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEPORT socket option on "
"TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
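def _example_bind_port():
    """Illustrative usage sketch (not part of this module): let the OS pick
    an ephemeral port for a test server and return it for clients to use."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        port = bind_port(srv)   # raises TestFailed if SO_REUSE* was set
        srv.listen()
        return port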
def bind_unix_socket(sock, addr):
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
"""Decorator for tests requiring a functional bind() for unix sockets."""
if not hasattr(socket, 'AF_UNIX'):
return unittest.skip('No UNIX Sockets')(test)
global _bind_nix_socket_error
if _bind_nix_socket_error is None:
from test.support import TESTFN, unlink
path = TESTFN + "can_bind_unix_socket"
with socket.socket(socket.AF_UNIX) as sock:
try:
sock.bind(path)
_bind_nix_socket_error = False
except OSError as e:
_bind_nix_socket_error = e
finally:
unlink(path)
if _bind_nix_socket_error:
msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
return unittest.skip(msg)(test)
else:
return test
def get_socket_conn_refused_errs():
"""
Get the different socket error numbers ('errno') which can be received
when a connection is refused.
"""
errors = [errno.ECONNREFUSED]
if hasattr(errno, 'ENETUNREACH'):
# On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED
errors.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
errors.append(errno.EADDRNOTAVAIL)
if hasattr(errno, 'EHOSTUNREACH'):
# bpo-37583: The destination host cannot be reached
errors.append(errno.EHOSTUNREACH)
if not IPV6_ENABLED:
errors.append(errno.EAFNOSUPPORT)
return errors
_NOT_SET = object()
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=_NOT_SET, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
import nntplib
import urllib.error
if timeout is _NOT_SET:
timeout = support.INTERNET_TIMEOUT
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = support.ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
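def _example_transient_internet():
    """Illustrative usage sketch (not part of this module): network flakiness
    during the guarded block is reported as ResourceDenied, not a failure."""
    import urllib.request
    with transient_internet('www.example.com'):
        return urllib.request.urlopen('http://www.example.com/', timeout=30)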
| 42.4
| 89
| 0.65898
|
import contextlib
import errno
import socket
import unittest
import sys
from .. import support
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
with socket.socket(family, socktype) as tempsock:
port = bind_port(tempsock)
del tempsock
return port
def bind_port(sock, host=HOST):
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEADDR socket option on "
"TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise support.TestFailed("tests should never set the "
"SO_REUSEPORT socket option on "
"TCP/IP sockets!")
except OSError:
            # Python's socket module was compiled using modern headers
            # thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
def bind_unix_socket(sock, addr):
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest('cannot bind AF_UNIX sockets')
def _is_ipv6_enabled():
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
if not hasattr(socket, 'AF_UNIX'):
return unittest.skip('No UNIX Sockets')(test)
global _bind_nix_socket_error
if _bind_nix_socket_error is None:
from test.support import TESTFN, unlink
path = TESTFN + "can_bind_unix_socket"
with socket.socket(socket.AF_UNIX) as sock:
try:
sock.bind(path)
_bind_nix_socket_error = False
except OSError as e:
_bind_nix_socket_error = e
finally:
unlink(path)
if _bind_nix_socket_error:
msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
return unittest.skip(msg)(test)
else:
return test
def get_socket_conn_refused_errs():
errors = [errno.ECONNREFUSED]
if hasattr(errno, 'ENETUNREACH'):
# On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED
errors.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
errors.append(errno.EADDRNOTAVAIL)
if hasattr(errno, 'EHOSTUNREACH'):
# bpo-37583: The destination host cannot be reached
errors.append(errno.EHOSTUNREACH)
if not IPV6_ENABLED:
errors.append(errno.EAFNOSUPPORT)
return errors
_NOT_SET = object()
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=_NOT_SET, errnos=()):
import nntplib
import urllib.error
if timeout is _NOT_SET:
timeout = support.INTERNET_TIMEOUT
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
# socket.create_connection() fails randomly with
# EADDRNOTAVAIL on Travis CI.
('EADDRNOTAVAIL', 99),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = support.ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if support.verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
| true
| true
|
f70a000037d4adeeded177205c3e2f23a6c16f71
| 421
|
py
|
Python
|
jassen/django/project/blog/migrations/0003_auto_20180507_0734.py
|
cabilangan112/intern-drf-blog
|
b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9
|
[
"MIT"
] | null | null | null |
jassen/django/project/blog/migrations/0003_auto_20180507_0734.py
|
cabilangan112/intern-drf-blog
|
b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9
|
[
"MIT"
] | null | null | null |
jassen/django/project/blog/migrations/0003_auto_20180507_0734.py
|
cabilangan112/intern-drf-blog
|
b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-05-07 07:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180507_0653'),
]
operations = [
migrations.AlterField(
model_name='post',
name='banner_photo',
field=models.ImageField(upload_to='static/media', verbose_name='Image'),
),
]
| 22.157895
| 84
| 0.612827
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180507_0653'),
]
operations = [
migrations.AlterField(
model_name='post',
name='banner_photo',
field=models.ImageField(upload_to='static/media', verbose_name='Image'),
),
]
| true
| true
|
f70a00bd04dc577a9c2437547c31ffdaa0260ef1
| 5,394
|
py
|
Python
|
ibmsecurity/isam/aac/server_connections/ws.py
|
sandermey/ibmsecurity
|
92ba7828260e96a6a323f4ac3830bfa43ee8dd7e
|
[
"Apache-2.0"
] | null | null | null |
ibmsecurity/isam/aac/server_connections/ws.py
|
sandermey/ibmsecurity
|
92ba7828260e96a6a323f4ac3830bfa43ee8dd7e
|
[
"Apache-2.0"
] | null | null | null |
ibmsecurity/isam/aac/server_connections/ws.py
|
sandermey/ibmsecurity
|
92ba7828260e96a6a323f4ac3830bfa43ee8dd7e
|
[
"Apache-2.0"
] | null | null | null |
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieving a list of all Web Service connections
"""
return isamAppliance.invoke_get("Retrieving a list of all Web Service connections",
"/mga/server_connections/ws/v1")
def get(isamAppliance, name=None, check_mode=False, force=False):
"""
Retrieving a Web Service connection
"""
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
if id == {}:
return isamAppliance.create_return_object()
else:
return isamAppliance.invoke_get("Retrieving a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
def set(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
"""
Creating or Modifying a Web Service connection
"""
if _check_exists(isamAppliance, name=name) is False:
# Force the add - we already know connection does not exist
return add(isamAppliance, name, connection, description, locked, servers, check_mode, True)
else:
# Update request
return update(isamAppliance, connection, description, locked, servers, name, None,
check_mode, force)
def add(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
"""
Creating a Web Service connection
"""
# warnings = []
# if isamAppliance.facts["version"] < "9.0.2.1":
# warnings.append(
# "Appliance is at version: {0}. Enabled server connection type (ws) not supported unless at least 9.0.2.1. Ignoring value.".format(
# isamAppliance.facts["version"]))
# return isamAppliance.create_return_object(warnings=warnings)
if force is True or _check_exists(isamAppliance, name=name) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Creating a Web Service connection",
"/mga/server_connections/ws/v1",
_create_json(name=name, description=description, locked=locked, servers=servers,
connection=connection))
return isamAppliance.create_return_object()
def delete(isamAppliance, name=None, check_mode=False, force=False):
"""
Deleting a Web Service connection
"""
if force is True or _check_exists(isamAppliance, name=name) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
return isamAppliance.invoke_delete(
"Deleting a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
return isamAppliance.create_return_object()
def update(isamAppliance, connection, description='', locked=False, servers=None, name=None,
new_name=None, check_mode=False, force=False):
"""
Modifying a Web Service connection
    Use new_name to rename the connection; passwords cannot be compared, so the update takes place every time
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = _create_json(name=name, description=description, locked=locked, servers=servers,
connection=connection)
        if new_name is not None:  # Rename condition
            json_data['name'] = new_name
        # Look up the UUID of the connection to build the request URL;
        # without this lookup, 'id' would refer to the Python builtin.
        ret_obj = _get_id(isamAppliance, name=name)
        id = ret_obj['data']
        return isamAppliance.invoke_put(
            "Modifying a Web Service connection",
            "/mga/server_connections/ws/{0}/v1".format(id), json_data)
def _create_json(name, description, locked, servers, connection):
"""
Create a JSON to be used for the REST API call
"""
json = {
"connection": connection,
"type": "ws",
"name": name,
"description": description,
"locked": locked
}
# servers is optional
if servers is not None:
json['servers'] = servers
return json
def _get_id(isamAppliance, name):
"""
Retrieve UUID for named Web Service connection
"""
ret_obj = get_all(isamAppliance)
ret_obj_new = isamAppliance.create_return_object()
for obj in ret_obj['data']:
if obj['name'] == name:
ret_obj_new['data'] = obj['uuid']
return ret_obj_new
def _check_exists(isamAppliance, name=None, id=None):
"""
Check if Web Service connection already exists
"""
ret_obj = get_all(isamAppliance)
for obj in ret_obj['data']:
if (name is not None and obj['name'] == name) or (id is not None and obj['uuid'] == id):
return True
return False
def compare(isamAppliance1, isamAppliance2):
"""
Compare Web Service connections between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
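# Minimal usage sketch (illustrative only): ``appliance`` stands for an ISAM
# appliance object as used elsewhere in ibmsecurity; the keys of the
# ``connection`` payload below are assumptions, not the documented schema.
def _example_ws_lifecycle(appliance):
    connection = {'url': 'https://ws.example.com', 'user': 'admin', 'password': 'secret'}
    set(appliance, name='example-ws', connection=connection, description='demo')
    ret = get(appliance, name='example-ws')
    delete(appliance, name='example-ws')
    return ret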
| 33.092025
| 143
| 0.643493
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isamAppliance, check_mode=False, force=False):
return isamAppliance.invoke_get("Retrieving a list of all Web Service connections",
"/mga/server_connections/ws/v1")
def get(isamAppliance, name=None, check_mode=False, force=False):
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
if id == {}:
return isamAppliance.create_return_object()
else:
return isamAppliance.invoke_get("Retrieving a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
def set(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
if _check_exists(isamAppliance, name=name) is False:
return add(isamAppliance, name, connection, description, locked, servers, check_mode, True)
else:
return update(isamAppliance, connection, description, locked, servers, name, None,
check_mode, force)
def add(isamAppliance, name, connection, description='', locked=False, servers=None,
check_mode=False, force=False):
if force is True or _check_exists(isamAppliance, name=name) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Creating a Web Service connection",
"/mga/server_connections/ws/v1",
_create_json(name=name, description=description, locked=locked, servers=servers,
connection=connection))
return isamAppliance.create_return_object()
def delete(isamAppliance, name=None, check_mode=False, force=False):
if force is True or _check_exists(isamAppliance, name=name) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
ret_obj = _get_id(isamAppliance, name=name)
id = ret_obj['data']
return isamAppliance.invoke_delete(
"Deleting a Web Service connection",
"/mga/server_connections/ws/{0}/v1".format(id))
return isamAppliance.create_return_object()
def update(isamAppliance, connection, description='', locked=False, servers=None, name=None,
new_name=None, check_mode=False, force=False):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = _create_json(name=name, description=description, locked=locked, servers=servers,
connection=connection)
        if new_name is not None: json_data['name'] = new_name
        ret_obj = _get_id(isamAppliance, name=name)
        id = ret_obj['data']
        return isamAppliance.invoke_put(
            "Modifying a Web Service connection",
            "/mga/server_connections/ws/{0}/v1".format(id), json_data)
def _create_json(name, description, locked, servers, connection):
json = {
"connection": connection,
"type": "ws",
"name": name,
"description": description,
"locked": locked
}
if servers is not None:
json['servers'] = servers
return json
def _get_id(isamAppliance, name):
ret_obj = get_all(isamAppliance)
ret_obj_new = isamAppliance.create_return_object()
for obj in ret_obj['data']:
if obj['name'] == name:
ret_obj_new['data'] = obj['uuid']
return ret_obj_new
def _check_exists(isamAppliance, name=None, id=None):
ret_obj = get_all(isamAppliance)
for obj in ret_obj['data']:
if (name is not None and obj['name'] == name) or (id is not None and obj['uuid'] == id):
return True
return False
def compare(isamAppliance1, isamAppliance2):
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
| true
| true
|
f70a02f2e0f7ececb1a48f2b6248fe11c810141e
| 3,714
|
py
|
Python
|
imix/data/infocomp/ocrvqa_infocpler.py
|
linxi1158/iMIX
|
af87a17275f02c94932bb2e29f132a84db812002
|
[
"Apache-2.0"
] | 23
|
2021-06-26T08:45:19.000Z
|
2022-03-02T02:13:33.000Z
|
imix/data/infocomp/ocrvqa_infocpler.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | null | null | null |
imix/data/infocomp/ocrvqa_infocpler.py
|
XChuanLee/iMIX
|
99898de97ef8b45462ca1d6bf2542e423a73d769
|
[
"Apache-2.0"
] | 9
|
2021-06-10T02:36:20.000Z
|
2021-11-09T02:18:16.000Z
|
import torch
from ..utils.stream import ItemFeature
from .base_infocpler import BaseInfoCpler
class OCRVQAInfoCpler(BaseInfoCpler):
def __init__(self, cfg):
super().__init__(cfg)
def complete_info(self, item_feature: ItemFeature):
tokens = self.tokenizer.tokenize(item_feature.question.strip())
tokens = self.tokenizer.get_limited_tokens(tokens, self.max_seq_length - 2)
tokens, input_lm_label_ids = self.tokenizer.random_mask_tokens(tokens, self.word_mask_ratio)
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOEKN]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(tokens)
input_segment = [0] * len(tokens)
input_lm_label_ids = [-1] * len(tokens)
# while len(input_ids) < self.max_seq_length:
# input_ids.append(int(self.pad_idx))
# input_mask.append(0)
# input_segment.append(0)
# input_lm_label_ids.append(-1)
to_extd_length = self.max_seq_length - len(input_ids)
self.info_extend(to_extd_length, (input_ids, int(self.pad_idx)), (input_mask, 0), (input_segment, 0),
(input_lm_label_ids, -1))
# ocr vectors
ocr_tokens = self.tokenizer.get_limited_tokens(item_feature.ocr_tokens, self.max_ocr_length)
item_feature.ocr_vectors_glove = self.get_tokens_glove_vectors(ocr_tokens)
item_feature.ocr_vectors_order = self.get_tokens_order_vectors(ocr_tokens)
item_feature.ocr_vectors_phoc = self.get_tokens_phoc_vectors(ocr_tokens)
item_feature.ocr_vectors_fasttext = self.get_tokens_fasttext_vectors(ocr_tokens)
# ocr features and bboxes
features_ocr = torch.zeros(
(self.max_ocr_length,
item_feature.features_ocr.shape[1] if item_feature.features_ocr is not None else 2048),
dtype=torch.float)
bbox_ocr_normalized = torch.zeros(
(self.max_ocr_length,
item_feature.ocr_normalized_boxes.shape[1] if item_feature.ocr_normalized_boxes is not None else 4),
dtype=torch.float)
if item_feature.features_ocr is not None:
limit = min(self.max_ocr_length, len(item_feature.features_ocr))
features_ocr[:limit] = torch.tensor(item_feature.features_ocr[:limit])
bbox_ocr_normalized[:limit] = torch.tensor(item_feature.ocr_normalized_boxes[:limit])
item_feature.features_ocr = features_ocr
item_feature.ocr_normalized_boxes = bbox_ocr_normalized
# features and bboxes
img_h = item_feature.image_height
img_w = item_feature.image_width
item_feature.bbox = self._get_bbox_from_normalized(item_feature.obj_normalized_boxes, img_h, img_w)
item_feature.bbox_normalized = item_feature.obj_normalized_boxes
item_feature.bbox_ocr = self._get_bbox_from_normalized(item_feature.ocr_normalized_boxes, img_h, img_w)
item_feature.bbox_ocr_normalized = item_feature.ocr_normalized_boxes
item_feature.input_ids = torch.tensor(input_ids, dtype=torch.long)
item_feature.input_mask = torch.tensor(input_mask, dtype=torch.int)
item_feature.input_segment = torch.tensor(input_segment, dtype=torch.int)
item_feature.input_lm_label_ids = torch.tensor(input_lm_label_ids, dtype=torch.long)
item_feature.qa_ids = [self.qa_ans2id[ans] for ans in item_feature.answers if ans in self.qa_ans2id]
# item_feature.qa_allids = [self.qa_ans2id[ans] for ans in item_feature.all_answers if ans in self.qa_ans2id]
item_feature.answers_scores = self.compute_answers_scores(torch.Tensor(item_feature.qa_ids))
return item_feature
| 53.826087
| 117
| 0.716478
|
import torch
from ..utils.stream import ItemFeature
from .base_infocpler import BaseInfoCpler
class OCRVQAInfoCpler(BaseInfoCpler):
def __init__(self, cfg):
super().__init__(cfg)
def complete_info(self, item_feature: ItemFeature):
tokens = self.tokenizer.tokenize(item_feature.question.strip())
tokens = self.tokenizer.get_limited_tokens(tokens, self.max_seq_length - 2)
tokens, input_lm_label_ids = self.tokenizer.random_mask_tokens(tokens, self.word_mask_ratio)
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOEKN]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(tokens)
input_segment = [0] * len(tokens)
input_lm_label_ids = [-1] * len(tokens)
to_extd_length = self.max_seq_length - len(input_ids)
self.info_extend(to_extd_length, (input_ids, int(self.pad_idx)), (input_mask, 0), (input_segment, 0),
(input_lm_label_ids, -1))
ocr_tokens = self.tokenizer.get_limited_tokens(item_feature.ocr_tokens, self.max_ocr_length)
item_feature.ocr_vectors_glove = self.get_tokens_glove_vectors(ocr_tokens)
item_feature.ocr_vectors_order = self.get_tokens_order_vectors(ocr_tokens)
item_feature.ocr_vectors_phoc = self.get_tokens_phoc_vectors(ocr_tokens)
item_feature.ocr_vectors_fasttext = self.get_tokens_fasttext_vectors(ocr_tokens)
features_ocr = torch.zeros(
(self.max_ocr_length,
item_feature.features_ocr.shape[1] if item_feature.features_ocr is not None else 2048),
dtype=torch.float)
bbox_ocr_normalized = torch.zeros(
(self.max_ocr_length,
item_feature.ocr_normalized_boxes.shape[1] if item_feature.ocr_normalized_boxes is not None else 4),
dtype=torch.float)
if item_feature.features_ocr is not None:
limit = min(self.max_ocr_length, len(item_feature.features_ocr))
features_ocr[:limit] = torch.tensor(item_feature.features_ocr[:limit])
bbox_ocr_normalized[:limit] = torch.tensor(item_feature.ocr_normalized_boxes[:limit])
item_feature.features_ocr = features_ocr
item_feature.ocr_normalized_boxes = bbox_ocr_normalized
img_h = item_feature.image_height
img_w = item_feature.image_width
item_feature.bbox = self._get_bbox_from_normalized(item_feature.obj_normalized_boxes, img_h, img_w)
item_feature.bbox_normalized = item_feature.obj_normalized_boxes
item_feature.bbox_ocr = self._get_bbox_from_normalized(item_feature.ocr_normalized_boxes, img_h, img_w)
item_feature.bbox_ocr_normalized = item_feature.ocr_normalized_boxes
item_feature.input_ids = torch.tensor(input_ids, dtype=torch.long)
item_feature.input_mask = torch.tensor(input_mask, dtype=torch.int)
item_feature.input_segment = torch.tensor(input_segment, dtype=torch.int)
item_feature.input_lm_label_ids = torch.tensor(input_lm_label_ids, dtype=torch.long)
item_feature.qa_ids = [self.qa_ans2id[ans] for ans in item_feature.answers if ans in self.qa_ans2id]
item_feature.answers_scores = self.compute_answers_scores(torch.Tensor(item_feature.qa_ids))
return item_feature
| true
| true
|
f70a030b4c2a7491dbc7609927869cf77e179697
| 445
|
py
|
Python
|
scripts/item/consume_2434762.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2434762.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2434762.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Mini Black Heaven Mount Coupon | (2434762)
if sm.getSkillByItem() == 0:  # Check whether the item has a vehicleID stored; 0 if not.
sm.chat("An Error occurred whilst trying to find the mount.")
elif sm.hasSkill(sm.getSkillByItem()):
sm.chat("You already have the 'Mini Black Heaven' mount.")
else:
sm.consumeItem()
sm.giveSkill(sm.getSkillByItem())
sm.chat("Successfully added the 'Mini Black Heaven' mount.")
sm.dispose()
| 40.454545
| 86
| 0.707865
|
if sm.getSkillByItem() == 0: sm.chat("An Error occurred whilst trying to find the mount.")
elif sm.hasSkill(sm.getSkillByItem()):
sm.chat("You already have the 'Mini Black Heaven' mount.")
else:
sm.consumeItem()
sm.giveSkill(sm.getSkillByItem())
sm.chat("Successfully added the 'Mini Black Heaven' mount.")
sm.dispose()
| true
| true
|
f70a03d30736dd708147341c7dadfe0b222da100
| 618
|
py
|
Python
|
xgboost_ames_housing/test/conftest.py
|
qq2016/kubeflow_learning
|
930706686108f997aab42ccf2fe455dcf09a4afc
|
[
"Apache-2.0"
] | 1,165
|
2018-03-01T01:47:14.000Z
|
2022-03-31T08:35:00.000Z
|
xgboost_ames_housing/test/conftest.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 929
|
2018-02-04T18:20:16.000Z
|
2022-03-31T18:20:43.000Z
|
xgboost_ames_housing/test/conftest.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 687
|
2018-02-01T21:35:30.000Z
|
2022-03-29T07:47:47.000Z
|
import pytest
def pytest_addoption(parser):
parser.addoption(
"--master", action="store", default="", help="IP address of GKE master")
parser.addoption(
"--namespace", action="store", default="", help="namespace of server")
parser.addoption(
"--service", action="store", default="",
help="The name of the mnist K8s service")
@pytest.fixture
def master(request):
return request.config.getoption("--master")
@pytest.fixture
def namespace(request):
return request.config.getoption("--namespace")
@pytest.fixture
def service(request):
return request.config.getoption("--service")
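# Minimal usage sketch (illustrative only): a test in the same suite consumes
# the fixtures by name, e.g. when invoked as
#   pytest --master=1.2.3.4 --namespace=default --service=ames-housing
def test_example_options(master, namespace, service):
    assert isinstance(master, str)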
| 24.72
| 78
| 0.697411
|
import pytest
def pytest_addoption(parser):
parser.addoption(
"--master", action="store", default="", help="IP address of GKE master")
parser.addoption(
"--namespace", action="store", default="", help="namespace of server")
parser.addoption(
"--service", action="store", default="",
help="The name of the mnist K8s service")
@pytest.fixture
def master(request):
return request.config.getoption("--master")
@pytest.fixture
def namespace(request):
return request.config.getoption("--namespace")
@pytest.fixture
def service(request):
return request.config.getoption("--service")
| true
| true
|
f70a051a93048684b0e2b78eec14871d90d388e0
| 13,037
|
py
|
Python
|
lte/gateway/python/integ_tests/s1aptests/test_attach_service_with_multi_pdns_and_bearers_mt_data.py
|
saurabhsoni88/magma
|
4236c9d8edb7bd203707ff7e861b1f7c12fb84c7
|
[
"BSD-3-Clause"
] | 1
|
2021-08-04T16:40:05.000Z
|
2021-08-04T16:40:05.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_service_with_multi_pdns_and_bearers_mt_data.py
|
saurabhsoni88/magma
|
4236c9d8edb7bd203707ff7e861b1f7c12fb84c7
|
[
"BSD-3-Clause"
] | 112
|
2020-09-03T06:41:43.000Z
|
2022-03-31T12:07:08.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_service_with_multi_pdns_and_bearers_mt_data.py
|
kkahrs/magma
|
73e666627dc28e0c492feab7321bb7d6dd433b09
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T03:41:46.000Z
|
2021-05-26T03:41:46.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import time
import gpp_types
import s1ap_types
import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SessionManagerUtil
import ipaddress
from lte.protos.policydb_pb2 import FlowMatch
class TestAttachServiceWithMultiPdnsAndBearersMtData(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._sessionManager_util = SessionManagerUtil()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_service_with_multi_pdns_and_bearers_mt_data(self):
"""
Attach a UE + add secondary PDN
+ add 2 dedicated bearers + UE context release
+ trigger MT data + service request
+ PDN disconnect + detach
"""
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
ue_id = req.ue_id
ips = []
# APN of the secondary PDN
ims = {
"apn_name": "ims", # APN-name
"qci": 5, # qci
"priority": 15, # priority
"pre_cap": 0, # preemption-capability
"pre_vul": 0, # preemption-vulnerability
"mbr_ul": 200000000, # MBR UL
"mbr_dl": 100000000, # MBR DL
}
# APN list to be configured
apn_list = [ims]
self._s1ap_wrapper.configAPN(
"IMSI" + "".join([str(i) for i in req.imsi]), apn_list
)
# UL Flow description #1
ulFlow1 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5002, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #2
ulFlow2 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5001, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #3
ulFlow3 = {
"ipv4_dst": "192.168.129.64", # IPv4 destination address
"tcp_dst_port": 5003, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# UL Flow description #4
ulFlow4 = {
"ipv4_dst": "192.168.129.42", # IPv4 destination address
"tcp_dst_port": 5001, # TCP dest port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# DL Flow description #1
dlFlow1 = {
"ipv4_src": "192.168.129.42", # IPv4 source address
"tcp_src_port": 5001, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #2
dlFlow2 = {
"ipv4_src": "192.168.129.64", # IPv4 source address
"tcp_src_port": 5002, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #3
dlFlow3 = {
"ipv4_src": "192.168.129.64", # IPv4 source address
"tcp_src_port": 5003, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# DL Flow description #4
dlFlow4 = {
"ipv4_src": "192.168.129.42", # IPv4 source address
"tcp_src_port": 5001, # TCP source port
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# Flow lists to be configured
flow_list1 = [
ulFlow1,
ulFlow2,
ulFlow3,
dlFlow1,
dlFlow2,
dlFlow3,
]
flow_list2 = [
ulFlow4,
dlFlow4,
]
# QoS
qos1 = {
"qci": 1, # qci value [1 to 9]
"priority": 1, # Range [0-255]
"max_req_bw_ul": 10000000, # MAX bw Uplink
"max_req_bw_dl": 15000000, # MAX bw Downlink
"gbr_ul": 1000000, # GBR Uplink
"gbr_dl": 2000000, # GBR Downlink
"arp_prio": 1, # ARP priority
"pre_cap": 1, # pre-emption capability
"pre_vul": 1, # pre-emption vulnerability
}
qos2 = {
"qci": 2, # qci value [1 to 9]
"priority": 5, # Range [0-255]
"max_req_bw_ul": 10000000, # MAX bw Uplink
"max_req_bw_dl": 15000000, # MAX bw Downlink
"gbr_ul": 1000000, # GBR Uplink
"gbr_dl": 2000000, # GBR Downlink
"arp_prio": 1, # ARP priority
"pre_cap": 1, # pre-emption capability
"pre_vul": 1, # pre-emption vulnerability
}
policy_id1 = "internet"
policy_id2 = "ims"
print(
"************************* Running End to End attach for UE id ",
ue_id,
)
# Now actually complete the attach
attach = self._s1ap_wrapper._s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
addr = attach.esmInfo.pAddr.addrInfo
default_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(default_ip)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# Delay to ensure S1APTester sends attach complete before sending UE
# context release
print("Sleeping for 5 seconds")
time.sleep(5)
# Add dedicated bearer for default bearer 5
print(
"********************** Adding dedicated bearer to magma.ipv4"
" PDN"
)
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id1,
flow_list1,
qos1,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_oai_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_oai_apn.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
# Send PDN Connectivity Request
apn = "ims"
self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn)
# Receive PDN CONN RSP/Activate default EPS bearer context request
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value
)
act_def_bearer_req = response.cast(s1ap_types.uePdnConRsp_t)
addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo
sec_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(sec_ip)
print(
"********************** Sending Activate default EPS bearer "
"context accept for UE id ",
ue_id,
)
print("Sleeping for 5 seconds")
time.sleep(5)
# Add dedicated bearer to 2nd PDN
print("********************** Adding dedicated bearer to ims PDN")
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id2,
flow_list2,
qos2,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_ims_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_ims_apn.bearerId
)
print(
"************* Added dedicated bearer",
act_ded_ber_req_ims_apn.bearerId,
)
print("Sleeping for 5 seconds")
time.sleep(5)
dl_flow_rules = {
default_ip: [flow_list1],
sec_ip: [flow_list2],
}
# 1 UL flow is created per bearer
num_ul_flows = 4
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
print("*********** Moving UE to idle mode")
print(
"************* Sending UE context release request ",
"for UE id ",
ue_id,
)
# Send UE context release request to move UE to idle mode
rel_req = s1ap_types.ueCntxtRelReq_t()
rel_req.ue_Id = ue_id
rel_req.cause.causeVal = (
gpp_types.CauseRadioNetwork.USER_INACTIVITY.value
)
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_CNTXT_REL_REQUEST, rel_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value
)
# Verify if paging flow rules are created
ip_list = [default_ip, sec_ip]
self._s1ap_wrapper.s1_util.verify_paging_flow_rules(ip_list)
print(
"************************* Running UE downlink (UDP) for UE id ",
ue_id,
)
with self._s1ap_wrapper.configDownlinkTest(
req, duration=1, is_udp=True
) as test:
response = self._s1ap_wrapper.s1_util.get_response()
self.assertTrue(response, s1ap_types.tfwCmd.UE_PAGING_IND.value)
# Send service request to reconnect UE
print(
"************************* Sending Service request for UE id ",
ue_id,
)
ser_req = s1ap_types.ueserviceReq_t()
ser_req.ue_Id = ue_id
ser_req.ueMtmsi = s1ap_types.ueMtmsi_t()
ser_req.ueMtmsi.pres = False
ser_req.rrcCause = s1ap_types.Rrc_Cause.TFW_MT_ACCESS.value
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_SERVICE_REQUEST, ser_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value
)
test.verify()
print("Sleeping for 5 seconds")
time.sleep(5)
# Verify if flow rules are created
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
pdn_disconnect_req = s1ap_types.uepdnDisconnectReq_t()
pdn_disconnect_req.ue_Id = ue_id
pdn_disconnect_req.epsBearerId = (
act_def_bearer_req.m.pdnInfo.epsBearerId
)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_PDN_DISCONNECT_REQ, pdn_disconnect_req
)
# Receive UE_DEACTIVATE_BER_REQ
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value
)
print(
"******************* Received deactivate eps bearer context"
" request"
)
# Send DeactDedicatedBearerAccept
deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t)
self._s1ap_wrapper.sendDeactDedicatedBearerAccept(
ue_id, deactv_bearer_req.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
print("************************* Running UE detach for UE id ", ue_id)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
| 34.217848
| 79
| 0.571527
|
import unittest
import time
import gpp_types
import s1ap_types
import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SessionManagerUtil
import ipaddress
from lte.protos.policydb_pb2 import FlowMatch
class TestAttachServiceWithMultiPdnsAndBearersMtData(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._sessionManager_util = SessionManagerUtil()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_service_with_multi_pdns_and_bearers_mt_data(self):
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
ue_id = req.ue_id
ips = []
        ims = {
            "apn_name": "ims",
            "qci": 5,
            "priority": 15,
            "pre_cap": 0,
            "pre_vul": 0,
            "mbr_ul": 200000000,
            "mbr_dl": 100000000,
        }
apn_list = [ims]
self._s1ap_wrapper.configAPN(
"IMSI" + "".join([str(i) for i in req.imsi]), apn_list
)
        ulFlow1 = {
            "ipv4_dst": "192.168.129.42",
            "tcp_dst_port": 5002,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.UPLINK,
        }
        ulFlow2 = {
            "ipv4_dst": "192.168.129.42",
            "tcp_dst_port": 5001,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.UPLINK,
        }
        ulFlow3 = {
            "ipv4_dst": "192.168.129.64",
            "tcp_dst_port": 5003,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.UPLINK,
        }
        ulFlow4 = {
            "ipv4_dst": "192.168.129.42",
            "tcp_dst_port": 5001,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.UPLINK,
        }
        dlFlow1 = {
            "ipv4_src": "192.168.129.42",
            "tcp_src_port": 5001,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.DOWNLINK,
        }
        dlFlow2 = {
            "ipv4_src": "192.168.129.64",
            "tcp_src_port": 5002,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.DOWNLINK,
        }
        dlFlow3 = {
            "ipv4_src": "192.168.129.64",
            "tcp_src_port": 5003,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.DOWNLINK,
        }
        dlFlow4 = {
            "ipv4_src": "192.168.129.42",
            "tcp_src_port": 5001,
            "ip_proto": FlowMatch.IPPROTO_TCP,
            "direction": FlowMatch.DOWNLINK,
        }
flow_list1 = [
ulFlow1,
ulFlow2,
ulFlow3,
dlFlow1,
dlFlow2,
dlFlow3,
]
flow_list2 = [
ulFlow4,
dlFlow4,
]
        qos1 = {
            "qci": 1,
            "priority": 1,
            "max_req_bw_ul": 10000000,
            "max_req_bw_dl": 15000000,
            "gbr_ul": 1000000,
            "gbr_dl": 2000000,
            "arp_prio": 1,
            "pre_cap": 1,
            "pre_vul": 1,
        }
        qos2 = {
            "qci": 2,
            "priority": 5,
            "max_req_bw_ul": 10000000,
            "max_req_bw_dl": 15000000,
            "gbr_ul": 1000000,
            "gbr_dl": 2000000,
            "arp_prio": 1,
            "pre_cap": 1,
            "pre_vul": 1,
        }
policy_id1 = "internet"
policy_id2 = "ims"
print(
"************************* Running End to End attach for UE id ",
ue_id,
)
attach = self._s1ap_wrapper._s1_util.attach(
ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
addr = attach.esmInfo.pAddr.addrInfo
default_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(default_ip)
self._s1ap_wrapper._s1_util.receive_emm_info()
print("Sleeping for 5 seconds")
time.sleep(5)
print(
"********************** Adding dedicated bearer to magma.ipv4"
" PDN"
)
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id1,
flow_list1,
qos1,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_oai_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_oai_apn.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
apn = "ims"
self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value
)
act_def_bearer_req = response.cast(s1ap_types.uePdnConRsp_t)
addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo
sec_ip = ipaddress.ip_address(bytes(addr[:4]))
ips.append(sec_ip)
print(
"********************** Sending Activate default EPS bearer "
"context accept for UE id ",
ue_id,
)
print("Sleeping for 5 seconds")
time.sleep(5)
print("********************** Adding dedicated bearer to ims PDN")
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.create_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id2,
flow_list2,
qos2,
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
)
act_ded_ber_req_ims_apn = response.cast(
s1ap_types.UeActDedBearCtxtReq_t
)
self._s1ap_wrapper.sendActDedicatedBearerAccept(
req.ue_id, act_ded_ber_req_ims_apn.bearerId
)
print(
"************* Added dedicated bearer",
act_ded_ber_req_ims_apn.bearerId,
)
print("Sleeping for 5 seconds")
time.sleep(5)
dl_flow_rules = {
default_ip: [flow_list1],
sec_ip: [flow_list2],
}
num_ul_flows = 4
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
print("*********** Moving UE to idle mode")
print(
"************* Sending UE context release request ",
"for UE id ",
ue_id,
)
rel_req = s1ap_types.ueCntxtRelReq_t()
rel_req.ue_Id = ue_id
rel_req.cause.causeVal = (
gpp_types.CauseRadioNetwork.USER_INACTIVITY.value
)
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_CNTXT_REL_REQUEST, rel_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value
)
ip_list = [default_ip, sec_ip]
self._s1ap_wrapper.s1_util.verify_paging_flow_rules(ip_list)
print(
"************************* Running UE downlink (UDP) for UE id ",
ue_id,
)
with self._s1ap_wrapper.configDownlinkTest(
req, duration=1, is_udp=True
) as test:
response = self._s1ap_wrapper.s1_util.get_response()
self.assertTrue(response, s1ap_types.tfwCmd.UE_PAGING_IND.value)
print(
"************************* Sending Service request for UE id ",
ue_id,
)
ser_req = s1ap_types.ueserviceReq_t()
ser_req.ue_Id = ue_id
ser_req.ueMtmsi = s1ap_types.ueMtmsi_t()
ser_req.ueMtmsi.pres = False
ser_req.rrcCause = s1ap_types.Rrc_Cause.TFW_MT_ACCESS.value
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_SERVICE_REQUEST, ser_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value
)
test.verify()
print("Sleeping for 5 seconds")
time.sleep(5)
self._s1ap_wrapper.s1_util.verify_flow_rules(
num_ul_flows, dl_flow_rules
)
pdn_disconnect_req = s1ap_types.uepdnDisconnectReq_t()
pdn_disconnect_req.ue_Id = ue_id
pdn_disconnect_req.epsBearerId = (
act_def_bearer_req.m.pdnInfo.epsBearerId
)
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_PDN_DISCONNECT_REQ, pdn_disconnect_req
)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_DEACTIVATE_BER_REQ.value
)
print(
"******************* Received deactivate eps bearer context"
" request"
)
deactv_bearer_req = response.cast(s1ap_types.UeDeActvBearCtxtReq_t)
self._s1ap_wrapper.sendDeactDedicatedBearerAccept(
ue_id, deactv_bearer_req.bearerId
)
print("Sleeping for 5 seconds")
time.sleep(5)
print("************************* Running UE detach for UE id ", ue_id)
self._s1ap_wrapper.s1_util.detach(
ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
| true
| true
|
f70a05c6a865eba6a06188b0d8d211a463686254
| 420
|
py
|
Python
|
xonsh/parser.py
|
caputomarcos/xonsh
|
9ca29d8f2aad9f026af23b83fb7263d0fb702b1a
|
[
"BSD-2-Clause-FreeBSD"
] | 7,986
|
2015-11-07T11:59:21.000Z
|
2022-03-27T17:20:49.000Z
|
xonsh/parser.py
|
caputomarcos/xonsh
|
9ca29d8f2aad9f026af23b83fb7263d0fb702b1a
|
[
"BSD-2-Clause-FreeBSD"
] | 161
|
2016-05-09T09:53:48.000Z
|
2022-02-22T04:18:59.000Z
|
xonsh/parser.py
|
caputomarcos/xonsh
|
9ca29d8f2aad9f026af23b83fb7263d0fb702b1a
|
[
"BSD-2-Clause-FreeBSD"
] | 539
|
2016-04-05T05:39:58.000Z
|
2022-03-23T20:47:52.000Z
|
# -*- coding: utf-8 -*-
"""Implements the xonsh parser."""
from xonsh.lazyasd import lazyobject
from xonsh.platform import PYTHON_VERSION_INFO
@lazyobject
def Parser():
if PYTHON_VERSION_INFO > (3, 6):
from xonsh.parsers.v36 import Parser as p
elif PYTHON_VERSION_INFO > (3, 5):
from xonsh.parsers.v35 import Parser as p
else:
from xonsh.parsers.v34 import Parser as p
return p
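# A minimal sketch of what the lazy pattern above buys (hedged; assumes xonsh
# is importable and that `parse` matches the xonsh Parser API): the
# version-specific parser module is imported on first use of `Parser`, not
# when this module itself is imported.
#
#     from xonsh.parser import Parser
#     parser = Parser()             # first access triggers the deferred import
#     tree = parser.parse("ls -l")  # hypothetical call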
| 26.25
| 49
| 0.685714
|
from xonsh.lazyasd import lazyobject
from xonsh.platform import PYTHON_VERSION_INFO
@lazyobject
def Parser():
if PYTHON_VERSION_INFO > (3, 6):
from xonsh.parsers.v36 import Parser as p
elif PYTHON_VERSION_INFO > (3, 5):
from xonsh.parsers.v35 import Parser as p
else:
from xonsh.parsers.v34 import Parser as p
return p
| true
| true
|
f70a066635c0d20ddf6993ebb8b958bc79edcc42
| 619
|
py
|
Python
|
01-Lists_as_stacks_and_queues/balanced_parentheses.py
|
nmoskova/Python-advanced
|
007f496e868aa151e39d79446b055e76ffb2db95
|
[
"MIT"
] | null | null | null |
01-Lists_as_stacks_and_queues/balanced_parentheses.py
|
nmoskova/Python-advanced
|
007f496e868aa151e39d79446b055e76ffb2db95
|
[
"MIT"
] | null | null | null |
01-Lists_as_stacks_and_queues/balanced_parentheses.py
|
nmoskova/Python-advanced
|
007f496e868aa151e39d79446b055e76ffb2db95
|
[
"MIT"
] | null | null | null |
parentheses = input()
stack_opening_brackets = []
pairs = {
    '(': ')',
    '{': '}',
    '[': ']'
}
balanced = True
for el in parentheses:
    if el in "({[":
        # opening bracket: remember it until its closer shows up
        stack_opening_brackets.append(el)
    else:
        if len(stack_opening_brackets) > 0:
            # a closing bracket must match the most recent opener
            opening_bracket = stack_opening_brackets.pop()
            closing_bracket = el
            if pairs[opening_bracket] != closing_bracket:
                balanced = False
                break
        else:
            # closing bracket with no opener left on the stack
            balanced = False
            break
if balanced and len(stack_opening_brackets) == 0:
    print("YES")
else:
    print("NO")
| 19.34375
| 58
| 0.537964
|
parentheses = input()
stack_opening_brackets = []
pairs = {
'(': ')',
'{': '}',
'[': ']'
}
balanced = True
for el in parentheses:
if el in "({[":
stack_opening_brackets.append(el)
else:
if len(stack_opening_brackets) > 0:
opening_bracket = stack_opening_brackets.pop()
closing_bracket = el
if pairs[opening_bracket] != closing_bracket:
balanced = False
break
else:
balanced = False
break
if balanced and len(stack_opening_brackets) == 0:
print("YES")
else:
print("NO")
| true
| true
|
f70a06c82a572505b28b82a34b5eb08e9de1b593
| 1,121
|
py
|
Python
|
VOTA_Control/VOTAScopeHW/daq_do/daq_do_hw.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | null | null | null |
VOTA_Control/VOTAScopeHW/daq_do/daq_do_hw.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | null | null | null |
VOTA_Control/VOTAScopeHW/daq_do/daq_do_hw.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | 1
|
2021-08-01T22:39:18.000Z
|
2021-08-01T22:39:18.000Z
|
'''
Created on Aug 9, 2017
@author: Hao Wu
'''
from ScopeFoundry import HardwareComponent
from .daq_do_dev import DAQSimpleDOTask
from PyDAQmx import *
import numpy as np
import time
class DAQdoHW(HardwareComponent):
'''
    Hardware Component class for driving a DAQ digital output (DO) line
'''
name='daq_do'
def setup(self,channels='Dev2/port0/line2'):
'''
        add settings for the digital output channel and its on/off state
'''
self.settings.New(name='channels',initial=channels,dtype=str,ro=False)
self.settings.New(name='on',initial=False,dtype=bool,ro=False)
def connect(self):
self._dev=DAQSimpleDOTask(self.settings.channels.value())
self.settings.on.hardware_set_func = self._dev.write_bool
def disconnect(self):
try:
self._dev.StopTask()
self._dev.ClearTask()
del self._dev
except AttributeError:
pass
if __name__ == '__main__':
ai=DAQdoHW()
ai.connect()
    print(ai._dev)  # the underlying DO task created by connect()
time.sleep(1)
ai.disconnect()
| 23.354167
| 78
| 0.613738
|
from ScopeFoundry import HardwareComponent
from .daq_do_dev import DAQSimpleDOTask
from PyDAQmx import *
import numpy as np
import time
class DAQdoHW(HardwareComponent):
name='daq_do'
def setup(self,channels='Dev2/port0/line2'):
self.settings.New(name='channels',initial=channels,dtype=str,ro=False)
self.settings.New(name='on',initial=False,dtype=bool,ro=False)
def connect(self):
self._dev=DAQSimpleDOTask(self.settings.channels.value())
self.settings.on.hardware_set_func = self._dev.write_bool
def disconnect(self):
try:
self._dev.StopTask()
self._dev.ClearTask()
del self._dev
except AttributeError:
pass
if __name__ == '__main__':
ai=DAQdoHW()
ai.connect()
    print(ai._dev)
time.sleep(1)
ai.disconnect()
| true
| true
|
f70a06d2005dcef3d0954fa546eda9fa99fac2cb
| 4,394
|
py
|
Python
|
tests/test_malquery.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
tests/test_malquery.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
tests/test_malquery.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
# test_malquery.py
# This class tests the malquery service class
import os
import sys
import pytest
# Authentication via test_authorization.py
from tests import test_authorization as Authorization
# Import our sibling src folder into the path
sys.path.append(os.path.abspath('src'))
# Classes to test - manually imported from sibling folder
from falconpy import MalQuery
auth = Authorization.TestAuthorization()
config = auth.getConfigObject()
falcon = MalQuery(auth_object=config)
AllowedResponses = [200, 400, 404, 429] # Adding rate-limiting as an allowed response for now
class TestMalQuery:
def mq_get_quotas(self):
returned = False
if falcon.GetMalQueryQuotasV1()["status_code"] in AllowedResponses:
returned = True
return returned
def mq_test_all_paths(self):
error_checks = True
tests = {
"fuzzy_search": falcon.fuzzy_search(body={
"options": {
"filter_meta": [
"string"
],
"limit": 0
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}),
"really_fuzzy": falcon.fuzzy_search(filter_meta="whatevs,something_else",
limit=1,
patterns=[{"type": "file", "value": "test"}]
),
"get_download": falcon.get_download(ids="12345678"),
"get_metadata": falcon.get_metadata(ids="12345678"),
"get_request": falcon.get_request(ids="12345678"),
"get_samples": falcon.get_samples(ids="12345678"),
"multi_download": falcon.samples_multidownload(ids="12345678"),
"exact_search": falcon.exact_search(body={}),
"exact_search_too": falcon.exact_search(filter_filetypes="xls,doc",
filter_meta="whatevers,something",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
patterns=[
{
"type": "file",
"value": "spreadsheet"
}
]),
"hunt": falcon.hunt(body={}),
"cry_of_the_hunter": falcon.hunt(filter_filetypes=["exe"],
filter_meta="some metadata",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
yara_rule="Some Yara rule"
)
}
for key in tests:
if tests[key]["status_code"] not in AllowedResponses:
# print(tests[key])
error_checks = False
pytest.skip("Skipping due to test flakiness")
return error_checks
def test_get_quotas(self):
assert self.mq_get_quotas() is True
def test_all_functionality(self):
assert self.mq_test_all_paths() is True
| 48.285714
| 94
| 0.382795
|
import os
import sys
import pytest
from tests import test_authorization as Authorization
sys.path.append(os.path.abspath('src'))
from falconpy import MalQuery
auth = Authorization.TestAuthorization()
config = auth.getConfigObject()
falcon = MalQuery(auth_object=config)
AllowedResponses = [200, 400, 404, 429]
class TestMalQuery:
def mq_get_quotas(self):
returned = False
if falcon.GetMalQueryQuotasV1()["status_code"] in AllowedResponses:
returned = True
return returned
def mq_test_all_paths(self):
error_checks = True
tests = {
"fuzzy_search": falcon.fuzzy_search(body={
"options": {
"filter_meta": [
"string"
],
"limit": 0
},
"patterns": [
{
"type": "string",
"value": "string"
}
]
}),
"really_fuzzy": falcon.fuzzy_search(filter_meta="whatevs,something_else",
limit=1,
patterns=[{"type": "file", "value": "test"}]
),
"get_download": falcon.get_download(ids="12345678"),
"get_metadata": falcon.get_metadata(ids="12345678"),
"get_request": falcon.get_request(ids="12345678"),
"get_samples": falcon.get_samples(ids="12345678"),
"multi_download": falcon.samples_multidownload(ids="12345678"),
"exact_search": falcon.exact_search(body={}),
"exact_search_too": falcon.exact_search(filter_filetypes="xls,doc",
filter_meta="whatevers,something",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
patterns=[
{
"type": "file",
"value": "spreadsheet"
}
]),
"hunt": falcon.hunt(body={}),
"cry_of_the_hunter": falcon.hunt(filter_filetypes=["exe"],
filter_meta="some metadata",
limit=1,
max_date="UTC_Date_Here",
min_date="UTC Date Here",
max_size="200",
min_size="1",
yara_rule="Some Yara rule"
)
}
for key in tests:
if tests[key]["status_code"] not in AllowedResponses:
error_checks = False
pytest.skip("Skipping due to test flakiness")
return error_checks
def test_get_quotas(self):
assert self.mq_get_quotas() is True
def test_all_functionality(self):
assert self.mq_test_all_paths() is True
| true
| true
|
f70a08c0ccb36b65a873e4524fb5358bf0678621
| 2,908
|
py
|
Python
|
results/8112a5333cb1bb472ee14fa5342f6422/pyfile.py
|
CatrionaMarr/OnlineMCMCTest
|
92899d082c1bdfc2d61128ced453ac59812ae03a
|
[
"MIT"
] | null | null | null |
results/8112a5333cb1bb472ee14fa5342f6422/pyfile.py
|
CatrionaMarr/OnlineMCMCTest
|
92899d082c1bdfc2d61128ced453ac59812ae03a
|
[
"MIT"
] | null | null | null |
results/8112a5333cb1bb472ee14fa5342f6422/pyfile.py
|
CatrionaMarr/OnlineMCMCTest
|
92899d082c1bdfc2d61128ced453ac59812ae03a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# import required packages
import emcee
import numpy as np
from numpy import exp, log
# import model function from separate file
from mymodel import mymodel
# import post-processing function from theonlinemcmc package
from theonlinemcmc import *
# initialise error code value
errval = 0
# define the log posterior function
def lnprob(theta, x, sigma_gauss, data):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, sigma_gauss, data)
# define the log prior function
def lnprior(theta):
lp = 0.
m,c = theta
if -10 < m < 10:
lp = 0.
else:
return -np.inf
if -10 < c < 10:
lp = 0.
else:
return -np.inf
return lp
# define log likelihood function
def lnlike(theta, x, sigma_gauss, data):
m,c = theta
md = mymodel(m,c,x)
return -0.5*np.sum(((md - data)/sigma_gauss)**2)
# set number of MCMC points
Nmcmc = 1000
Nburnin = 1000
Nens = 100
ndim = 2
# initialise the start ensemble points
try:
mini = -10 + np.random.rand(Nens)*20
cini = -10 + np.random.rand(Nens)*20
pos = np.array([mini, cini]).T
except:
errval = PRIOR_INIT_ERR
# read in the data
if errval == 0:
try:
data = np.loadtxt("data_file.txt")
except:
try:
data = np.loadtxt("data_file.txt", delimiter=",")
except:
errval = DATA_READ_ERR
# read in the abscissa values
if errval == 0:
try:
x = np.loadtxt("abscissa_file.txt")
except:
try:
x = np.loadtxt("abscissa_file.txt", delimiter=",")
except:
errval = ABSCISSA_READ_ERR
# read in sigma (standard deviation) values (there may be nothing here if it is not applicable to your likelihood)
# run the MCMC
if errval == 0:
if len(data) != len(x):
errval = DATA_LENGTH_ERR
argslist = (x, 0.65, data)
if errval == 0:
# set up sampler
try:
sampler = emcee.EnsembleSampler(Nens, ndim, lnprob, args=argslist)
except:
errval = MCMC_INIT_ERR
# run sampler
try:
sampler.run_mcmc(pos, Nmcmc+Nburnin)
# remove burn-in and flatten
samples = sampler.chain[:, Nburnin:, :].reshape((-1, ndim))
lnp = np.reshape(sampler.lnprobability[:, Nburnin:].flatten(), (-1,1))
samples = np.hstack((samples, lnp))
except:
errval = MCMC_RUN_ERR
# output the posterior samples, likelihood and variables
try:
np.savetxt('posterior_samples.txt.gz', samples)
fv = open('variables.txt', 'w')
fv.write("m,c")
fv.close()
except:
errval = POST_OUTPUT_ERR
# run post-processing script
try:
postprocessing(samples, "m,c", x, "x", data, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
except:
errval = POST_PROCESS_ERR
success = True
if errval != 0:
# run different script in case error codes are encountered
errorpage(errval, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
success = False
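# A minimal, self-contained sketch of the same emcee workflow on a toy 1-D
# Gaussian target (illustrative only; the names below are hypothetical and
# independent of the generated script above). It mirrors the emcee 2.x API
# (.run_mcmc / .chain) that this file uses.
def _toy_emcee_demo():
    import numpy as np
    import emcee

    def log_prob(theta):
        # standard normal log-density (up to an additive constant)
        return -0.5 * np.sum(theta ** 2)

    nwalkers, ndim, nsteps = 20, 1, 500
    p0 = np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
    sampler.run_mcmc(p0, nsteps)
    # drop the first half as burn-in and flatten, as the script above does
    samples = sampler.chain[:, nsteps // 2:, :].reshape(-1, ndim)
    return samples.mean(), samples.std()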
| 21.540741
| 128
| 0.670564
|
import emcee
import numpy as np
from numpy import exp, log
from mymodel import mymodel
from theonlinemcmc import *
errval = 0
def lnprob(theta, x, sigma_gauss, data):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, sigma_gauss, data)
def lnprior(theta):
lp = 0.
m,c = theta
if -10 < m < 10:
lp = 0.
else:
return -np.inf
if -10 < c < 10:
lp = 0.
else:
return -np.inf
return lp
def lnlike(theta, x, sigma_gauss, data):
m,c = theta
md = mymodel(m,c,x)
return -0.5*np.sum(((md - data)/sigma_gauss)**2)
Nmcmc = 1000
Nburnin = 1000
Nens = 100
ndim = 2
try:
mini = -10 + np.random.rand(Nens)*20
cini = -10 + np.random.rand(Nens)*20
pos = np.array([mini, cini]).T
except:
errval = PRIOR_INIT_ERR
if errval == 0:
try:
data = np.loadtxt("data_file.txt")
except:
try:
data = np.loadtxt("data_file.txt", delimiter=",")
except:
errval = DATA_READ_ERR
if errval == 0:
try:
x = np.loadtxt("abscissa_file.txt")
except:
try:
x = np.loadtxt("abscissa_file.txt", delimiter=",")
except:
errval = ABSCISSA_READ_ERR
if errval == 0:
if len(data) != len(x):
errval = DATA_LENGTH_ERR
argslist = (x, 0.65, data)
if errval == 0:
try:
sampler = emcee.EnsembleSampler(Nens, ndim, lnprob, args=argslist)
except:
errval = MCMC_INIT_ERR
try:
sampler.run_mcmc(pos, Nmcmc+Nburnin)
samples = sampler.chain[:, Nburnin:, :].reshape((-1, ndim))
lnp = np.reshape(sampler.lnprobability[:, Nburnin:].flatten(), (-1,1))
samples = np.hstack((samples, lnp))
except:
errval = MCMC_RUN_ERR
try:
np.savetxt('posterior_samples.txt.gz', samples)
fv = open('variables.txt', 'w')
fv.write("m,c")
fv.close()
except:
errval = POST_OUTPUT_ERR
try:
postprocessing(samples, "m,c", x, "x", data, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
except:
errval = POST_PROCESS_ERR
success = True
if errval != 0:
errorpage(errval, "you@example.com", "http://localhost/results/8112a5333cb1bb472ee14fa5342f6422")
success = False
| true
| true
|
f70a0908b0719bf6610dc731089facdb963866e2
| 597
|
py
|
Python
|
scoreboard.py
|
MdGhulamAzadAnsari/Turtle-Crossing-Game
|
24c45e048c3ab994638a3b77f904c602070eda87
|
[
"MIT"
] | 1
|
2021-12-17T15:29:59.000Z
|
2021-12-17T15:29:59.000Z
|
scoreboard.py
|
MdGhulamAzadAnsari/Turtle-Crossing-Game
|
24c45e048c3ab994638a3b77f904c602070eda87
|
[
"MIT"
] | null | null | null |
scoreboard.py
|
MdGhulamAzadAnsari/Turtle-Crossing-Game
|
24c45e048c3ab994638a3b77f904c602070eda87
|
[
"MIT"
] | null | null | null |
from turtle import Turtle
FONT = ("Courier", 20, "normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.hideturtle()
self.penup()
self.level = 1
self.goto(x=-230, y=260)
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.write(f"Level: {self.level}", align="center", font=FONT)
def increase_level(self):
self.level += 1
self.update_scoreboard()
def game_over(self):
self.goto(0, 0)
self.write("GAME OVER", align="center", font=FONT)
| 22.961538
| 69
| 0.586265
|
from turtle import Turtle
FONT = ("Courier", 20, "normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.hideturtle()
self.penup()
self.level = 1
self.goto(x=-230, y=260)
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.write(f"Level: {self.level}", align="center", font=FONT)
def increase_level(self):
self.level += 1
self.update_scoreboard()
def game_over(self):
self.goto(0, 0)
self.write("GAME OVER", align="center", font=FONT)
| true
| true
|
f70a09258b593b19b6aa36de6f5f883ef6e7bdbe
| 529
|
py
|
Python
|
zSPA/zSPA/spa/views.py
|
hawwestin/MSR.DjangoByExample
|
b1649ccb92e9d557c09f27c15fc779de64282fcb
|
[
"MIT"
] | 1
|
2021-02-08T20:29:06.000Z
|
2021-02-08T20:29:06.000Z
|
zSPA/zSPA/spa/views.py
|
hawwestin/MSR.DjangoByExample
|
b1649ccb92e9d557c09f27c15fc779de64282fcb
|
[
"MIT"
] | 13
|
2021-06-09T17:28:23.000Z
|
2022-03-12T00:54:20.000Z
|
zSPA/zSPA/spa/views.py
|
hawwestin/MSR.DjangoByExample
|
b1649ccb92e9d557c09f27c15fc779de64282fcb
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from decouple import config
from .models import Images
# Create your views here.
def index(request, **kwargs):
"""
Render single page application and provide context data.
:param request:
:param kwargs:
:return:
"""
context = {
'images': Images.objects.filter(is_active=True),
'googleForm': config('googleForm', default=''),
'googleMaps': config('googleMaps', default=''),
}
return render(request, 'spa/main_SPA.html', context)
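# A minimal sketch of routing to this view (hypothetical urls.py for the same
# app; standard Django routing API):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#     ]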
| 23
| 60
| 0.654064
|
from django.shortcuts import render
from decouple import config
from .models import Images
def index(request, **kwargs):
context = {
'images': Images.objects.filter(is_active=True),
'googleForm': config('googleForm', default=''),
'googleMaps': config('googleMaps', default=''),
}
return render(request, 'spa/main_SPA.html', context)
| true
| true
|
f70a097f52bce624bb2820b41fa155ec232643eb
| 2,720
|
py
|
Python
|
var/spack/repos/builtin/packages/sparskit/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/sparskit/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/sparskit/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Sparskit(MakefilePackage):
"""SPARSKIT: A basic tool-kit for sparse matrix computations (Version 2).
Made by Yousef Saad, University of Minnesota.
"""
homepage = "https://www-users.cse.umn.edu/~saad/software/SPARSKIT/"
version('develop', sha256='ecdd0a9968d6b45153a328710a42fe87600f0bba0e3c53896090b8ae1c113b7a',
url='http://www-users.cs.umn.edu/~saad/software/SPARSKIT/SPARSKIT2.tar.gz')
    # The library uses BLAS routines, which need to be known when the lib
    # is used. A dependent package should add self.spec['blas'].libs.ld_flags
    # at the end of its link line.
    # As is, though, this package compiles without needing to know about it.
# depends_on('blas', type='run')
variant('pic', default=True,
description='Compile with position independent code.')
variant('debug', default=False,
description='Builds a debug version of the library')
# We provide the standard Make flags here:
# https://spack.readthedocs.io/en/latest/packaging_guide.html?highlight=flag_handler#compiler-flags
def flag_handler(self, name, flags):
spec = self.spec
if '+pic' in spec:
if name == 'fflags':
flags.append(self.compiler.fc_pic_flag)
if name == 'fflags':
if 'gfortran' in self.compiler.fc:
flags.append('-std=legacy')
flags.append('-Wall')
if '+debug' in spec:
if '-g' in self.compiler.debug_flags:
flags.append('-g')
if '-O0' in self.compiler.opt_flags:
flags.append('-O0')
elif '-O' in self.compiler.opt_flags:
flags.append('-O')
else:
if '-O3' in self.compiler.opt_flags:
flags.append('-O3')
elif '-O2' in self.compiler.opt_flags:
flags.append('-O2')
return (None, flags, None)
def edit(self, spec, prefix):
mkfile = FileFilter('makefile')
mkfile.filter(r'^(OPT).*=.+', r'\1= -c $(FFLAGS)')
if os.path.exists('libskit.a'):
os.unlink('libskit.a')
def build(self, spec, prefix):
make('clean')
make('F77={0}'.format(spack_fc))
def install(self, spec, prefix):
mkdirp(prefix.lib)
install('libskit.*', prefix.lib)
@property
def libs(self):
return find_libraries(
"libskit*", root=self.prefix, shared=False, recursive=True
)
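    # A minimal usage sketch (hypothetical specs; standard Spack CLI):
    #
    #     spack install sparskit+pic
    #     spack install sparskit+debug~pic
    #
    # A dependent package can then pick up the static library through
    # self.spec['sparskit'].libs, appending self.spec['blas'].libs.ld_flags
    # to its link line as noted above.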
| 34.871795
| 103
| 0.609926
|
import os
from spack import *
class Sparskit(MakefilePackage):
homepage = "https://www-users.cse.umn.edu/~saad/software/SPARSKIT/"
version('develop', sha256='ecdd0a9968d6b45153a328710a42fe87600f0bba0e3c53896090b8ae1c113b7a',
url='http://www-users.cs.umn.edu/~saad/software/SPARSKIT/SPARSKIT2.tar.gz')
variant('pic', default=True,
description='Compile with position independent code.')
variant('debug', default=False,
description='Builds a debug version of the library')
def flag_handler(self, name, flags):
spec = self.spec
if '+pic' in spec:
if name == 'fflags':
flags.append(self.compiler.fc_pic_flag)
if name == 'fflags':
if 'gfortran' in self.compiler.fc:
flags.append('-std=legacy')
flags.append('-Wall')
if '+debug' in spec:
if '-g' in self.compiler.debug_flags:
flags.append('-g')
if '-O0' in self.compiler.opt_flags:
flags.append('-O0')
elif '-O' in self.compiler.opt_flags:
flags.append('-O')
else:
if '-O3' in self.compiler.opt_flags:
flags.append('-O3')
elif '-O2' in self.compiler.opt_flags:
flags.append('-O2')
return (None, flags, None)
def edit(self, spec, prefix):
mkfile = FileFilter('makefile')
mkfile.filter(r'^(OPT).*=.+', r'\1= -c $(FFLAGS)')
if os.path.exists('libskit.a'):
os.unlink('libskit.a')
def build(self, spec, prefix):
make('clean')
make('F77={0}'.format(spack_fc))
def install(self, spec, prefix):
mkdirp(prefix.lib)
install('libskit.*', prefix.lib)
@property
def libs(self):
return find_libraries(
"libskit*", root=self.prefix, shared=False, recursive=True
)
| true
| true
|
f70a0a2cf88801ad20816738947597123b97a106
| 3,932
|
py
|
Python
|
pygments/lexers/verification.py
|
KMilhan/pygments
|
5120e9943d137f7aa1d33499b79d5ebd5c9f775d
|
[
"BSD-2-Clause"
] | 1,198
|
2015-01-02T12:08:49.000Z
|
2021-10-07T02:46:59.000Z
|
pygments/lexers/verification.py
|
KMilhan/pygments
|
5120e9943d137f7aa1d33499b79d5ebd5c9f775d
|
[
"BSD-2-Clause"
] | 249
|
2015-01-22T13:31:12.000Z
|
2021-05-01T08:01:22.000Z
|
pygments/lexers/verification.py
|
KMilhan/pygments
|
5120e9943d137f7aa1d33499b79d5ebd5c9f775d
|
[
"BSD-2-Clause"
] | 118
|
2015-01-16T19:13:15.000Z
|
2021-07-21T15:09:15.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.verification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Intermediate Verification Languages (IVLs).
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Operator, Keyword, Name, Number, \
Punctuation, Text, Generic
__all__ = ['BoogieLexer', 'SilverLexer']
class BoogieLexer(RegexLexer):
"""
For `Boogie <https://boogie.codeplex.com/>`_ source code.
.. versionadded:: 2.1
"""
name = 'Boogie'
aliases = ['boogie']
filenames = ['*.bpl']
tokens = {
'root': [
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
'then', 'var', 'while'),
suffix=r'\b'), Keyword),
(words(('const',), suffix=r'\b'), Keyword.Reserved),
(words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
            (r'\{.*?\}', Generic.Emph),  # triggers
(r"([{}():;,.])", Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
class SilverLexer(RegexLexer):
"""
For `Silver <https://bitbucket.org/viperproject/silver>`_ source code.
.. versionadded:: 2.2
"""
name = 'Silver'
aliases = ['silver']
filenames = ['*.sil', '*.vpr']
tokens = {
'root': [
# Whitespace and Comments
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'result', 'true', 'false', 'null', 'method', 'function',
'predicate', 'program', 'domain', 'axiom', 'var', 'returns',
'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert',
'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh',
'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection',
'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists',
'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique',
'apply', 'package', 'folding', 'label', 'forperm'),
suffix=r'\b'), Keyword),
(words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator),
(words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r'[!%&*+=|?:<>/\-\[\]]', Operator),
            (r'\{.*?\}', Generic.Emph),  # triggers
(r'([{}():;,.])', Punctuation),
# Identifier
(r'[\w$]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
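# A minimal usage sketch for either lexer (standard pygments API; the Boogie
# snippet is illustrative):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    boogie_code = 'procedure P() requires true; { var x: int; }'
    print(highlight(boogie_code, BoogieLexer(), TerminalFormatter()))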
| 34.191304
| 92
| 0.444049
|
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Operator, Keyword, Name, Number, \
Punctuation, Text, Generic
__all__ = ['BoogieLexer', 'SilverLexer']
class BoogieLexer(RegexLexer):
name = 'Boogie'
aliases = ['boogie']
filenames = ['*.bpl']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
            (r'\\\n', Text),
            (r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'axiom', 'break', 'call', 'ensures', 'else', 'exists', 'function',
'forall', 'if', 'invariant', 'modifies', 'procedure', 'requires',
'then', 'var', 'while'),
suffix=r'\b'), Keyword),
(words(('const',), suffix=r'\b'), Keyword.Reserved),
(words(('bool', 'int', 'ref'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r"(>=|<=|:=|!=|==>|&&|\|\||[+/\-=>*<\[\]])", Operator),
(r'\{.*?\}', Generic.Emph), (r"([{}():;,.])", Punctuation),
(r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
class SilverLexer(RegexLexer):
name = 'Silver'
aliases = ['silver']
filenames = ['*.sil', '*.vpr']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
            (r'\\\n', Text),
            (r'//[/!](.*?)\n', Comment.Doc),
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(words((
'result', 'true', 'false', 'null', 'method', 'function',
'predicate', 'program', 'domain', 'axiom', 'var', 'returns',
'field', 'define', 'fold', 'unfold', 'inhale', 'exhale', 'new', 'assert',
'assume', 'goto', 'while', 'if', 'elseif', 'else', 'fresh',
'constraining', 'Seq', 'Set', 'Multiset', 'union', 'intersection',
'setminus', 'subset', 'unfolding', 'in', 'old', 'forall', 'exists',
'acc', 'wildcard', 'write', 'none', 'epsilon', 'perm', 'unique',
'apply', 'package', 'folding', 'label', 'forperm'),
suffix=r'\b'), Keyword),
(words(('requires', 'ensures', 'invariant'), suffix=r'\b'), Name.Decorator),
(words(('Int', 'Perm', 'Bool', 'Ref', 'Rational'), suffix=r'\b'), Keyword.Type),
include('numbers'),
(r'[!%&*+=|?:<>/\-\[\]]', Operator),
            (r'\{.*?\}', Generic.Emph),
            (r'([{}():;,.])', Punctuation),
(r'[\w$]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'numbers': [
(r'[0-9]+', Number.Integer),
],
}
| true
| true
|
f70a0b08e0b1f2114d4fef8c6ff7995adeceb92d
| 1,319
|
py
|
Python
|
DownloadSECFilingsExample.py
|
sdincer11/DownloadSECFilings
|
be7738bf9062d3e5c28aa3acb8209d868d846953
|
[
"Apache-2.0"
] | null | null | null |
DownloadSECFilingsExample.py
|
sdincer11/DownloadSECFilings
|
be7738bf9062d3e5c28aa3acb8209d868d846953
|
[
"Apache-2.0"
] | null | null | null |
DownloadSECFilingsExample.py
|
sdincer11/DownloadSECFilings
|
be7738bf9062d3e5c28aa3acb8209d868d846953
|
[
"Apache-2.0"
] | null | null | null |
from DownloadSECFilings import *
# The SEC EDGAR system stores the URL links for every filing under quarterly
# folders. The files storing this URL information are called "master" files.
# 1) Call the "createMasterFile" function if you want to keep the master file
#    updated. createMasterFile generates a TXT file under the folder provided
#    as the input.
# 2) Call the "downloadSECFilings" function if you want to download specific
#    filings that were filed between dateStart and dateFinish. The
#    "downloadSECFilings" function first creates a folder structure, such as
#    "folderPath/10-K/1000015000/" where 1000015000 represents the CIK number
#    (firm identifier), and downloads each file to the corresponding path with
#    a file-specific name.
# YOU CAN SEE A SAMPLE CALL BELOW to download all 10-Ks reported between
# January 1st, 2000 and June 30, 2001.
# WARNING: MAKE SURE THAT YOU HAVE ENOUGH HARD DRIVE SPACE BEFORE YOU
# DOWNLOAD THE FILINGS.
createMasterFile('C:/path to your folder where you want to download the filings to', 2000, 2001)
downloadSECFilings('C:/path to your folder where you want to download the filings to',
formTypesList=['10-K'],
dateStart=20000101,
dateFinish=20010630,
)
| 69.421053
| 115
| 0.738438
|
from DownloadSECFilings import *
createMasterFile('C:/path to your folder where you want to download the filings to', 2000, 2001)
downloadSECFilings('C:/path to your folder where you want to download the filings to',
formTypesList=['10-K'],
dateStart=20000101,
dateFinish=20010630,
)
| true
| true
|
f70a0bca3d6810cc9945ef26f760662ef36c5232
| 339
|
py
|
Python
|
happy_kids.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
happy_kids.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
happy_kids.py
|
anudeep586/Codechef_hackerrank_codeforces1
|
39a536d6ad6d670e0bce2ba8657cf5715b0037e0
|
[
"0BSD"
] | null | null | null |
try:
    tests = int(input())
    z = []
    for _ in range(tests):
        # n: number of kids (read but unused), x: allowed happiness spread
        n, x = [int(xi) for xi in input().split(" ")]
        arr = list(map(int, input().rstrip().split()))
        # difference between the largest and smallest happiness values
        m = max(arr) - min(arr)
        if m > x:
            z.append("NO")
        else:
            z.append("YES")
    for x in z:
        print(x)
except:
    pass
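# Worked example (assumed I/O): one test case with "n x" = "3 2" and
# happiness values "1 2 3": max(arr) - min(arr) = 2, which is not > x = 2,
# so the program prints "YES".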
| 22.6
| 52
| 0.439528
|
try:
tests=int(input())
z=[]
for _ in range(tests):
n,x=[int(xi) for xi in input().split(" ")]
arr=list(map(int,input().rstrip().split()))
m=max(arr)-min(arr)
if m>x:
z.append("NO")
else:
z.append("YES")
for x in z:
print(x)
except:pass
| true
| true
|
f70a0bd231c19af2ab9ae0085efc1a97b5a73b9f
| 100
|
py
|
Python
|
object_oriented_design/deck_of_cards/suite.py
|
bpatel28/practice_python
|
358c3ce1ca59d80711df07e6509af14e9c784f60
|
[
"MIT"
] | 1
|
2021-10-13T07:33:25.000Z
|
2021-10-13T07:33:25.000Z
|
object_oriented_design/deck_of_cards/suite.py
|
bpatel28/practice_python
|
358c3ce1ca59d80711df07e6509af14e9c784f60
|
[
"MIT"
] | null | null | null |
object_oriented_design/deck_of_cards/suite.py
|
bpatel28/practice_python
|
358c3ce1ca59d80711df07e6509af14e9c784f60
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Suite(Enum):
Diamond = 1
Heart = 2
Club = 3
Spade = 4
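# A minimal usage sketch (standard enum API):
#
#     suite = Suite.Heart
#     suite.name    # 'Heart'
#     suite.value   # 2
#     list(Suite)   # [<Suite.Diamond: 1>, <Suite.Heart: 2>, ...]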
| 11.111111
| 21
| 0.57
|
from enum import Enum
class Suite(Enum):
Diamond = 1
Heart = 2
Club = 3
Spade = 4
| true
| true
|
f70a0c6d4c741fe95e6abbe3c262e8e24fbf85c3
| 1,902
|
py
|
Python
|
quapy/method/non_aggregative.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | null | null | null |
quapy/method/non_aggregative.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | null | null | null |
quapy/method/non_aggregative.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | null | null | null |
from quapy.data import LabelledCollection
from .base import BaseQuantifier
class MaximumLikelihoodPrevalenceEstimation(BaseQuantifier):
"""
The `Maximum Likelihood Prevalence Estimation` (MLPE) method is a lazy method that assumes there is no prior
    probability shift between training and test instances (put another way, that the i.i.d. assumption holds).
The estimation of class prevalence values for any test sample is always (i.e., irrespective of the test sample
itself) the class prevalence seen during training. This method is considered to be a lower-bound quantifier that
any quantification method should beat.
"""
def __init__(self):
self._classes_ = None
def fit(self, data: LabelledCollection):
"""
Computes the training prevalence and stores it.
:param data: the training sample
:return: self
"""
self._classes_ = data.classes_
self.estimated_prevalence = data.prevalence()
return self
def quantify(self, instances):
"""
        Ignores the input instances and returns, as the class prevalence estimates, the training prevalence.
:param instances: array-like (ignored)
:return: the class prevalence seen during training
"""
return self.estimated_prevalence
@property
def classes_(self):
"""
        Class labels seen during training
        :return: array-like with the class labels
"""
return self._classes_
def get_params(self, deep=True):
"""
Does nothing, since this learner has no parameters.
:param deep: for compatibility with sklearn
:return: `None`
"""
return None
def set_params(self, **parameters):
"""
Does nothing, since this learner has no parameters.
:param parameters: dictionary of param-value pairs (ignored)
"""
pass
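# A minimal usage sketch (hedged; assumes LabelledCollection can be built
# from parallel arrays of instances and labels, per quapy's public API):
#
#     import numpy as np
#     train = LabelledCollection(np.random.rand(100, 5),
#                                np.random.randint(0, 2, size=100))
#     mlpe = MaximumLikelihoodPrevalenceEstimation().fit(train)
#     mlpe.quantify(instances=None)  # always returns the training prevalence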
| 30.190476
| 116
| 0.655626
|
from quapy.data import LabelledCollection
from .base import BaseQuantifier
class MaximumLikelihoodPrevalenceEstimation(BaseQuantifier):
def __init__(self):
self._classes_ = None
def fit(self, data: LabelledCollection):
self._classes_ = data.classes_
self.estimated_prevalence = data.prevalence()
return self
def quantify(self, instances):
return self.estimated_prevalence
@property
def classes_(self):
return self._classes_
def get_params(self, deep=True):
return None
def set_params(self, **parameters):
pass
| true
| true
|
f70a0cb3b2ed71901741e067150e72927820d222
| 31,386
|
py
|
Python
|
testslide/strict_mock.py
|
deathowl/TestSlide
|
22958af9d487caa9bbc309405106591a48716ad5
|
[
"MIT"
] | 48
|
2021-01-06T15:11:09.000Z
|
2022-03-09T22:52:25.000Z
|
testslide/strict_mock.py
|
deathowl/TestSlide
|
22958af9d487caa9bbc309405106591a48716ad5
|
[
"MIT"
] | 63
|
2021-01-06T15:04:49.000Z
|
2022-03-30T22:12:24.000Z
|
testslide/strict_mock.py
|
deathowl/TestSlide
|
22958af9d487caa9bbc309405106591a48716ad5
|
[
"MIT"
] | 33
|
2021-03-03T19:55:14.000Z
|
2022-03-29T21:49:30.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import dis
import inspect
import os.path
from types import FrameType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Type,
Union,
get_type_hints,
)
import testslide.lib
import testslide.mock_callable
if TYPE_CHECKING:
# Hack to enable typing information for mypy
from testslide.mock_callable import _CallableMock, _YieldValuesRunner # noqa: F401
class UndefinedAttribute(BaseException):
"""
    Raised on access of a StrictMock attribute that has not been set yet.
Inherits from BaseException to avoid being caught by tested code.
"""
def __init__(
self, strict_mock: "StrictMock", name: str, extra_msg: Optional[str] = None
) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
self.extra_msg = extra_msg
def __str__(self) -> str:
message = (
f"'{self.name}' is not set.\n"
f"{repr(self.strict_mock)} must have a value set for this attribute "
"if it is going to be accessed."
)
if self.extra_msg is not None:
message += f"\n{self.extra_msg}"
return message
class NonExistentAttribute(BaseException):
"""
    Raised when trying to set an attribute on a StrictMock that is not
    present in the template class.
Inherits from BaseException to avoid being caught by tested code.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' is not part of the API.\n"
f"{self.strict_mock} template class API does not have this "
"attribute so the mock can not have it as well.\n"
"If you are inheriting StrictMock, you can define private "
"attributes, that will not interfere with the API, by prefixing "
"them with '__' (and at most one '_' suffix) "
" (https://docs.python.org/3/tutorial/classes.html#tut-private).\n"
"See also: 'runtime_attrs' at StrictMock.__init__."
)
class NonCallableValue(BaseException):
"""
Raised when trying to set a non callable value to a callable attribute of
a StrictMock instance.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a non-callable value.\n"
f"{self.strict_mock} template class requires this attribute to "
"be callable."
)
class NonAwaitableReturn(BaseException):
"""
    Raised when a coroutine method of a StrictMock is assigned a callable
    that does not return an awaitable.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a callable that does not "
"return an awaitable.\n"
f"{self.strict_mock} template class requires this attribute to "
"be a callable that returns an awaitable (eg: a 'async def' "
"function)."
)
class UnsupportedMagic(BaseException):
"""
Raised when trying to set an unsupported magic attribute to a StrictMock
instance.
"""
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return f"setting '{self.name}' is not supported."
class _DefaultMagic:
CONTEXT_MANAGER_METHODS = ["__enter__", "__exit__", "__aenter__", "__aexit__"]
def __init__(self, strict_mock: "StrictMock", name: str):
self.strict_mock = strict_mock
self.name = name
def __call__(self, *args: Any, **kwargs: Any) -> None:
message = None
if self.name in self.CONTEXT_MANAGER_METHODS:
message = (
"Tip: most context managers can be automatically configured "
"with 'default_context_manager=True'."
)
raise UndefinedAttribute(self.strict_mock, self.name, message)
def __copy__(self) -> "_DefaultMagic":
return type(self)(strict_mock=self.strict_mock, name=self.name)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_DefaultMagic":
if memo is None:
memo = {}
self_copy = type(self)(strict_mock=self.strict_mock, name=self.name)
memo[id(self)] = self_copy
return self_copy
class _MethodProxy:
"""
When setting callable attributes, the new value is wrapped by another
function that does signature and async validations. We then need this proxy
around it, so that when the attribute is called, the mock value is called
(the wrapper function which then calls the new value) but all attribute
access is forwarded to the new value.
"""
def __init__(self, value: Any, callable_value: Optional[Callable] = None) -> None:
self.__dict__["_value"] = value
self.__dict__["_callable_value"] = callable_value or value
def __get__(
self, instance: "StrictMock", owner: Optional[Type["StrictMock"]] = None
) -> Union[object, Callable]:
if self.__dict__["_value"] is self.__dict__["_callable_value"]:
return self.__dict__["_callable_value"]
else:
return self
def __getattr__(self, name: str) -> str:
return getattr(self.__dict__["_value"], name)
def __setattr__(self, name: str, value: str) -> None:
return setattr(self.__dict__["_value"], name, value)
def __delattr__(self, name: str) -> None:
return delattr(self.__dict__["_value"], name)
def __call__(self, *args: Any, **kwargs: Any) -> Optional[Any]:
return self.__dict__["_callable_value"](*args, **kwargs)
def __copy__(self) -> "_MethodProxy":
return type(self)(
callable_value=self.__dict__["_callable_value"],
value=self.__dict__["_value"],
)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_MethodProxy":
if memo is None:
memo = {}
self_copy = type(self)(
callable_value=copy.deepcopy(self.__dict__["_callable_value"]),
value=copy.deepcopy(self.__dict__["_value"]),
)
memo[id(self)] = self_copy
return self_copy
def __repr__(self) -> str:
# Override repr to have a representation that provides information
# about the wrapped method
return repr(self.__dict__["_value"])
class StrictMock:
"""
Mock object that won't allow any attribute access or method call, unless its
behavior has been explicitly defined. This is meant to be a safer
    alternative to Python's standard Mock object, which will by default
    return another mock whenever it is referenced.
StrictMock is "safe by default", meaning that it will never misbehave by
lack of configuration. It will raise in the following situations:
- Get/Set attribute that's not part of the specification (template or
runtime_attrs).
- Get attribute that is part of the specification, but has not yet been
defined.
- Call a method with different signature from the template.
When appropriate, raised exceptions inherits from BaseException, in order to
let exceptions raise the test, outside tested code, so we can get a clear
signal of what is happening: either the mock is missing a required behavior
or the tested code is misbehaving.
"""
TRIM_PATH_PREFIX = ""
# All of these magic should be OK to be set at the mock and they are
# expected to work as they should. If implemented by the template class,
# they will have default values assigned to them, that raise
# UndefinedAttribute until configured.
__SETTABLE_MAGICS = [
"__abs__",
"__add__",
"__aenter__",
"__aexit__",
"__aiter__",
"__and__",
"__anext__",
"__await__",
"__bool__",
"__bytes__",
"__call__",
"__ceil__",
"__complex__",
"__contains__",
"__delete__",
"__delitem__",
"__divmod__",
"__enter__",
"__enter__",
"__eq__",
"__exit__",
"__exit__",
"__float__",
"__floor__",
"__floordiv__",
"__format__",
"__ge__",
"__get__",
"__getformat__",
"__getinitargs__",
"__getitem__",
"__getnewargs__",
"__getnewargs_ex__",
"__getstate__",
"__gt__",
"__iadd__",
"__iand__",
"__ifloordiv__",
"__ilshift__",
"__imatmul__",
"__imod__",
"__imul__",
"__index__",
"__instancecheck__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__iter__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__length_hint__",
"__lshift__",
"__lt__",
"__matmul__",
"__missing__",
"__mod__",
"__mul__",
"__name__",
"__ne__",
"__neg__",
"__next__",
"__or__",
"__pos__",
"__pow__",
"__qualname__",
"__radd__",
"__rand__",
"__rdivmod__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__round__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__set__",
"__set_name__",
"__setformat__",
"__setitem__",
"__setstate__",
"__sizeof__",
"__str__",
"__sub__",
"__subclasscheck__",
"__truediv__",
"__trunc__",
"__xor__",
]
    # These magics either won't work or make no sense to be set on a mock
    # instance. Trying to set them will raise UnsupportedMagic.
__UNSETTABLE_MAGICS = [
"__bases__",
"__class__",
"__class_getitem__",
"__copy__",
"__deepcopy__",
"__del__",
"__delattr__",
"__dict__",
"__dir__",
"__getattr__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__mro__",
"__new__",
"__setattr__",
"__slots__",
"__subclasses__",
]
def __new__(
cls,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> "StrictMock":
"""
For every new instance of StrictMock we dynamically create a subclass of
StrictMock and return an instance of it. This allows us to use this new
subclass dictionary for all attributes, including magic ones, that must
be defined at the class to work.
"""
name = f"{template.__name__}{cls.__name__}" if template else cls.__name__
strict_mock_subclass = type(name, (cls,), {})
return object.__new__(strict_mock_subclass)
def __setup_magic_methods(self) -> None:
"""
Populate all template's magic methods with expected default behavior.
        This is important as things such as bool() depend on them existing
        in the object's class __dict__.
https://github.com/facebook/TestSlide/issues/23
"""
if not self._template:
return
implemented_magic_methods = []
for klass in type(self).mro():
if klass is object:
continue
for name in klass.__dict__:
if name.startswith("__") and name.endswith("__"):
implemented_magic_methods.append(name)
for klass in self._template.mro():
if klass is object:
continue
for name in klass.__dict__:
if name in type(self).__dict__:
continue
if name == "__hash__":
if klass.__dict__["__hash__"] is None:
setattr(self, name, None)
else:
setattr(self, name, lambda: id(self))
continue
if (
callable(klass.__dict__[name])
and name in self.__SETTABLE_MAGICS
and name not in self.__UNSETTABLE_MAGICS
and name not in implemented_magic_methods
):
setattr(self, name, _DefaultMagic(self, name))
def __setup_default_context_manager(self, default_context_manager: bool) -> None:
if self._template and default_context_manager:
if hasattr(self._template, "__enter__") and hasattr(
self._template, "__exit__"
):
self.__enter__ = lambda: self
self.__exit__ = lambda exc_type, exc_value, traceback: None
if hasattr(self._template, "__aenter__") and hasattr(
self._template, "__aexit__"
):
async def aenter():
return self
async def aexit(exc_type, exc_value, traceback):
pass
self.__aenter__ = aenter
self.__aexit__ = aexit
def __get_caller_frame(self, depth: int) -> FrameType:
# Adding extra 3 to account for the stack:
# __get_caller_frame
# __get_caller
# __init__
depth += 3
current_frame = inspect.currentframe()
while current_frame:
depth -= 1
if not depth:
break
current_frame = current_frame.f_back
return current_frame # type: ignore
def __get_caller(self, depth: int) -> Optional[str]:
        # Doing inspect.stack would retrieve the whole stack, including
        # context, and that is really slow; this only retrieves the
        # minimum and does not read the file contents.
caller_frame = self.__get_caller_frame(depth)
# loading the context ends up reading files from disk and that might block
# the event loop, so we don't do it.
frameinfo = inspect.getframeinfo(caller_frame, context=0)
filename = frameinfo.filename
lineno = frameinfo.lineno
if self.TRIM_PATH_PREFIX:
split = filename.split(self.TRIM_PATH_PREFIX)
if len(split) == 2 and not split[0]:
filename = split[1]
if os.path.exists(filename):
return "{}:{}".format(filename, lineno)
else:
return None
def __setup_subclass(self):
"""
When StrictMock is subclassed, any attributes defined at the subclass
will override any of StrictMock's validations. In order to overcome
        this, for attributes where it makes sense, we set them at StrictMock's
dynamically created subclass from __new__ using __setattr__, so that
all validations work.
"""
if type(self).mro()[1] == StrictMock:
return
for klass in type(self).mro()[1:]:
if klass == StrictMock:
break
for name in klass.__dict__.keys():
if name in [
"__doc__",
"__init__",
"__module__",
]:
continue
# https://docs.python.org/3/tutorial/classes.html#tut-private
if name.startswith(f"_{type(self).__name__}__") and not name.endswith(
"__"
):
continue
if name == "__hash__" and klass.__dict__["__hash__"] is None:
continue
StrictMock.__setattr__(self, name, getattr(self, name))
def __init__(
self,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> None:
"""
template: Template class to be used as a template for the mock.
runtime_attrs: Often attributes are created within an instance's
lifecycle, typically from __init__(). To allow mocking such attributes,
specify their names here.
name: an optional name for this mock instance.
default_context_manager: If the template class is a context manager,
setup a mock for __enter__/__aenter__ that yields itself and an empty function
for __exit__/__aexit__.
        type_validation: validate callable attribute calls against the
        template's method signatures and use type hinting information from the
        template to validate that mock attribute types match them. Type
        validation also happens for callable attribute (instance/static/class
        method) calls.
        attributes_to_skip_type_validation: do not validate types for these
        attributes of the StrictMock instance.
"""
if template is not None and not inspect.isclass(template):
raise ValueError("Template must be a class.")
self.__dict__["_template"] = template
self.__dict__["_runtime_attrs"] = runtime_attrs or []
self.__dict__["_name"] = name
self.__dict__["_type_validation"] = type_validation
self.__dict__["__caller"] = self.__get_caller(1)
self.__dict__[
"_attributes_to_skip_type_validation"
] = attributes_to_skip_type_validation
caller_frame = inspect.currentframe().f_back # type: ignore
# loading the context ends up reading files from disk and that might block
# the event loop, so we don't do it.
caller_frame_info = inspect.getframeinfo(caller_frame, context=0) # type: ignore
self.__dict__["_caller_frame_info"] = caller_frame_info
self.__setup_magic_methods()
self.__setup_default_context_manager(default_context_manager)
self.__setup_subclass()
@property # type: ignore
def __class__(self) -> type:
return self._template if self._template is not None else type(self)
@property
    def _template(self) -> Optional[type]:
import testslide.mock_constructor # Avoid cyclic dependencies
# If the template class was mocked with mock_constructor(), this will
# return the mocked subclass, which contains all attributes we need for
# introspection.
return testslide.mock_constructor._get_class_or_mock(self.__dict__["_template"])
# FIXME change to __runtime_attrs
@property
def _runtime_attrs(self) -> Optional[List[Any]]:
return self.__dict__["_runtime_attrs"]
def __template_has_attr(self, name: str) -> bool:
def get_class_init(klass: type) -> Callable:
import testslide.mock_constructor # Avoid cyclic dependencies
if not testslide.mock_constructor._is_mocked_class(klass):
return klass.__init__ # type: ignore
# If klass is the mocked subclass, pull the original version of
# __init__ so we can introspect into its implementation (and
# not the __init__ wrapper at the mocked class).
mocked_class = klass
original_class = mocked_class.mro()[1]
return testslide.mock_constructor._get_original_init(
original_class, instance=None, owner=mocked_class
)
def is_runtime_attr() -> bool:
if self._template:
for klass in self._template.mro():
template_init = get_class_init(klass)
if not inspect.isfunction(template_init):
continue
for instruction in dis.get_instructions(template_init):
if (
instruction.opname == "STORE_ATTR"
and name == instruction.argval
):
return True
return False
return (
hasattr(self._template, name)
or name in self._runtime_attrs # type: ignore
or name in getattr(self._template, "__slots__", [])
or is_runtime_attr()
)
@staticmethod
def __is_magic_method(name: str) -> bool:
return name.startswith("__") and name.endswith("__")
def __validate_attribute_type(self, name: str, value: Any) -> None:
if (
not self.__dict__["_type_validation"]
or name in self.__dict__["_attributes_to_skip_type_validation"]
):
return
if self._template is not None:
try:
annotations = get_type_hints(self._template)
except Exception:
                # Some modules can throw KeyError: https://bugs.python.org/issue41515
annotations = {}
if name in annotations:
testslide.lib._validate_argument_type(annotations[name], name, value)
def __validate_and_wrap_mock_value(self, name: str, value: Any) -> Any:
if self._template:
if not self.__template_has_attr(name):
if not (
name.startswith(f"_{type(self).__name__}__")
and not name.endswith("__")
):
raise NonExistentAttribute(self, name)
self.__validate_attribute_type(name, value)
if hasattr(self._template, name):
template_value = getattr(self._template, name)
if callable(template_value):
if not callable(value):
raise NonCallableValue(self, name)
if self.__dict__["_type_validation"]:
signature_validation_wrapper = (
testslide.lib._wrap_signature_and_type_validation(
value,
self._template,
name,
self.__dict__["_type_validation"],
)
)
if inspect.iscoroutinefunction(template_value):
async def awaitable_return_validation_wrapper(
*args, **kwargs
):
result_awaitable = signature_validation_wrapper(
*args, **kwargs
)
if not inspect.isawaitable(result_awaitable):
raise NonAwaitableReturn(self, name)
return_value = await result_awaitable
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = awaitable_return_validation_wrapper
else:
def return_validation_wrapper(*args, **kwargs):
return_value = signature_validation_wrapper(
*args, **kwargs
)
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = return_validation_wrapper
else:
callable_value = None
return _MethodProxy(value=value, callable_value=callable_value)
else:
if callable(value):
# We don't really need the proxy here, but it serves the
# double purpose of swallowing self / cls when needed.
return _MethodProxy(value=value)
else:
if callable(value):
# We don't really need the proxy here, but it serves the
# double purpose of swallowing self / cls when needed.
return _MethodProxy(value=value)
return value
def __setattr__(self, name: str, value: Any) -> None:
if self.__is_magic_method(name):
# ...check whether we're allowed to mock...
if (
name in self.__UNSETTABLE_MAGICS
or (name in StrictMock.__dict__ and name not in self.__SETTABLE_MAGICS)
) and name != "__hash__":
raise UnsupportedMagic(self, name)
# ...or if it is something unsupported.
if name not in self.__SETTABLE_MAGICS and name != "__hash__":
raise NotImplementedError(
f"StrictMock does not implement support for {name}"
)
if name == "__hash__" and name in type(self).__dict__:
raise UnsupportedMagic(self, name)
mock_value = self.__validate_and_wrap_mock_value(name, value)
setattr(type(self), name, mock_value)
def __getattr__(self, name: str) -> Any:
if self._template and self.__template_has_attr(name):
raise UndefinedAttribute(self, name)
else:
raise AttributeError(f"'{name}' was not set for {repr(self)}.")
def __delattr__(self, name: str) -> None:
if name in type(self).__dict__:
delattr(type(self), name)
def __repr__(self) -> str:
template_str = (
" template={}.{}".format(self._template.__module__, self._template.__name__) # type: ignore
if self._template
else ""
)
if self.__dict__["_name"]:
name_str = " name={}".format(repr(self.__dict__["_name"]))
else:
name_str = ""
if self.__dict__["__caller"]:
caller_str = " {}".format(self.__dict__["__caller"])
else:
caller_str = ""
return "<StrictMock 0x{:02X}{name}{template}{caller}>".format(
id(self), name=name_str, template=template_str, caller=caller_str
)
def __str__(self) -> str:
return self.__repr__()
def __get_copy(self) -> "StrictMock":
self_copy = StrictMock(
template=self._template,
runtime_attrs=self._runtime_attrs,
name=self._name,
type_validation=self._type_validation,
attributes_to_skip_type_validation=self._attributes_to_skip_type_validation,
)
self_copy.__dict__["__caller"] = self.__get_caller(2)
return self_copy
def __get_copyable_attrs(self, self_copy: "StrictMock") -> List[str]:
return [
name
for name in type(self).__dict__
if name not in self_copy.__dict__
and (
not name.startswith("__")
or not name.endswith("__")
or name in self.__SETTABLE_MAGICS
)
]
def __copy__(self) -> "StrictMock":
self_copy = self.__get_copy()
for name in self.__get_copyable_attrs(self_copy):
setattr(type(self_copy), name, type(self).__dict__[name])
return self_copy
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "StrictMock":
if memo is None:
memo = {}
self_copy = self.__get_copy()
memo[id(self)] = self_copy
for name in self.__get_copyable_attrs(self_copy):
value = copy.deepcopy(type(self).__dict__[name], memo)
setattr(type(self_copy), name, value)
return self_copy
def _extract_StrictMock_template(mock_obj: StrictMock) -> Optional[Any]:
if "_template" in mock_obj.__dict__ and mock_obj._template is not None:
return mock_obj._template
return None
testslide.lib.MOCK_TEMPLATE_EXTRACTORS[StrictMock] = _extract_StrictMock_template # type: ignore
| 37.05549
| 104
| 0.567419
|
import copy
import dis
import inspect
import os.path
from types import FrameType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Type,
Union,
get_type_hints,
)
import testslide.lib
import testslide.mock_callable
if TYPE_CHECKING:
from testslide.mock_callable import _CallableMock, _YieldValuesRunner
class UndefinedAttribute(BaseException):
def __init__(
self, strict_mock: "StrictMock", name: str, extra_msg: Optional[str] = None
) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
self.extra_msg = extra_msg
def __str__(self) -> str:
message = (
f"'{self.name}' is not set.\n"
f"{repr(self.strict_mock)} must have a value set for this attribute "
"if it is going to be accessed."
)
if self.extra_msg is not None:
message += f"\n{self.extra_msg}"
return message
class NonExistentAttribute(BaseException):
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' is not part of the API.\n"
f"{self.strict_mock} template class API does not have this "
"attribute so the mock can not have it as well.\n"
"If you are inheriting StrictMock, you can define private "
"attributes, that will not interfere with the API, by prefixing "
"them with '__' (and at most one '_' suffix) "
" (https://docs.python.org/3/tutorial/classes.html#tut-private).\n"
"See also: 'runtime_attrs' at StrictMock.__init__."
)
class NonCallableValue(BaseException):
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a non-callable value.\n"
f"{self.strict_mock} template class requires this attribute to "
"be callable."
)
class NonAwaitableReturn(BaseException):
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return (
f"'{self.name}' can not be set with a callable that does not "
"return an awaitable.\n"
f"{self.strict_mock} template class requires this attribute to "
"be a callable that returns an awaitable (eg: a 'async def' "
"function)."
)
class UnsupportedMagic(BaseException):
def __init__(self, strict_mock: "StrictMock", name: str) -> None:
super().__init__(strict_mock, name)
self.strict_mock = strict_mock
self.name = name
def __str__(self) -> str:
return f"setting '{self.name}' is not supported."
class _DefaultMagic:
CONTEXT_MANAGER_METHODS = ["__enter__", "__exit__", "__aenter__", "__aexit__"]
def __init__(self, strict_mock: "StrictMock", name: str):
self.strict_mock = strict_mock
self.name = name
def __call__(self, *args: Any, **kwargs: Any) -> None:
message = None
if self.name in self.CONTEXT_MANAGER_METHODS:
message = (
"Tip: most context managers can be automatically configured "
"with 'default_context_manager=True'."
)
raise UndefinedAttribute(self.strict_mock, self.name, message)
def __copy__(self) -> "_DefaultMagic":
return type(self)(strict_mock=self.strict_mock, name=self.name)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_DefaultMagic":
if memo is None:
memo = {}
self_copy = type(self)(strict_mock=self.strict_mock, name=self.name)
memo[id(self)] = self_copy
return self_copy
class _MethodProxy:
def __init__(self, value: Any, callable_value: Optional[Callable] = None) -> None:
self.__dict__["_value"] = value
self.__dict__["_callable_value"] = callable_value or value
def __get__(
self, instance: "StrictMock", owner: Optional[Type["StrictMock"]] = None
) -> Union[object, Callable]:
if self.__dict__["_value"] is self.__dict__["_callable_value"]:
return self.__dict__["_callable_value"]
else:
return self
def __getattr__(self, name: str) -> str:
return getattr(self.__dict__["_value"], name)
def __setattr__(self, name: str, value: str) -> None:
return setattr(self.__dict__["_value"], name, value)
def __delattr__(self, name: str) -> None:
return delattr(self.__dict__["_value"], name)
def __call__(self, *args: Any, **kwargs: Any) -> Optional[Any]:
return self.__dict__["_callable_value"](*args, **kwargs)
def __copy__(self) -> "_MethodProxy":
return type(self)(
callable_value=self.__dict__["_callable_value"],
value=self.__dict__["_value"],
)
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "_MethodProxy":
if memo is None:
memo = {}
self_copy = type(self)(
callable_value=copy.deepcopy(self.__dict__["_callable_value"]),
value=copy.deepcopy(self.__dict__["_value"]),
)
memo[id(self)] = self_copy
return self_copy
def __repr__(self) -> str:
return repr(self.__dict__["_value"])
class StrictMock:
TRIM_PATH_PREFIX = ""
__SETTABLE_MAGICS = [
"__abs__",
"__add__",
"__aenter__",
"__aexit__",
"__aiter__",
"__and__",
"__anext__",
"__await__",
"__bool__",
"__bytes__",
"__call__",
"__ceil__",
"__complex__",
"__contains__",
"__delete__",
"__delitem__",
"__divmod__",
"__enter__",
"__enter__",
"__eq__",
"__exit__",
"__exit__",
"__float__",
"__floor__",
"__floordiv__",
"__format__",
"__ge__",
"__get__",
"__getformat__",
"__getinitargs__",
"__getitem__",
"__getnewargs__",
"__getnewargs_ex__",
"__getstate__",
"__gt__",
"__iadd__",
"__iand__",
"__ifloordiv__",
"__ilshift__",
"__imatmul__",
"__imod__",
"__imul__",
"__index__",
"__instancecheck__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__iter__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__length_hint__",
"__lshift__",
"__lt__",
"__matmul__",
"__missing__",
"__mod__",
"__mul__",
"__name__",
"__ne__",
"__neg__",
"__next__",
"__or__",
"__pos__",
"__pow__",
"__qualname__",
"__radd__",
"__rand__",
"__rdivmod__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__round__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__set__",
"__set_name__",
"__setformat__",
"__setitem__",
"__setstate__",
"__sizeof__",
"__str__",
"__sub__",
"__subclasscheck__",
"__truediv__",
"__trunc__",
"__xor__",
]
    # These magic methods can not be set on an instance of a class.
    # Trying to set them will raise UnsupportedMagic.
__UNSETTABLE_MAGICS = [
"__bases__",
"__class__",
"__class_getitem__",
"__copy__",
"__deepcopy__",
"__del__",
"__delattr__",
"__dict__",
"__dir__",
"__getattr__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__mro__",
"__new__",
"__setattr__",
"__slots__",
"__subclasses__",
]
def __new__(
cls,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> "StrictMock":
name = f"{template.__name__}{cls.__name__}" if template else cls.__name__
strict_mock_subclass = type(name, (cls,), {})
return object.__new__(strict_mock_subclass)
def __setup_magic_methods(self) -> None:
if not self._template:
return
implemented_magic_methods = []
for klass in type(self).mro():
if klass is object:
continue
for name in klass.__dict__:
if name.startswith("__") and name.endswith("__"):
implemented_magic_methods.append(name)
for klass in self._template.mro():
if klass is object:
continue
for name in klass.__dict__:
if name in type(self).__dict__:
continue
if name == "__hash__":
if klass.__dict__["__hash__"] is None:
setattr(self, name, None)
else:
setattr(self, name, lambda: id(self))
continue
if (
callable(klass.__dict__[name])
and name in self.__SETTABLE_MAGICS
and name not in self.__UNSETTABLE_MAGICS
and name not in implemented_magic_methods
):
setattr(self, name, _DefaultMagic(self, name))
def __setup_default_context_manager(self, default_context_manager: bool) -> None:
if self._template and default_context_manager:
if hasattr(self._template, "__enter__") and hasattr(
self._template, "__exit__"
):
self.__enter__ = lambda: self
self.__exit__ = lambda exc_type, exc_value, traceback: None
if hasattr(self._template, "__aenter__") and hasattr(
self._template, "__aexit__"
):
async def aenter():
return self
async def aexit(exc_type, exc_value, traceback):
pass
self.__aenter__ = aenter
self.__aexit__ = aexit
def __get_caller_frame(self, depth: int) -> FrameType:
# Adding extra 3 to account for the stack:
# __get_caller_frame
# __get_caller
# __init__
depth += 3
current_frame = inspect.currentframe()
while current_frame:
depth -= 1
if not depth:
break
current_frame = current_frame.f_back
return current_frame # type: ignore
def __get_caller(self, depth: int) -> Optional[str]:
# Doing inspect.stack will retrieve the whole stack, including context
# and that is really slow, this only retrieves the minimum, and does
# not read the file contents.
caller_frame = self.__get_caller_frame(depth)
# loading the context ends up reading files from disk and that might block
# the event loop, so we don't do it.
frameinfo = inspect.getframeinfo(caller_frame, context=0)
filename = frameinfo.filename
lineno = frameinfo.lineno
if self.TRIM_PATH_PREFIX:
split = filename.split(self.TRIM_PATH_PREFIX)
if len(split) == 2 and not split[0]:
filename = split[1]
if os.path.exists(filename):
return "{}:{}".format(filename, lineno)
else:
return None
def __setup_subclass(self):
if type(self).mro()[1] == StrictMock:
return
for klass in type(self).mro()[1:]:
if klass == StrictMock:
break
for name in klass.__dict__.keys():
if name in [
"__doc__",
"__init__",
"__module__",
]:
continue
if name.startswith(f"_{type(self).__name__}__") and not name.endswith(
"__"
):
continue
if name == "__hash__" and klass.__dict__["__hash__"] is None:
continue
StrictMock.__setattr__(self, name, getattr(self, name))
def __init__(
self,
template: Optional[type] = None,
runtime_attrs: Optional[List[Any]] = None,
name: Optional[str] = None,
default_context_manager: bool = False,
type_validation: bool = True,
attributes_to_skip_type_validation: List[str] = [],
) -> None:
if template is not None and not inspect.isclass(template):
raise ValueError("Template must be a class.")
self.__dict__["_template"] = template
self.__dict__["_runtime_attrs"] = runtime_attrs or []
self.__dict__["_name"] = name
self.__dict__["_type_validation"] = type_validation
self.__dict__["__caller"] = self.__get_caller(1)
self.__dict__[
"_attributes_to_skip_type_validation"
] = attributes_to_skip_type_validation
        caller_frame = inspect.currentframe().f_back
        caller_frame_info = inspect.getframeinfo(caller_frame, context=0)  # type: ignore
self.__dict__["_caller_frame_info"] = caller_frame_info
self.__setup_magic_methods()
self.__setup_default_context_manager(default_context_manager)
self.__setup_subclass()
@property # type: ignore
def __class__(self) -> type:
return self._template if self._template is not None else type(self)
@property
    def _template(self) -> Optional[type]:
import testslide.mock_constructor # Avoid cyclic dependencies
# If the template class was mocked with mock_constructor(), this will
# return the mocked subclass, which contains all attributes we need for
# introspection.
return testslide.mock_constructor._get_class_or_mock(self.__dict__["_template"])
# FIXME change to __runtime_attrs
@property
def _runtime_attrs(self) -> Optional[List[Any]]:
return self.__dict__["_runtime_attrs"]
def __template_has_attr(self, name: str) -> bool:
def get_class_init(klass: type) -> Callable:
import testslide.mock_constructor # Avoid cyclic dependencies
if not testslide.mock_constructor._is_mocked_class(klass):
return klass.__init__ # type: ignore
# If klass is the mocked subclass, pull the original version of
# __init__ so we can introspect into its implementation (and
# not the __init__ wrapper at the mocked class).
mocked_class = klass
original_class = mocked_class.mro()[1]
return testslide.mock_constructor._get_original_init(
original_class, instance=None, owner=mocked_class
)
def is_runtime_attr() -> bool:
if self._template:
for klass in self._template.mro():
template_init = get_class_init(klass)
if not inspect.isfunction(template_init):
continue
for instruction in dis.get_instructions(template_init):
if (
instruction.opname == "STORE_ATTR"
and name == instruction.argval
):
return True
return False
return (
hasattr(self._template, name)
or name in self._runtime_attrs # type: ignore
or name in getattr(self._template, "__slots__", [])
or is_runtime_attr()
)
@staticmethod
def __is_magic_method(name: str) -> bool:
return name.startswith("__") and name.endswith("__")
def __validate_attribute_type(self, name: str, value: Any) -> None:
if (
not self.__dict__["_type_validation"]
or name in self.__dict__["_attributes_to_skip_type_validation"]
):
return
if self._template is not None:
try:
annotations = get_type_hints(self._template)
except Exception:
                # Some modules can throw KeyError: https://bugs.python.org/issue41515
annotations = {}
if name in annotations:
testslide.lib._validate_argument_type(annotations[name], name, value)
def __validate_and_wrap_mock_value(self, name: str, value: Any) -> Any:
if self._template:
if not self.__template_has_attr(name):
if not (
name.startswith(f"_{type(self).__name__}__")
and not name.endswith("__")
):
raise NonExistentAttribute(self, name)
self.__validate_attribute_type(name, value)
if hasattr(self._template, name):
template_value = getattr(self._template, name)
if callable(template_value):
if not callable(value):
raise NonCallableValue(self, name)
if self.__dict__["_type_validation"]:
signature_validation_wrapper = (
testslide.lib._wrap_signature_and_type_validation(
value,
self._template,
name,
self.__dict__["_type_validation"],
)
)
if inspect.iscoroutinefunction(template_value):
async def awaitable_return_validation_wrapper(
*args, **kwargs
):
result_awaitable = signature_validation_wrapper(
*args, **kwargs
)
if not inspect.isawaitable(result_awaitable):
raise NonAwaitableReturn(self, name)
return_value = await result_awaitable
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = awaitable_return_validation_wrapper
else:
def return_validation_wrapper(*args, **kwargs):
return_value = signature_validation_wrapper(
*args, **kwargs
)
if not testslide.lib._is_wrapped_for_signature_and_type_validation(
# The original value was already wrapped for type
# validation. Skipping additional validation to
# allow, for example, mock_callable to disable
# validation for a very specific mock call rather
# for the whole StrictMock instance
value
) and not isinstance(
# If the return value is a _BaseRunner then type
# validation, if needed, has already been performed
return_value,
testslide.mock_callable._BaseRunner,
):
testslide.lib._validate_return_type(
template_value,
return_value,
self.__dict__["_caller_frame_info"],
)
return return_value
callable_value = return_validation_wrapper
else:
callable_value = None
return _MethodProxy(value=value, callable_value=callable_value)
else:
if callable(value):
                    # We don't really need the proxy here, but it serves the
                    # double purpose of swallowing self / cls when needed.
                    return _MethodProxy(value=value)
        else:
            if callable(value):
                # We don't really need the proxy here, but it serves the
                # double purpose of swallowing self / cls when needed.
                return _MethodProxy(value=value)
return value
def __setattr__(self, name: str, value: Any) -> None:
if self.__is_magic_method(name):
# ...check whether we're allowed to mock...
if (
name in self.__UNSETTABLE_MAGICS
or (name in StrictMock.__dict__ and name not in self.__SETTABLE_MAGICS)
) and name != "__hash__":
raise UnsupportedMagic(self, name)
if name not in self.__SETTABLE_MAGICS and name != "__hash__":
raise NotImplementedError(
f"StrictMock does not implement support for {name}"
)
if name == "__hash__" and name in type(self).__dict__:
raise UnsupportedMagic(self, name)
mock_value = self.__validate_and_wrap_mock_value(name, value)
setattr(type(self), name, mock_value)
def __getattr__(self, name: str) -> Any:
if self._template and self.__template_has_attr(name):
raise UndefinedAttribute(self, name)
else:
raise AttributeError(f"'{name}' was not set for {repr(self)}.")
def __delattr__(self, name: str) -> None:
if name in type(self).__dict__:
delattr(type(self), name)
def __repr__(self) -> str:
template_str = (
" template={}.{}".format(self._template.__module__, self._template.__name__) if self._template
else ""
)
if self.__dict__["_name"]:
name_str = " name={}".format(repr(self.__dict__["_name"]))
else:
name_str = ""
if self.__dict__["__caller"]:
caller_str = " {}".format(self.__dict__["__caller"])
else:
caller_str = ""
return "<StrictMock 0x{:02X}{name}{template}{caller}>".format(
id(self), name=name_str, template=template_str, caller=caller_str
)
def __str__(self) -> str:
return self.__repr__()
def __get_copy(self) -> "StrictMock":
self_copy = StrictMock(
template=self._template,
runtime_attrs=self._runtime_attrs,
name=self._name,
type_validation=self._type_validation,
attributes_to_skip_type_validation=self._attributes_to_skip_type_validation,
)
self_copy.__dict__["__caller"] = self.__get_caller(2)
return self_copy
def __get_copyable_attrs(self, self_copy: "StrictMock") -> List[str]:
return [
name
for name in type(self).__dict__
if name not in self_copy.__dict__
and (
not name.startswith("__")
or not name.endswith("__")
or name in self.__SETTABLE_MAGICS
)
]
def __copy__(self) -> "StrictMock":
self_copy = self.__get_copy()
for name in self.__get_copyable_attrs(self_copy):
setattr(type(self_copy), name, type(self).__dict__[name])
return self_copy
def __deepcopy__(self, memo: Optional[Dict[Any, Any]] = None) -> "StrictMock":
if memo is None:
memo = {}
self_copy = self.__get_copy()
memo[id(self)] = self_copy
for name in self.__get_copyable_attrs(self_copy):
value = copy.deepcopy(type(self).__dict__[name], memo)
setattr(type(self_copy), name, value)
return self_copy
def _extract_StrictMock_template(mock_obj: StrictMock) -> Optional[Any]:
if "_template" in mock_obj.__dict__ and mock_obj._template is not None:
return mock_obj._template
return None
testslide.lib.MOCK_TEMPLATE_EXTRACTORS[StrictMock] = _extract_StrictMock_template
| true
| true
|
f70a0ce72f008baa85734794ad2293305ba995a2
| 3,653
|
py
|
Python
|
mmhid/datasets/builder.py
|
zjutcv/mmhid
|
faeaf4fb5c634037c6e482f63ef73e7f2144c7b5
|
[
"Apache-2.0"
] | 1
|
2021-11-26T07:41:39.000Z
|
2021-11-26T07:41:39.000Z
|
mmhid/datasets/builder.py
|
zjutcv/mmhid
|
faeaf4fb5c634037c6e482f63ef73e7f2144c7b5
|
[
"Apache-2.0"
] | null | null | null |
mmhid/datasets/builder.py
|
zjutcv/mmhid
|
faeaf4fb5c634037c6e482f63ef73e7f2144c7b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
DATA_ROOT = {
'BIT': './data/BIT',
'UT': './data/ut120',
'highfive': './data/highfive'
}
FRAMES_ROOT = {
'BIT': 'Bit-frames',
}
ANNO_ROOT = {
'BIT': 'BIT-anno/tidy_anno'
}
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
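    # e.g. limits (1024, 65536) -> soft limit raised to 4096;
    # limits (8192, 65536) -> soft limit kept at 8192 (illustrative values)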
HID_DATASETS = Registry('hid_dataset')
HID_PIPELINES = Registry('hid_pipeline')
def build_dataset(cfg, default_args=None):
dataset = build_from_cfg(cfg, HID_DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
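    Example (illustrative; assumes ``dataset`` was built via build_dataset):
        >>> loader = build_dataloader(dataset, samples_per_gpu=2,
        ...                           workers_per_gpu=2, dist=False, seed=42)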
"""
rank, world_size = get_dist_info()
if dist:
# DistributedGroupSampler will definitely shuffle the data to satisfy
# that images on each GPU are in the same group
if shuffle:
sampler = DistributedGroupSampler(
dataset, samples_per_gpu, world_size, rank, seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=False, seed=seed)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
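    # e.g. num_workers=4, rank=1, worker_id=2, seed=7 -> 4 * 1 + 2 + 7 = 13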
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| 31.491379
| 79
| 0.662743
|
import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
DATA_ROOT = {
'BIT': './data/BIT',
'UT': './data/ut120',
'highfive': './data/highfive'
}
FRAMES_ROOT = {
'BIT': 'Bit-frames',
}
ANNO_ROOT = {
'BIT': 'BIT-anno/tidy_anno'
}
if platform.system() != 'Windows':
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
HID_DATASETS = Registry('hid_dataset')
HID_PIPELINES = Registry('hid_pipeline')
def build_dataset(cfg, default_args=None):
dataset = build_from_cfg(cfg, HID_DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
rank, world_size = get_dist_info()
if dist:
if shuffle:
sampler = DistributedGroupSampler(
dataset, samples_per_gpu, world_size, rank, seed=seed)
else:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=False, seed=seed)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| true
| true
|
f70a0d2a2029fd539f60c8d2b5e2c5f1548616f4
| 9,653
|
py
|
Python
|
bases_2021_1S/Grupo 01/Parser/ui/PantallaPrincipal.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
bases_2021_1S/Grupo 01/Parser/ui/PantallaPrincipal.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
bases_2021_1S/Grupo 01/Parser/ui/PantallaPrincipal.py
|
dadu0699/tytus
|
e1920f6932c840859e3e79eb8756a1d3da88bd77
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from sys import path
from os.path import dirname as dir
import webbrowser
import os
path.append(dir(path[0]))
from tkinter import ttk
import tkinter as tk
from tkinter import *
from ui.Pantalla_TS import *
from ui.Pantalla_AST import *
from ui.Pantalla_Error import *
import tkinter.messagebox
from analizer import interpreter
class Pantalla:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
self.inicializarScreen()
def inicializarScreen(self):
        # screen initialization
self.window = Tk()
self.window.geometry("700x750")
self.window.resizable(0, 0)
self.window.title("Query Tool")
self.frame_entrada = Frame(
self.window, height=300, width=520, bd=10, bg="#d3d3d3"
)
self.txt_scroll = Scrollbar(self.frame_entrada)
self.txt_scroll.pack(side=RIGHT, fill=Y)
self.txt_entrada = tk.Text(
self.frame_entrada, yscrollcommand=self.txt_scroll.set, height=15, width=80
)
self.txt_entrada.pack(side=TOP)
self.txt_scroll.config(command=self.txt_entrada.yview)
self.frame_entrada.pack()
        # definition of the menu items
navMenu = Menu(self.window)
navMenu.add_command(label="Tabla de Simbolos", command=self.open_ST)
navMenu.add_command(label="AST", command=self.open_AST)
navMenu.add_command(label="AST pdf", command=self.open_PDF)
navMenu.add_command(
label="Reporte de errores",
command=self.open_Reporte,
)
self.window.config(menu=navMenu)
frame_btn = Frame(self.window)
btn = Button(frame_btn, text="Consultar", command=self.analize)
btn.pack(side=LEFT, anchor=E, padx=25, pady=20)
btn_1 = Button(frame_btn, text="Parsear", command=self.parse)
btn_1.pack(side=LEFT, anchor=E, padx=25, pady=20)
frame_btn.pack()
        # creation of the notebook
self.tabControl = ttk.Notebook(self.window, width=650, height=300)
console_frame = Frame(self.tabControl, height=20, width=150, bg="#d3d3d3")
self.text_Consola = tk.Text(console_frame, height=20, width=150)
self.text_Consola.pack(fill=BOTH)
console_frame.pack(fill=BOTH)
self.tabControl.add(console_frame, text="Consola")
self.tabControl.pack()
self.window.mainloop()
def show_result(self, consults):
if consults != None:
i = 0
for consult in consults:
i += 1
if consult != None:
frame = Frame(self.tabControl, height=300, width=450, bg="#d3d3d3")
                    # creation of the scrollbar
table_scroll = Scrollbar(frame, orient="vertical")
table_scrollX = Scrollbar(frame, orient="horizontal")
table = ttk.Treeview(
frame,
yscrollcommand=table_scroll.set,
xscrollcommand=table_scrollX.set,
height=12,
)
table_scroll.config(command=table.yview)
table_scrollX.config(command=table.xview)
self.fill_table(consult[0], consult[1], table)
table_scroll.pack(side=RIGHT, fill=Y)
table_scrollX.pack(side=BOTTOM, fill=X)
table.pack(side=LEFT, fill=BOTH)
frame.pack(fill=BOTH)
self.tabControl.add(frame, text="Consulta " + str(i))
else:
self.text_Consola.insert(
INSERT, "Error: Consulta sin resultado" + "\n"
)
self.tabControl.pack()
def parse(self):
self.refresh()
input = ""
input = self.txt_entrada.get(
"1.0", END
        )  # variable storing the input
result = interpreter.parser(input)
if len(result["lexical"]) + len(result["syntax"]) == 0:
tkinter.messagebox.showerror(
title="Mensaje", message="La consulta no contiene errores"
)
else:
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
def analize(self):
self.refresh()
entrada = ""
entrada = self.txt_entrada.get(
"1.0", END
        )  # variable storing the input
result = interpreter.execution(entrada)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
if len(self.postgreSQL) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------ERRORS----------" + "\n")
while i < len(self.postgreSQL):
self.text_Consola.insert(INSERT, self.postgreSQL[i] + "\n")
i += 1
querys = result["querys"]
self.show_result(querys)
messages = result["messages"]
if len(messages) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------MESSAGES----------" + "\n")
while i < len(messages):
self.text_Consola.insert(INSERT, ">> " + str(messages[i]) + "\n")
i += 1
self.tabControl.pack()
def refresh(self):
tabls = self.tabControl.tabs()
i = 1
while i < len(tabls):
self.tabControl.forget(tabls[i])
i += 1
self.text_Consola.delete("1.0", "end")
self.semanticErrors.clear()
self.syntacticErrors.clear()
self.lexicalErrors.clear()
self.postgreSQL.clear()
self.ts.clear()
def fill_table(
self, columns, rows, table
    ):  # function that displays the output of the query/queries
table["columns"] = columns
"""
        Definition of columns and header
"""
table.column("#0", width=25, minwidth=50)
i = 0
ancho = int(600 / len(columns))
if ancho < 100:
ancho = 100
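        # e.g. 4 columns -> 150 px each; 10 columns -> 60 px, clamped to 100 px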
while i < len(columns):
table.column(str(i), width=ancho, minwidth=50, anchor=CENTER)
i += 1
table.heading("#0", text="#", anchor=CENTER)
i = 0
while i < len(columns):
table.heading(str(i), text=str(columns[i]), anchor=CENTER)
i += 1
"""
        Insertion of rows
"""
i = 0
for row in rows:
i += 1
table.insert(parent="", index="end", iid=i, text=i, values=(row))
    def open_ST(self):  # opens the symbol table screen
windowTableS = Pantalla_TS(self.window, self.ts, self.indexes)
    def open_AST(self):  # opens the AST screen
windowTableS = Pantalla_AST(self.window)
    def open_Reporte(self):  # opens the error report screen
windowTableS = Pantalla_Error(
self.window, self.lexicalErrors, self.syntacticErrors, self.semanticErrors
)
def open_PDF(self):
url = "file:///" + os.path.realpath("test-output/round-table.gv.pdf")
webbrowser.open(url)
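# Usage sketch (illustrative; ``sql_text`` is an assumed variable):
#   Pantalla()  # builds the Tk window and blocks in mainloop()
#   salida = Pantalla2().MetodoParser(sql_text)  # headless parse/execute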
class Pantalla2:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
def MetodoParser(self, texto):
        # declare the return value
        salida = ""
        # run the parser
result = interpreter.execution(texto)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
if len(self.postgreSQL) > 0:
i = 0
salida += "================================================== \n"
salida += " TYTUS ERROR \n"
salida += "================================================== \n"
while i < len(self.postgreSQL):
salida += ">> " + str(self.postgreSQL[i]) + "\n"
i += 1
querys = result["querys"]
messages = result["messages"]
if len(messages) > 0:
i = 0
salida += "==================================================\n"
salida += " TYTUS \n"
salida += "================================================== \n"
while i < len(messages):
salida += ">> " + str(messages[i]) + "\n"
i += 1
return salida
| 36.984674
| 87
| 0.530716
|
from sys import path
from os.path import dirname as dir
import webbrowser
import os
path.append(dir(path[0]))
from tkinter import ttk
import tkinter as tk
from tkinter import *
from ui.Pantalla_TS import *
from ui.Pantalla_AST import *
from ui.Pantalla_Error import *
import tkinter.messagebox
from analizer import interpreter
class Pantalla:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
self.inicializarScreen()
def inicializarScreen(self):
self.window = Tk()
self.window.geometry("700x750")
self.window.resizable(0, 0)
self.window.title("Query Tool")
self.frame_entrada = Frame(
self.window, height=300, width=520, bd=10, bg="#d3d3d3"
)
self.txt_scroll = Scrollbar(self.frame_entrada)
self.txt_scroll.pack(side=RIGHT, fill=Y)
self.txt_entrada = tk.Text(
self.frame_entrada, yscrollcommand=self.txt_scroll.set, height=15, width=80
)
self.txt_entrada.pack(side=TOP)
self.txt_scroll.config(command=self.txt_entrada.yview)
self.frame_entrada.pack()
navMenu = Menu(self.window)
navMenu.add_command(label="Tabla de Simbolos", command=self.open_ST)
navMenu.add_command(label="AST", command=self.open_AST)
navMenu.add_command(label="AST pdf", command=self.open_PDF)
navMenu.add_command(
label="Reporte de errores",
command=self.open_Reporte,
)
self.window.config(menu=navMenu)
frame_btn = Frame(self.window)
btn = Button(frame_btn, text="Consultar", command=self.analize)
btn.pack(side=LEFT, anchor=E, padx=25, pady=20)
btn_1 = Button(frame_btn, text="Parsear", command=self.parse)
btn_1.pack(side=LEFT, anchor=E, padx=25, pady=20)
frame_btn.pack()
self.tabControl = ttk.Notebook(self.window, width=650, height=300)
console_frame = Frame(self.tabControl, height=20, width=150, bg="#d3d3d3")
self.text_Consola = tk.Text(console_frame, height=20, width=150)
self.text_Consola.pack(fill=BOTH)
console_frame.pack(fill=BOTH)
self.tabControl.add(console_frame, text="Consola")
self.tabControl.pack()
self.window.mainloop()
def show_result(self, consults):
if consults != None:
i = 0
for consult in consults:
i += 1
if consult != None:
frame = Frame(self.tabControl, height=300, width=450, bg="#d3d3d3")
table_scroll = Scrollbar(frame, orient="vertical")
table_scrollX = Scrollbar(frame, orient="horizontal")
table = ttk.Treeview(
frame,
yscrollcommand=table_scroll.set,
xscrollcommand=table_scrollX.set,
height=12,
)
table_scroll.config(command=table.yview)
table_scrollX.config(command=table.xview)
self.fill_table(consult[0], consult[1], table)
table_scroll.pack(side=RIGHT, fill=Y)
table_scrollX.pack(side=BOTTOM, fill=X)
table.pack(side=LEFT, fill=BOTH)
frame.pack(fill=BOTH)
self.tabControl.add(frame, text="Consulta " + str(i))
else:
self.text_Consola.insert(
INSERT, "Error: Consulta sin resultado" + "\n"
)
self.tabControl.pack()
def parse(self):
self.refresh()
input = ""
input = self.txt_entrada.get(
"1.0", END
        )
        result = interpreter.parser(input)
if len(result["lexical"]) + len(result["syntax"]) == 0:
tkinter.messagebox.showerror(
title="Mensaje", message="La consulta no contiene errores"
)
else:
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
def analize(self):
self.refresh()
entrada = ""
entrada = self.txt_entrada.get(
"1.0", END
        )
        result = interpreter.execution(entrada)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
tkinter.messagebox.showerror(
title="Error", message="La consulta contiene errores"
)
if len(self.postgreSQL) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------ERRORS----------" + "\n")
while i < len(self.postgreSQL):
self.text_Consola.insert(INSERT, self.postgreSQL[i] + "\n")
i += 1
querys = result["querys"]
self.show_result(querys)
messages = result["messages"]
if len(messages) > 0:
i = 0
self.text_Consola.insert(INSERT, "-----------MESSAGES----------" + "\n")
while i < len(messages):
self.text_Consola.insert(INSERT, ">> " + str(messages[i]) + "\n")
i += 1
self.tabControl.pack()
def refresh(self):
tabls = self.tabControl.tabs()
i = 1
while i < len(tabls):
self.tabControl.forget(tabls[i])
i += 1
self.text_Consola.delete("1.0", "end")
self.semanticErrors.clear()
self.syntacticErrors.clear()
self.lexicalErrors.clear()
self.postgreSQL.clear()
self.ts.clear()
def fill_table(
self, columns, rows, table
    ):
        table["columns"] = columns
table.column("#0", width=25, minwidth=50)
i = 0
ancho = int(600 / len(columns))
if ancho < 100:
ancho = 100
while i < len(columns):
table.column(str(i), width=ancho, minwidth=50, anchor=CENTER)
i += 1
table.heading("#0", text="#", anchor=CENTER)
i = 0
while i < len(columns):
table.heading(str(i), text=str(columns[i]), anchor=CENTER)
i += 1
i = 0
for row in rows:
i += 1
table.insert(parent="", index="end", iid=i, text=i, values=(row))
    def open_ST(self):
        windowTableS = Pantalla_TS(self.window, self.ts, self.indexes)
    def open_AST(self):
        windowTableS = Pantalla_AST(self.window)
    def open_Reporte(self):
        windowTableS = Pantalla_Error(
self.window, self.lexicalErrors, self.syntacticErrors, self.semanticErrors
)
def open_PDF(self):
url = "file:///" + os.path.realpath("test-output/round-table.gv.pdf")
webbrowser.open(url)
class Pantalla2:
def __init__(self):
self.lexicalErrors = list()
self.syntacticErrors = list()
self.semanticErrors = list()
self.postgreSQL = list()
self.ts = list()
def MetodoParser(self, texto):
        salida = ""
result = interpreter.execution(texto)
self.lexicalErrors = result["lexical"]
self.syntacticErrors = result["syntax"]
self.semanticErrors = result["semantic"]
self.postgreSQL = result["postgres"]
self.ts = result["symbols"]
self.indexes = result["indexes"]
if (
len(self.lexicalErrors)
+ len(self.syntacticErrors)
+ len(self.semanticErrors)
+ len(self.postgreSQL)
> 0
):
if len(self.postgreSQL) > 0:
i = 0
salida += "================================================== \n"
salida += " TYTUS ERROR \n"
salida += "================================================== \n"
while i < len(self.postgreSQL):
salida += ">> " + str(self.postgreSQL[i]) + "\n"
i += 1
querys = result["querys"]
messages = result["messages"]
if len(messages) > 0:
i = 0
salida += "==================================================\n"
salida += " TYTUS \n"
salida += "================================================== \n"
while i < len(messages):
salida += ">> " + str(messages[i]) + "\n"
i += 1
return salida
| true
| true
|
f70a0dbd0faf4dc6a673ed3d6bd04f9fe282784e
| 12,251
|
py
|
Python
|
jupyterhub_config.py
|
victor-moreno/jupyterhub-deploy-docker-VM
|
002af508122d0f1919c704f719acd3d837174d4b
|
[
"BSD-3-Clause"
] | 3
|
2021-11-15T12:54:24.000Z
|
2022-02-07T07:45:24.000Z
|
jupyterhub_config.py
|
victor-moreno/jupyterhub-deploy-docker-VM
|
002af508122d0f1919c704f719acd3d837174d4b
|
[
"BSD-3-Clause"
] | 1
|
2022-01-10T21:01:31.000Z
|
2022-03-15T03:48:13.000Z
|
jupyterhub_config.py
|
victor-moreno/jupyterhub-deploy-docker-VM
|
002af508122d0f1919c704f719acd3d837174d4b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-08T20:05:45.000Z
|
2022-02-08T20:05:45.000Z
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Configuration file for JupyterHub
import os
# pre-spawn settings
NB_UID = 65534
NB_GID = 65534
CUDA = 'cuda' in os.environ['HOSTNODE']
c = get_config()
# read users/teams & images
import os, yaml
with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile:
cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)
team_map = cfg['users']
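# Assumed config.yaml shape (illustrative sketch inferred from the accessors
# below, not the original file):
#   users:
#     alice: [{uid: 1001}, {gid: 100}, teamA, admin, sudo]
#   images: {teamA: [D, G]}
#   available_images: {D: {Datascience: 'jupyter/datascience-notebook:latest'}}
#   mounts: {teamA: {/data/teamA: [/home/USER/data, rw]}}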
# Whitelist users and admins  # google: remove @gmail.com
c.Authenticator.allowed_users = list(team_map.keys())
c.Authenticator.admin_users = admin = set()
for u, team in team_map.items():
if 'admin' in team:
admin.add(u)
# Spawn single-user servers as Docker containers
# CustomDockerSpawner
# form to select image
def get_options_form(spawner):
username = spawner.user.name # .split('@')[0]
teams = cfg['users'][username]
images = cfg['images']
# list of image letters for user
img = {k:v for k,v in images.items() if k in teams }
images = [] # unique list
for t,i in img.items():
for k in i:
if k not in images:
images.append(k)
if not CUDA:
images = [i for i in images if i != 'G']
# dict of image label:build
available_images = cfg['available_images']
allowed_images = [v for k,v in available_images.items() if k in images]
images=[]
for i in allowed_images:
images = images | i.items()
allowed_images = dict(images)
allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0]))
# prepare form
if len(allowed_images) > 1:
option_t = '<option value="{image}" {selected}>{label}</option>'
options = [
option_t.format(
image=image, label=label, selected='selected' if image == spawner.image else ''
)
for label, image in allowed_images.items()
]
return """
<br><br>
<h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '')
else:
spawner.image = [v for k,v in allowed_images.items()][0]
c.DockerSpawner.options_form = get_options_form
def set_sudo(spawner):
username = spawner.user.name
teams = cfg['users'][username]
if 'sudo' in teams:
return 'yes'
else:
return 'no'
def set_USER(spawner):
username = spawner.user.name
if username[0:4].isnumeric():
return username.upper()
else:
return username
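# e.g. set_USER: '1234abc' -> '1234ABC', 'alice' -> 'alice'
# (usernames starting with four digits are uppercased)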
def set_HOME(spawner):
return '/home/' + spawner.user.name
def set_UID(spawner):
UID = cfg['users'][spawner.user.name][0]['uid']
if UID >= 1 and UID < 65536:
return UID
else:
return 1000
def set_GID(spawner):
GID = cfg['users'][spawner.user.name][1]['gid']
if GID >= 1 and GID < 65536:
return GID
else:
return 100
c.DockerSpawner.environment = {
'NB_USER': set_USER,
'NB_UID': set_UID,
'NB_GID': set_GID,
'NB_UMASK':'002',
'CHOWN_HOME':'yes',
'GRANT_SUDO': set_sudo,
}
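# e.g. for user 'alice' with uid 1001 / gid 100 in config.yaml, the container
# env resolves to NB_USER=alice NB_UID=1001 NB_GID=100 NB_UMASK=002
# CHOWN_HOME=yes, and GRANT_SUDO=yes only if 'sudo' is among her teams
# (illustrative values)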
home_dir = os.environ.get('HOME_DIR')
# notebook_dir = '/home/' + spawner.user.name
# c.DockerSpawner.notebook_dir = notebook_dir
from dockerspawner import DockerSpawner
class CustomDockerSpawner(DockerSpawner):
# mount volumes by team
def start(self):
username = set_USER(self)
# username = self.user.name
# home dir
self.volumes[f"{home_dir}/{username.split('@')[0]}"] = {
'bind': '/home/' + username ,
'mode': 'rw',
}
# copy system /etc/group file
self.volumes['/etc/group'] = {
'bind': '/tmp/group',
'mode': 'ro',
}
# mount /srv from files in /singleuser/srv/setup
self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = {
'bind': '/srv',
'mode': 'ro',
}
# user specific mounts as in config.yaml
teams = cfg['users'][self.user.name] # lowercase
mounts = cfg['mounts']
mounts = {k:v for k,v in mounts.items() if k in teams }
for k,v in mounts.items():
for h,d in v.items():
self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] }
return super().start()
# c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.JupyterHub.spawner_class = CustomDockerSpawner
# hub runs as 'root'
c.DockerSpawner.extra_create_kwargs = {
'user': 'root',
'hostname': 'hub',
}
# nvidia runtime; raise /dev/shm from the 64M default to 16G
if CUDA:
c.DockerSpawner.extra_host_config = {
'runtime': 'nvidia',
'shm_size': '16gb'
}
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
spawn_cmd = "start-singleuser.sh"
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Connect containers to this Docker network
network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name })
# Mount the real user's Docker volume on the host to the notebook user's
# notebook directory in the container
#c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# external proxy
c.JupyterHub.cleanup_servers = False
# tells the hub to not stop servers when the hub restarts (proxy runs separately).
c.ConfigurableHTTPProxy.should_start = False
# tells the hub that the proxy should not be started (because you start it yourself).
c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN')
# token for authenticating communication with the proxy.
c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001'
# the URL which the hub uses to connect to the proxy’s API.
# Remove containers once they are stopped
c.DockerSpawner.remove_containers = True
# User containers will access hub by container name on the Docker network
c.JupyterHub.base_url = '/jhub/'
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# not needed because we are behind an https reverse proxy
# # TLS config: requires generating certificates
# c.JupyterHub.port = 443
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
# Persist hub data on volume mounted inside container
data_dir = '/data'
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite'
# c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
# host=os.environ['POSTGRES_HOST'],
# password=os.environ['POSTGRES_PASSWORD'],
# db=os.environ['POSTGRES_DB'],
# )
# reset database
# c.JupyterHub.reset_db = False
# Authenticate users
'''
# GitHub
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL']
# Native
# admin users in c.Authenticator.admin_users are automatically authorized when signup
c.JupyterHub.authenticator_class = 'nativeauthenticator.NativeAuthenticator'
'''
##### multioauth
# https://github.com/jupyterhub/oauthenticator/issues/136
from traitlets import List
from jupyterhub.auth import Authenticator
def url_path_join(*parts):
return '/'.join([p.strip().strip('/') for p in parts])
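# e.g. url_path_join('/github/', '/oauth_login') == 'github/oauth_login'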
class MultiOAuthenticator(Authenticator):
authenticators = List(help="The subauthenticators to use", config=True)
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self._authenticators = []
for authenticator_klass, url_scope, configs in self.authenticators:
c = self.trait_values()
c.update(configs)
self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope})
def get_custom_html(self, base_url):
html = []
for authenticator in self._authenticators:
login_service = authenticator["instance"].login_service
if login_service == 'User/Pass':
url = url_path_join(authenticator["url_scope"], "login")
else:
url = url_path_join(authenticator["url_scope"], "oauth_login")
# html.append(
# f"""
# <div class="service-login">
# <a role="button" class='btn btn-jupyter btn-lg' href='{url}'>
# Sign in with {login_service}
# </a>
# </div>
# """
# )
return "\n".join(html)
def get_handlers(self, app):
routes = []
for _authenticator in self._authenticators:
for path, handler in _authenticator["instance"].get_handlers(app):
class SubHandler(handler):
authenticator = _authenticator["instance"]
routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler))
return routes
c.JupyterHub.authenticator_class = MultiOAuthenticator
from oauthenticator.github import GitHubOAuthenticator
from oauthenticator.google import GoogleOAuthenticator
from nativeauthenticator import NativeAuthenticator
#from oauthenticator.azuread import AzureAdOAuthenticator
c.MultiOAuthenticator.authenticators = [
(GitHubOAuthenticator, '/github', {
'client_id': os.environ['GITHUB_CLIENT_ID'],
'client_secret': os.environ['GITHUB_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL']
}),
(GoogleOAuthenticator, '/google', {
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'],
'login_service': 'Google'
}),
(NativeAuthenticator, '/', {
'login_service': 'User/Pass'
}),
]
import nativeauthenticator
c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"]
# template modified to allow github/google oauth
# ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"]
# google
# https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html
c.GoogleOAuthenticator.hosted_domain = ['gmail.com']
c.GoogleOAuthenticator.login_service = 'Google'
c.GoogleOAuthenticator.delete_invalid_users = True
c.NativeAuthenticator.check_common_password = True
c.NativeAuthenticator.minimum_password_length = 8
c.NativeAuthenticator.allowed_failed_logins = 3
c.NativeAuthenticator.enable_signup = True
# recaptcha config
# https://www.google.com/recaptcha/admin/site/500725121/settings
c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY']
c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET']
c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>'
## enable authentication state
c.MultiOAuthenticator.enable_auth_state = True
import warnings
if 'JUPYTERHUB_CRYPT_KEY' not in os.environ:
warnings.warn(
"Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n"
" export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)"
)
c.CryptKeeper.keys = [ os.urandom(32) ]
pass
'''
# remove idle notebooks after inactive time
# https://github.com/jupyterhub/jupyterhub-idle-culler
import sys
c.JupyterHub.services = [
{
'name': 'idle-culler',
'admin': True,
'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'],
}
]
'''
# max simultaneous users
c.JupyterHub.concurrent_spawn_limit = 10
# user limits
# c.Spawner.cpu_limit = 2 # cores
# c.Spawner.mem_limit = 8G
| 31.656331
| 125
| 0.671455
|
import os
NB_UID = 65534
NB_GID = 65534
CUDA = 'cuda' in os.environ['HOSTNODE']
c = get_config()
import os, yaml
with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile:
cfg = yaml.load(cfgfile, Loader=yaml.FullLoader)
team_map = cfg['users']
c.Authenticator.allowed_users = list(team_map.keys())
c.Authenticator.admin_users = admin = set()
for u, team in team_map.items():
if 'admin' in team:
admin.add(u)
def get_options_form(spawner):
    username = spawner.user.name
    teams = cfg['users'][username]
images = cfg['images']
img = {k:v for k,v in images.items() if k in teams }
    images = []
    for t,i in img.items():
for k in i:
if k not in images:
images.append(k)
if not CUDA:
images = [i for i in images if i != 'G']
available_images = cfg['available_images']
allowed_images = [v for k,v in available_images.items() if k in images]
    images = set()
    for i in allowed_images:
        # merge the per-team image dicts into one {label: image} mapping
        images |= set(i.items())
    allowed_images = dict(images)
allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0]))
if len(allowed_images) > 1:
option_t = '<option value="{image}" {selected}>{label}</option>'
options = [
option_t.format(
image=image, label=label, selected='selected' if image == spawner.image else ''
)
for label, image in allowed_images.items()
]
return """
<br><br>
<h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '')
else:
spawner.image = [v for k,v in allowed_images.items()][0]
c.DockerSpawner.options_form = get_options_form
def set_sudo(spawner):
username = spawner.user.name
teams = cfg['users'][username]
if 'sudo' in teams:
return 'yes'
else:
return 'no'
def set_USER(spawner):
username = spawner.user.name
if username[0:4].isnumeric():
return username.upper()
else:
return username
def set_HOME(spawner):
return '/home/' + spawner.user.name
def set_UID(spawner):
UID = cfg['users'][spawner.user.name][0]['uid']
if UID >= 1 and UID < 65536:
return UID
else:
return 1000
def set_GID(spawner):
GID = cfg['users'][spawner.user.name][1]['gid']
if GID >= 1 and GID < 65536:
return GID
else:
        return 100
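# Assumed shape of the 'users' mapping in config.yaml, inferred from the
# lookups above (element 0 carries the uid, element 1 the gid, the remaining
# entries are team/flag names such as 'admin' or 'sudo'):
#   users:
#     alice:
#       - uid: 1001
#       - gid: 100
#       - admin
#       - teamA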
c.DockerSpawner.environment = {
'NB_USER': set_USER,
'NB_UID': set_UID,
'NB_GID': set_GID,
'NB_UMASK':'002',
'CHOWN_HOME':'yes',
'GRANT_SUDO': set_sudo,
}
home_dir = os.environ.get('HOME_DIR')
from dockerspawner import DockerSpawner
class CustomDockerSpawner(DockerSpawner):
def start(self):
username = set_USER(self)
self.volumes[f"{home_dir}/{username.split('@')[0]}"] = {
'bind': '/home/' + username ,
'mode': 'rw',
}
self.volumes['/etc/group'] = {
'bind': '/tmp/group',
'mode': 'ro',
}
self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = {
'bind': '/srv',
'mode': 'ro',
}
        teams = cfg['users'][self.user.name]
        mounts = cfg['mounts']
mounts = {k:v for k,v in mounts.items() if k in teams }
for k,v in mounts.items():
for h,d in v.items():
self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] }
return super().start()
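# Illustrative effect of the bindings above (paths hypothetical): for user
# 'alice@lab.org' with HOME_DIR=/export/homes the spawner mounts
#   /export/homes/alice              -> /home/alice@lab.org  (rw)
#   /etc/group                       -> /tmp/group           (ro)
#   $JHUB_DIR/singleuser/srv/setup   -> /srv                 (ro)
# plus any team mounts from config.yaml, with 'USER' in the container path
# replaced by the username.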
c.JupyterHub.spawner_class = CustomDockerSpawner
c.DockerSpawner.extra_create_kwargs = {
'user': 'root',
'hostname': 'hub',
}
if CUDA:
c.DockerSpawner.extra_host_config = {
'runtime': 'nvidia',
'shm_size': '16gb'
}
spawn_cmd = "start-singleuser.sh"
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
network_name = os.environ['DOCKER_NETWORK_NAME']
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name })
c.JupyterHub.cleanup_servers = False
c.ConfigurableHTTPProxy.should_start = False
c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN')
c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001'
c.DockerSpawner.remove_containers = True
c.JupyterHub.base_url = '/jhub/'
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8080
# # TLS config: requires generating certificates
# c.JupyterHub.port = 443
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']
# Persist hub data on volume mounted inside container
data_dir = '/data'
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite'
# c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
# host=os.environ['POSTGRES_HOST'],
# password=os.environ['POSTGRES_PASSWORD'],
# db=os.environ['POSTGRES_DB'],
# )
# reset database
# c.JupyterHub.reset_db = False
# Authenticate users
##### multioauth
# https://github.com/jupyterhub/oauthenticator/issues/136
from traitlets import List
from jupyterhub.auth import Authenticator
def url_path_join(*parts):
return '/'.join([p.strip().strip('/') for p in parts])
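# e.g. url_path_join('/github', '/oauth_login') -> 'github/oauth_login'
# (leading and trailing slashes are stripped from every part before joining)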
class MultiOAuthenticator(Authenticator):
authenticators = List(help="The subauthenticators to use", config=True)
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self._authenticators = []
for authenticator_klass, url_scope, configs in self.authenticators:
c = self.trait_values()
c.update(configs)
self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope})
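    # Each sub-authenticator therefore starts from this wrapper's trait values
    # (e.g. enable_auth_state set further below) and only then applies the
    # per-entry config dict from the authenticators list.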
def get_custom_html(self, base_url):
html = []
for authenticator in self._authenticators:
login_service = authenticator["instance"].login_service
if login_service == 'User/Pass':
url = url_path_join(authenticator["url_scope"], "login")
else:
url = url_path_join(authenticator["url_scope"], "oauth_login")
# html.append(
# f"""
# <div class="service-login">
# <a role="button" class='btn btn-jupyter btn-lg' href='{url}'>
# Sign in with {login_service}
# </a>
# </div>
# """
# )
return "\n".join(html)
def get_handlers(self, app):
routes = []
for _authenticator in self._authenticators:
for path, handler in _authenticator["instance"].get_handlers(app):
class SubHandler(handler):
authenticator = _authenticator["instance"]
routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler))
return routes
c.JupyterHub.authenticator_class = MultiOAuthenticator
from oauthenticator.github import GitHubOAuthenticator
from oauthenticator.google import GoogleOAuthenticator
from nativeauthenticator import NativeAuthenticator
#from oauthenticator.azuread import AzureAdOAuthenticator
c.MultiOAuthenticator.authenticators = [
(GitHubOAuthenticator, '/github', {
'client_id': os.environ['GITHUB_CLIENT_ID'],
'client_secret': os.environ['GITHUB_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL']
}),
(GoogleOAuthenticator, '/google', {
'client_id': os.environ['GOOGLE_CLIENT_ID'],
'client_secret': os.environ['GOOGLE_CLIENT_SECRET'],
'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'],
'login_service': 'Google'
}),
(NativeAuthenticator, '/', {
'login_service': 'User/Pass'
}),
]
import nativeauthenticator
c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"]
# template modified to allow github/google oauth
# ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"]
# google
# https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html
c.GoogleOAuthenticator.hosted_domain = ['gmail.com']
c.GoogleOAuthenticator.login_service = 'Google'
c.GoogleOAuthenticator.delete_invalid_users = True
c.NativeAuthenticator.check_common_password = True
c.NativeAuthenticator.minimum_password_length = 8
c.NativeAuthenticator.allowed_failed_logins = 3
c.NativeAuthenticator.enable_signup = True
# recaptcha config
# https://www.google.com/recaptcha/admin/site/500725121/settings
c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY']
c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET']
c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>'
## enable authentication state
c.MultiOAuthenticator.enable_auth_state = True
import warnings
if 'JUPYTERHUB_CRYPT_KEY' not in os.environ:
warnings.warn(
"Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n"
" export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)"
)
c.CryptKeeper.keys = [ os.urandom(32) ]
pass
# max simultaneous users
c.JupyterHub.concurrent_spawn_limit = 10
# user limits
# c.Spawner.cpu_limit = 2 # cores
# c.Spawner.mem_limit = 8G
| true
| true
|
f70a0e8a1e36c7d6b49ffd219b5fb6ca1c543c6b
| 14,575
|
py
|
Python
|
Packs/DeprecatedContent/Integrations/Cymon/Cymon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/DeprecatedContent/Integrations/Cymon/Cymon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/DeprecatedContent/Integrations/Cymon/Cymon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import os
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
if not demisto.params().get('useProxy', False):
    # drop the proxy variables safely even when they are not set
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
''' GLOBAL VARS '''
SERVER_URL_V1 = 'https://www.cymon.io:443/api/nexus/v1'
SERVER_DASHBOARD_URL_V1 = 'https://www.cymon.io:443/api/dashboard/v1'
SERVER_URL_V2 = 'https://api.cymon.io/v2/ioc/search'
VERIFY_CERTIFICATES = False if demisto.params().get('unsecure') else True
DEFAULT_HEADERS = {
"Content-Type": "application/json"
}
''' HELPER FUNCTIONS '''
def cymon_says():
return_error('Cymon service discontinued. Please disable or delete the integration instance.')
def http_request(method, url, headers):
try:
res = requests.request(method,
url,
verify=VERIFY_CERTIFICATES,
headers=headers)
if res.status_code == 200:
return res.json()
# 204 HTTP status code is returned when api rate limit has been exceeded
elif res.status_code == 204:
return_error("You've reached your API call quota.")
elif res.status_code == 404:
return {}
res.raise_for_status()
except Exception as e:
raise (e)
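# Example call (endpoint illustrative):
#   http_request('GET', '{}/{}/{}'.format(SERVER_URL_V1, 'ip', '8.8.8.8'), DEFAULT_HEADERS)
# returns the parsed JSON body on HTTP 200, {} on 404, and aborts through
# return_error() once the API quota is exhausted (HTTP 204).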
''' DOMAIN COMMAND '''
# def get_domain_full_report(domain):
# report_results = []
#
# from_param = 0
# size_param = 10
# total = None
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# while total is None or total > from_param:
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# hits = response.get('hits', [])
# for hit in hits:
# timestamp = datetime.strptime(
# hit.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%S.%fZ')
#
# report_results.append({
# 'Title': hit.get('title', "").title(),
# 'Feed': hit.get('feed'),
# 'Timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
# # Formatting the timestamp to human readable date and time
# 'Tags': hit.get('tags'),
# 'Hostname': hit.get('ioc', {}).get('hostname'),
# 'IP': hit.get('ioc', {}).get('ip'),
# 'Domain': hit.get('ioc', {}).get('domain'),
# 'Reported By': hit.get('reported_by'),
# 'Location': hit.get('location', {}).get('country')
# })
#
# from_param = from_param + size_param
# total = int(response.get('total', 0))
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# return report_results
# def get_domain_report(domain_full_report):
# reports = {} # type:dict
#
# for report in domain_full_report:
# title = report.get('Title')
# timestamp = datetime.strptime(
# report.get('Timestamp', datetime.now().strftime("%Y-%m-%d %H:%M:%S")), '%Y-%m-%d %H:%M:%S')
#
# if (title in reports and reports.get(title).get('Timestamp') < timestamp) or title not in reports: # type: ignore
# reports.update({title: {
# 'Feed': report.get('Feed'),
# 'Timestamp': timestamp,
# 'Tags': report.get('Tags'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP'),
# 'Domain': report.get('Domain'),
# 'Reported By': report.get('Reported By'),
# 'Location': report.get('Location')
# }})
#
# report_results = []
#
# for report in reports:
# report_results.append({
# 'Title': report,
# 'Feed': reports.get(report).get('Feed'), # type: ignore
# 'Timestamp': reports.get(report).get('Timestamp').strftime("%Y-%m-%d %H:%M:%S"), # type: ignore
# # Formatting the timestamp to human readable date and time
# 'Tags': reports.get(report).get('Tags'), # type: ignore
# 'Hostname': reports.get(report).get('Hostname'), # type: ignore
# 'IP': reports.get(report).get('IP'), # type: ignore
# 'Domain': reports.get(report).get('Domain'), # type: ignore
# 'Reported By': reports.get(report).get('Reported By'), # type: ignore
# 'Location': reports.get(report).get('Location') # type: ignore
# })
#
# return {
# 'reports': report_results,
# 'total': len(domain_full_report)
# }
# def create_domain_command_markdown(domain, total_hits, reports, domain_full_report, is_full_response):
# md = '## Cymon Domain report for: {}\n'.format(domain)
#
# md += '\n'
#
# md += '**Total Hits:** {}'.format(total_hits)
#
# md += '\n'
#
# md += tableToMarkdown("The following reports are the latest malicious hits resolved to the given domain:", reports,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By', 'Domain'])
#
# if is_full_response:
# md += tableToMarkdown("Full report list:", domain_full_report,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By',
# 'Domain'])
#
# return md
# def create_context_domain_command(domain, reports):
# cymon_domain_context_activities = []
# description = 'Reported suspicious activities: '
#
# for report in reports:
# cymon_domain_context_activities.append({
# 'Title': report.get('Title'),
# 'Tags': report.get('Tags'),
# 'Time': report.get('Timestamp'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP')
# })
#
# description += '{}, '.format(report.get('Title'))
#
# description = description[:-2]
#
# context = {
# outputPaths['domain']: {
# 'Name': domain,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# },
# 'Cymon': {
# 'Domain': {
# 'Activities': cymon_domain_context_activities
# }
# }
# }
#
# return context
# def get_domain_report_command():
# args = demisto.args()
#
# domain = args.get('domain')
# is_full_response = args.get('fullResponse') == 'true'
#
# domain_full_report = get_domain_full_report(domain)
# domain_summarized_report = get_domain_report(domain_full_report)
#
# if len(domain_full_report) == 0:
#         return "Domain " + domain + " is not in Cymon's dataset"
#
# markdown = create_domain_command_markdown(domain, domain_summarized_report.get('total'),
# domain_summarized_report.get('reports'), domain_full_report,
# is_full_response)
# context = create_context_domain_command(domain, domain_summarized_report.get('reports'))
#
# return {
# 'Type': entryTypes['note'],
# 'Contents': domain_full_report,
# 'ContentsFormat': formats['json'],
# 'HumanReadable': markdown,
# 'EntryContext': context
# }
''' IP COMMAND '''
# def get_ip_events_sources(ip):
# url = '{}/{}/{}'.format(SERVER_URL_V1, 'ip', ip)
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# return response.get('sources', None)
# def get_ip_events(ip):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'events', 100)
# events = {} # type:dict
#
# next_link = url
#
# while next_link is not None:
# response = http_request('GET', next_link, DEFAULT_HEADERS)
#
# for event in response.get('results', []):
# tag = event.get('tag')
# date = datetime.strptime(
# event.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# if (tag in events and events[tag] < date) or tag not in events:
# events.update({tag: date})
#
# next_link = response.get('next')
#
# for event in events:
# events[event] = events[event].strftime(
# "%Y-%m-%d %H:%M:%S") # Formatting the timestamp to human readable date and time
#
# return events
# def get_ip_location(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'geolocation', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# lon = response.get('longitude', None)
# lat = response.get('latitude', None)
#
# if not lon or not lat:
# return {}
# else:
# return {
# 'lon': lon,
# 'lat': lat
# }
# def get_ip_domains(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'domains', max_len)
# domains = []
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for domain in response.get('results', []):
# date = datetime.strptime(
# domain.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# domains.append({'Hostname': domain.get('name'),
# 'Last Resolved': date.strftime("%Y-%m-%d %H:%M:%S")})
#
# return domains
# def get_ip_urls(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'urls', max_len)
# urls = {} # type:dict
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for response_url in response.get('results', []):
# url = response_url.get('location')
# if url.endswith("/"):
# url = url[:-1]
#
# date = datetime.strptime(
# response_url.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%SZ')
#
# if (url in urls and urls[url] < date) or url not in urls:
# urls.update({url: date})
#
# urls_result = []
# for url in urls:
# urls_result.append({'Url': url, "Last Resolved": urls[url].strftime(
# "%Y-%m-%d %H:%M:%S")}) # Formatting the timestamp to human readable date and time
#
# return urls_result
# def get_ip_asn(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'ipwhois', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# asn = response.get('asn')
# asn_country_code = response.get('asn_country_code')
#
# if not asn or not asn_country_code:
# return {}
# else:
# return {
# 'asn': asn,
# 'country': asn_country_code
# }
# def create_ip_command_markdown(ip, sources, events, domains, urls, asn):
# md = '## Cymon IP report for: {}\n'.format(ip)
#
# if asn:
# md += 'ASN: **{}** ({})\n'.format(asn.get('asn'), asn.get('country'))
#
# md += '\n'
#
# if events:
# md += '### Reports\n'
# for event in events:
# md += '**{}** (Last reported on: {})\n'.format(event.title(), events[event])
#
# if sources:
# md += '#### Sources\n'
# for source in sources:
# md += '{}\n'.format(source)
#
# if domains and len(domains) > 0:
# md += tableToMarkdown("The following domains were resolved to the given IP address:", domains)
#
# if urls and len(urls) > 0:
# md += tableToMarkdown("The following urls were resolved to the given IP address:", urls)
#
# return md
# def create_ip_command_context(ip, asn, events, domains):
# if events:
# description = 'Reported suspicious activities: '
#
# for event in events:
# description += '{}, '.format(event)
#
# description = description[:-2]
# else:
# description = 'No suspicious activities were reported'
#
# asn_in_context = {} # type:dict
#
# if asn:
# asn_in_context = {
# 'ASN': asn.get('asn'),
# 'Geo': {
# 'Country': asn.get('country')
# }
# }
#
# context = {'Cymon': {
# 'IP': {
# 'Domains': domains
# }
# }, outputPaths['ip']: {
# 'Address': ip,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# }}
#
# context[outputPaths['ip']].update(asn_in_context)
#
# return context
# def get_ip_report_command():
# args = demisto.args()
#
# full_response = args.get('fullResponse') == 'true'
#
# ip = args.get('ip')
# if not is_ip_valid(ip):
#         return_error('An invalid IP was specified')
#
# sources = get_ip_events_sources(ip)
#
# if not sources:
#         return "IP " + ip + " is not in Cymon's dataset"
#
# if full_response:
# max_len = 1000
# else:
# max_len = 50
#
# events = get_ip_events(ip)
# location = get_ip_location(ip)
# domains = get_ip_domains(ip, max_len)
# urls = get_ip_urls(ip, max_len)
# asn = get_ip_asn(ip)
#
# markdown = create_ip_command_markdown(ip, sources, events, domains, urls, asn)
# context = create_ip_command_context(ip, asn, events, domains)
#
# return [
# {
# 'Type': entryTypes['map'],
# 'Contents': {
# 'lat': float(location.get('lat')),
# 'lng': float(location.get('lon'))
# },
# 'ContentsFormat': formats['json']
# },
# {
# 'Type': entryTypes['note'],
# 'Contents': {
# 'events': events,
# 'sources': sources,
# 'location': location,
# 'domains': domains,
# 'urls': urls,
# 'asn': asn
# },
# 'HumanReadable': markdown,
# 'EntryContext': context,
# 'ContentsFormat': formats['json']
# }]
''' EXECUTION CODE '''
try:
command = demisto.command()
if command == 'test-module':
        demisto.results('Cymon has been deprecated and is no longer in service. Please delete the instance.')
elif command == 'ip':
cymon_says()
elif command == 'domain':
cymon_says()
except Exception as e:
raise
| 31.344086
| 124
| 0.535232
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import os
requests.packages.urllib3.disable_warnings()
if not demisto.params().get('useProxy', False):
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
SERVER_URL_V1 = 'https://www.cymon.io:443/api/nexus/v1'
SERVER_DASHBOARD_URL_V1 = 'https://www.cymon.io:443/api/dashboard/v1'
SERVER_URL_V2 = 'https://api.cymon.io/v2/ioc/search'
VERIFY_CERTIFICATES = False if demisto.params().get('unsecure') else True
DEFAULT_HEADERS = {
"Content-Type": "application/json"
}
def cymon_says():
return_error('Cymon service discontinued. Please disable or delete the integration instance.')
def http_request(method, url, headers):
try:
res = requests.request(method,
url,
verify=VERIFY_CERTIFICATES,
headers=headers)
if res.status_code == 200:
return res.json()
elif res.status_code == 204:
return_error("You've reached your API call quota.")
elif res.status_code == 404:
return {}
res.raise_for_status()
except Exception as e:
raise (e)
# def get_domain_full_report(domain):
# report_results = []
#
# from_param = 0
# size_param = 10
# total = None
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# while total is None or total > from_param:
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# hits = response.get('hits', [])
# for hit in hits:
# timestamp = datetime.strptime(
# hit.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%S.%fZ')
#
# report_results.append({
# 'Title': hit.get('title', "").title(),
# 'Feed': hit.get('feed'),
# 'Timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
# # Formatting the timestamp to human readable date and time
# 'Tags': hit.get('tags'),
# 'Hostname': hit.get('ioc', {}).get('hostname'),
# 'IP': hit.get('ioc', {}).get('ip'),
# 'Domain': hit.get('ioc', {}).get('domain'),
# 'Reported By': hit.get('reported_by'),
# 'Location': hit.get('location', {}).get('country')
# })
#
# from_param = from_param + size_param
# total = int(response.get('total', 0))
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# return report_results
# def get_domain_report(domain_full_report):
# reports = {} # type:dict
#
# for report in domain_full_report:
# title = report.get('Title')
# timestamp = datetime.strptime(
# report.get('Timestamp', datetime.now().strftime("%Y-%m-%d %H:%M:%S")), '%Y-%m-%d %H:%M:%S')
#
# if (title in reports and reports.get(title).get('Timestamp') < timestamp) or title not in reports: # type: ignore
# reports.update({title: {
# 'Feed': report.get('Feed'),
# 'Timestamp': timestamp,
# 'Tags': report.get('Tags'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP'),
# 'Domain': report.get('Domain'),
# 'Reported By': report.get('Reported By'),
# 'Location': report.get('Location')
# }})
#
# report_results = []
#
# for report in reports:
# report_results.append({
# 'Title': report,
# 'Feed': reports.get(report).get('Feed'), # type: ignore
# 'Timestamp': reports.get(report).get('Timestamp').strftime("%Y-%m-%d %H:%M:%S"), # type: ignore
# # Formatting the timestamp to human readable date and time
# 'Tags': reports.get(report).get('Tags'), # type: ignore
# 'Hostname': reports.get(report).get('Hostname'), # type: ignore
# 'IP': reports.get(report).get('IP'), # type: ignore
# 'Domain': reports.get(report).get('Domain'), # type: ignore
# 'Reported By': reports.get(report).get('Reported By'), # type: ignore
# 'Location': reports.get(report).get('Location') # type: ignore
# })
#
# return {
# 'reports': report_results,
# 'total': len(domain_full_report)
# }
# def create_domain_command_markdown(domain, total_hits, reports, domain_full_report, is_full_response):
# md = '## Cymon Domain report for: {}\n'.format(domain)
#
# md += '\n'
#
# md += '**Total Hits:** {}'.format(total_hits)
#
# md += '\n'
#
# md += tableToMarkdown("The following reports are the latest malicious hits resolved to the given domain:", reports,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By', 'Domain'])
#
# if is_full_response:
# md += tableToMarkdown("Full report list:", domain_full_report,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By',
# 'Domain'])
#
# return md
# def create_context_domain_command(domain, reports):
# cymon_domain_context_activities = []
# description = 'Reported suspicious activities: '
#
# for report in reports:
# cymon_domain_context_activities.append({
# 'Title': report.get('Title'),
# 'Tags': report.get('Tags'),
# 'Time': report.get('Timestamp'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP')
# })
#
# description += '{}, '.format(report.get('Title'))
#
# description = description[:-2]
#
# context = {
# outputPaths['domain']: {
# 'Name': domain,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# },
# 'Cymon': {
# 'Domain': {
# 'Activities': cymon_domain_context_activities
# }
# }
# }
#
# return context
# def get_domain_report_command():
# args = demisto.args()
#
# domain = args.get('domain')
# is_full_response = args.get('fullResponse') == 'true'
#
# domain_full_report = get_domain_full_report(domain)
# domain_summarized_report = get_domain_report(domain_full_report)
#
# if len(domain_full_report) == 0:
#         return "Domain " + domain + " is not in Cymon's dataset"
#
# if full_response:
# max_len = 1000
# else:
# max_len = 50
#
# events = get_ip_events(ip)
# location = get_ip_location(ip)
# domains = get_ip_domains(ip, max_len)
# urls = get_ip_urls(ip, max_len)
# asn = get_ip_asn(ip)
#
# markdown = create_ip_command_markdown(ip, sources, events, domains, urls, asn)
# context = create_ip_command_context(ip, asn, events, domains)
#
# return [
# {
# 'Type': entryTypes['map'],
# 'Contents': {
# 'lat': float(location.get('lat')),
# 'lng': float(location.get('lon'))
# },
# 'ContentsFormat': formats['json']
# },
# {
# 'Type': entryTypes['note'],
# 'Contents': {
# 'events': events,
# 'sources': sources,
# 'location': location,
# 'domains': domains,
# 'urls': urls,
# 'asn': asn
# },
# 'HumanReadable': markdown,
# 'EntryContext': context,
# 'ContentsFormat': formats['json']
# }]
try:
command = demisto.command()
if command == 'test-module':
        demisto.results('Cymon has been deprecated and is no longer in service. Please delete the instance.')
elif command == 'ip':
cymon_says()
elif command == 'domain':
cymon_says()
except Exception as e:
raise
| true
| true
|
f70a0f884ad3dff842c3597cedd90838041493d7
| 1,991
|
py
|
Python
|
src/compas_blender/artists/robotmodelartist.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
src/compas_blender/artists/robotmodelartist.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
src/compas_blender/artists/robotmodelartist.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | null | null | null |
import bpy
import mathutils
import compas_blender
from compas.robots.base_artist import BaseRobotModelArtist
__all__ = [
'RobotModelArtist',
]
class RobotModelArtist(BaseRobotModelArtist):
"""Visualizer for robot models inside a Blender environment.
Parameters
----------
model : :class:`compas.robots.RobotModel`
Robot model.
"""
def __init__(self, model, collection=None):
self.collection = collection
super(RobotModelArtist, self).__init__(model)
def transform(self, native_mesh, transformation):
native_mesh.matrix_world = mathutils.Matrix(transformation.matrix) @ native_mesh.matrix_world
    def create_geometry(self, geometry, name=None, color=None):
        # Imported colors take priority over the parameter color
if 'mesh_color.diffuse' in geometry.attributes:
color = geometry.attributes['mesh_color.diffuse']
# If we have a color, we'll discard alpha because draw_mesh is hard coded for a=1
if color:
r, g, b, _a = color
color = (r, g, b)
else:
color = (1., 1., 1.)
if self.collection and self.collection not in bpy.data.collections.keys():
compas_blender.utilities.create_collection(self.collection)
v, f = geometry.to_vertices_and_faces()
native_mesh = compas_blender.draw_mesh(vertices=v, faces=f, name=name, color=color, centroid=False, collection=self.collection)
native_mesh.hide_set(True)
return native_mesh
def redraw(self, timeout=0.0):
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1, time_limit=timeout)
def draw_visual(self):
visuals = super(RobotModelArtist, self).draw_visual()
for visual in visuals:
visual.hide_set(False)
def draw_collision(self):
collisions = super(RobotModelArtist, self).draw_collision()
for collision in collisions:
collision.hide_set(False)
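# Minimal usage sketch (hypothetical scene; assumes `model` is a
# compas.robots.RobotModel loaded elsewhere):
#   artist = RobotModelArtist(model, collection='Robot')
#   artist.draw_visual()   # unhide the visual meshes built by the base class
#   artist.redraw()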
| 33.183333
| 135
| 0.675038
|
import bpy
import mathutils
import compas_blender
from compas.robots.base_artist import BaseRobotModelArtist
__all__ = [
'RobotModelArtist',
]
class RobotModelArtist(BaseRobotModelArtist):
def __init__(self, model, collection=None):
self.collection = collection
super(RobotModelArtist, self).__init__(model)
def transform(self, native_mesh, transformation):
native_mesh.matrix_world = mathutils.Matrix(transformation.matrix) @ native_mesh.matrix_world
    def create_geometry(self, geometry, name=None, color=None):
if 'mesh_color.diffuse' in geometry.attributes:
color = geometry.attributes['mesh_color.diffuse']
if color:
r, g, b, _a = color
color = (r, g, b)
else:
color = (1., 1., 1.)
if self.collection and self.collection not in bpy.data.collections.keys():
compas_blender.utilities.create_collection(self.collection)
v, f = geometry.to_vertices_and_faces()
native_mesh = compas_blender.draw_mesh(vertices=v, faces=f, name=name, color=color, centroid=False, collection=self.collection)
native_mesh.hide_set(True)
return native_mesh
def redraw(self, timeout=0.0):
bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1, time_limit=timeout)
def draw_visual(self):
visuals = super(RobotModelArtist, self).draw_visual()
for visual in visuals:
visual.hide_set(False)
def draw_collision(self):
collisions = super(RobotModelArtist, self).draw_collision()
for collision in collisions:
collision.hide_set(False)
| true
| true
|
f70a10aff0b277358ad768846f92507929b5b0b0
| 18,696
|
py
|
Python
|
venv/Lib/site-packages/skimage/transform/tests/test_radon_transform.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 4
|
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
venv/Lib/site-packages/skimage/transform/tests/test_radon_transform.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/skimage/transform/tests/test_radon_transform.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 20
|
2021-11-07T13:55:56.000Z
|
2021-12-02T10:54:01.000Z
|
import itertools
import pytest
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage._shared.utils import convert_to_float
from skimage._shared import testing
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = shepp_logan_phantom()[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
mode='constant', anti_aliasing=False, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def test_iradon_bias_circular_phantom():
"""
test that a uniform circular phantom has a small reconstruction bias
"""
pixels = 128
xy = np.arange(-pixels / 2, pixels / 2) + 0.5
x, y = np.meshgrid(xy, xy)
image = x**2 + y**2 <= (pixels/4)**2
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta)
reconstruction_fbp = iradon(sinogram, theta=theta)
error = reconstruction_fbp - image
tol = 5e-5
roi_err = np.abs(np.mean(error))
assert roi_err < tol
def check_radon_center(shape, circle, dtype, preserve_range):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=dtype)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle,
preserve_range=preserve_range)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
@testing.parametrize("shape", [(16, 16), (17, 17)])
@testing.parametrize("circle", [False, True])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
@testing.parametrize("shape", [(32, 16), (33, 17)])
@testing.parametrize("circle", [False])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center_rectangular(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@testing.parametrize("size, theta, circle",
itertools.product(sizes_for_test_iradon_center,
thetas_for_test_iradon_center,
circles_for_test_iradon_center))
def test_iradon_center(size, theta, circle):
check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter_name=filter_type,
interpolation=interpolation_type, circle=False)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
filter_types))
# cubic interpolation is slow; only run one test for it
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@testing.parametrize("interpolation_type, filter_type",
radon_iradon_inputs)
def test_radon_iradon(interpolation_type, filter_type):
check_radon_iradon(interpolation_type, filter_type)
@pytest.mark.parametrize("filter_type", filter_types)
def test_iradon_new_signature(filter_type):
image = PHANTOM
sinogram = radon(image, circle=False)
with pytest.warns(FutureWarning):
assert np.array_equal(iradon(sinogram, filter=filter_type),
iradon(sinogram, filter_name=filter_type))
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
    nb_angles = 80
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=float)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
def shape2coordinates(shape):
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
return coordinates
def shape2shapeandcoordinates(shape):
return itertools.product([shape], shape2coordinates(shape))
return itertools.chain.from_iterable([shape2shapeandcoordinates(shape)
for shape in shapes])
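# e.g. for shape (3, 3) the generator yields nine (shape, coordinate) pairs,
# ((3, 3), (0, 0)) through ((3, 3), (2, 2)): one single-pixel test image per
# position in the 3x3 neighbourhood around the centre pixel.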
@testing.parametrize("shape, coordinate",
generate_test_data_for_radon_iradon_minimal(shapes))
def test_radon_iradon_minimal(shape, coordinate):
check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with testing.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
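# e.g. _random_circle((5, 5)) returns a 5x5 array of uniform noise with the
# four corner pixels zeroed (their radius exceeds min(shape) // 2 == 2); the
# fixed seed makes repeated calls return identical data.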
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
@testing.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
# if adding more shapes to test data, you might want to look at commit d0f2bac3f
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
output_sizes = (None,
min(shapes_radon_iradon_circle[0]),
max(shapes_radon_iradon_circle[0]),
97)
@testing.parametrize("shape, interpolation, output_size",
itertools.product(shapes_radon_iradon_circle,
interpolations, output_sizes))
def test_radon_iradon_circle(shape, interpolation, output_size):
check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect',
multichannel=False, anti_aliasing=False)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])]).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
@pytest.mark.parametrize("preserve_range", [True, False])
def test_iradon_dtype(preserve_range):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
assert iradon(sinogram, theta=[0],
preserve_range=preserve_range).dtype == 'float64'
assert iradon(sinogram64, theta=[0],
preserve_range=preserve_range).dtype == sinogram64.dtype
assert iradon(sinogram32, theta=[0],
preserve_range=preserve_range).dtype == sinogram32.dtype
def test_radon_dtype():
img = convert_to_float(PHANTOM, False)
img32 = img.astype(np.float32)
assert radon(img).dtype == img.dtype
assert radon(img32).dtype == img32.dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_iradon_sart_dtype(dtype):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
with expected_warnings(['Input data is cast to float']):
assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'
assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype
assert iradon_sart(sinogram, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram32, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram64, theta=[0], dtype=dtype).dtype == dtype
def test_iradon_sart_wrong_dtype():
sinogram = np.zeros((16, 1))
with testing.raises(ValueError):
iradon_sart(sinogram, dtype=int)
| 37.846154
| 80
| 0.65153
|
import itertools
import pytest
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage._shared.utils import convert_to_float
from skimage._shared import testing
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = shepp_logan_phantom()[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
mode='constant', anti_aliasing=False, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def test_iradon_bias_circular_phantom():
pixels = 128
xy = np.arange(-pixels / 2, pixels / 2) + 0.5
x, y = np.meshgrid(xy, xy)
image = x**2 + y**2 <= (pixels/4)**2
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta)
reconstruction_fbp = iradon(sinogram, theta=theta)
error = reconstruction_fbp - image
tol = 5e-5
roi_err = np.abs(np.mean(error))
assert roi_err < tol
def check_radon_center(shape, circle, dtype, preserve_range):
image = np.zeros(shape, dtype=dtype)
image[(shape[0] // 2, shape[1] // 2)] = 1.
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle,
preserve_range=preserve_range)
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
@testing.parametrize("shape", [(16, 16), (17, 17)])
@testing.parametrize("circle", [False, True])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
@testing.parametrize("shape", [(32, 16), (33, 17)])
@testing.parametrize("circle", [False])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center_rectangular(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
def check_iradon_center(size, theta, circle):
debug = False
if circle:
sinogram = np.zeros((size, 1), dtype=float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@testing.parametrize("size, theta, circle",
itertools.product(sizes_for_test_iradon_center,
thetas_for_test_iradon_center,
circles_for_test_iradon_center))
def test_iradon_center(size, theta, circle):
check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter_name=filter_type,
interpolation=interpolation_type, circle=False)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
filter_types))
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@testing.parametrize("interpolation_type, filter_type",
radon_iradon_inputs)
def test_radon_iradon(interpolation_type, filter_type):
check_radon_iradon(interpolation_type, filter_type)
@pytest.mark.parametrize("filter_type", filter_types)
def test_iradon_new_signature(filter_type):
image = PHANTOM
sinogram = radon(image, circle=False)
with pytest.warns(FutureWarning):
assert np.array_equal(iradon(sinogram, filter=filter_type),
iradon(sinogram, filter_name=filter_type))
def test_iradon_angles():
size = 100
image = np.tri(size) + np.tri(size)[::-1]
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
    nb_angles = 80
    theta = np.linspace(0, 180, nb_angles, endpoint=False)
    radon_image_80 = radon(image, theta=theta, circle=False)
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=float)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
def shape2coordinates(shape):
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
return coordinates
def shape2shapeandcoordinates(shape):
return itertools.product([shape], shape2coordinates(shape))
return itertools.chain.from_iterable([shape2shapeandcoordinates(shape)
for shape in shapes])
@testing.parametrize("shape, coordinate",
generate_test_data_for_radon_iradon_minimal(shapes))
def test_radon_iradon_minimal(shape, coordinate):
check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with testing.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
@testing.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
output_sizes = (None,
min(shapes_radon_iradon_circle[0]),
max(shapes_radon_iradon_circle[0]),
97)
@testing.parametrize("shape, interpolation, output_size",
itertools.product(shapes_radon_iradon_circle,
interpolations, output_sizes))
def test_radon_iradon_circle(shape, interpolation, output_size):
check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
    for num_angles in lengths:
        theta_ordered = np.linspace(0, 180, num_angles, endpoint=False)
        theta_random = np.random.uniform(0, 180, num_angles)
        for theta in (theta_random, theta_ordered):
            indices = list(order_angles_golden_ratio(theta))
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect',
multichannel=False, anti_aliasing=False)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])]).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
@pytest.mark.parametrize("preserve_range", [True, False])
def test_iradon_dtype(preserve_range):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
assert iradon(sinogram, theta=[0],
preserve_range=preserve_range).dtype == 'float64'
assert iradon(sinogram64, theta=[0],
preserve_range=preserve_range).dtype == sinogram64.dtype
assert iradon(sinogram32, theta=[0],
preserve_range=preserve_range).dtype == sinogram32.dtype
def test_radon_dtype():
img = convert_to_float(PHANTOM, False)
img32 = img.astype(np.float32)
assert radon(img).dtype == img.dtype
assert radon(img32).dtype == img32.dtype
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_iradon_sart_dtype(dtype):
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
with expected_warnings(['Input data is cast to float']):
assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'
assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype
assert iradon_sart(sinogram, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram32, theta=[0], dtype=dtype).dtype == dtype
assert iradon_sart(sinogram64, theta=[0], dtype=dtype).dtype == dtype
def test_iradon_sart_wrong_dtype():
sinogram = np.zeros((16, 1))
with testing.raises(ValueError):
iradon_sart(sinogram, dtype=int)
| true
| true
|
f70a10d191655957dfeebe15ba978c3fba9a7a51
| 9,739
|
py
|
Python
|
qiskit/basicplotter.py
|
NickyBar/QIP
|
11747b40beb38d41faa297fb2b53f28c6519c753
|
[
"Apache-2.0"
] | 1
|
2017-07-12T02:04:53.000Z
|
2017-07-12T02:04:53.000Z
|
qiskit/basicplotter.py
|
NickyBar/QIP
|
11747b40beb38d41faa297fb2b53f28c6519c753
|
[
"Apache-2.0"
] | null | null | null |
qiskit/basicplotter.py
|
NickyBar/QIP
|
11747b40beb38d41faa297fb2b53f28c6519c753
|
[
"Apache-2.0"
] | 6
|
2018-05-27T10:52:02.000Z
|
2021-04-02T19:20:11.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Basic plotting methods using matplotlib.
These include methods to plot Bloch vectors, histograms, and quantum spheres.
Author: Andrew Cross, Jay Gambetta
"""
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
import numpy as np
from collections import Counter
from functools import reduce
def plot_histogram(data, number_to_keep=None):
"""Plot a histogram of data.
data is a dictionary of {'000': 5, '010': 113, ...}
    number_to_keep is the number of terms to plot; the remaining terms are
    summed into a single bar labeled "rest"
"""
if number_to_keep is not None:
data_temp = dict(Counter(data).most_common(number_to_keep))
data_temp["rest"] = sum(data.values()) - sum(data_temp.values())
data = data_temp
labels = sorted(data)
values = np.array([data[key] for key in labels], dtype=float)
pvalues = values / sum(values)
numelem = len(values)
ind = np.arange(numelem) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects = ax.bar(ind, pvalues, width, color='seagreen')
# add some text for labels, title, and axes ticks
ax.set_ylabel('Probabilities', fontsize=12)
ax.set_xticks(ind)
ax.set_xticklabels(labels, fontsize=12)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%f' % float(height),
ha='center', va='bottom')
plt.show()
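# Illustrative usage sketch (not part of the original module): counts from a
# hypothetical two-qubit experiment, keeping the two most frequent outcomes
# and folding the others into the "rest" bar. Uncomment to render the figure.
# plot_histogram({'00': 512, '11': 480, '01': 20, '10': 12}, number_to_keep=2)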
# Functions used for plotting on the qsphere.
#
# See:
# lex_index:
# https://msdn.microsoft.com/en-us/library/aa289166%28v=vs.71%29.aspx
# n_choose_k: http://stackoverflow.com/questions/
# 2096573/counting-combinations-and-permutations-efficiently
class Arrow3D(FancyArrowPatch):
"""Standard 3D arrow."""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""Create arrow."""
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Draw the arrow."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def compliment(value):
"""Swap 1 and 0 in a vector."""
return ''.join(COMPLEMENT[x] for x in value)
COMPLEMENT = {'1': '0', '0': '1'}
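# Example (hand-checked): compliment('010') == '101'. COMPLEMENT is defined
# after the function but before any call, so lookup succeeds at call time.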
def n_choose_k(n, k):
"""Return the number of combinations."""
if n == 0:
return 0.0
else:
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
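# Example (hand-checked): n_choose_k(4, 2) evaluates to 6.0; the result is a
# float because the reduction accumulates with true division.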
def lex_index(n, k, lst):
"""Return the index of a combination."""
assert len(lst) == k, "list should have length k"
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
m = dualm
return int(m)
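# Example (hand-checked): lex_index(4, 2, [1, 3]) == 1, i.e. the combination
# {1, 3} is the second 2-element subset of {0, 1, 2, 3} in this ordering.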
def bit_string_index(s):
"""Return the index of a string of 0s and 1s."""
n = len(s)
k = s.count("1")
assert s.count("0") == n - k, "s must be a string of 0 and 1"
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones)
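# Example (hand-checked): bit_string_index('0011') == 0 and
# bit_string_index('1100') == 5, spanning the six weight-2 strings of length 4.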
def plot_qsphere(data, number_to_keep, number_of_qubits):
"""Plot the qsphere of data."""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(-1.0, 1.0)
ax.axes.set_ylim3d(-1.0, 1.0)
ax.axes.set_zlim3d(-1.0, 1.0)
ax.set_aspect("equal")
ax.axes.grid(False)
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 25)
v = np.linspace(0, np.pi, 25)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k', alpha=0.05,
linewidth=0)
# wireframe
# Get rid of the panes
# ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the spines
# ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the ticks
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
d = number_of_qubits
total_values = sum(data.values())
for key in data:
weight = key.count("1")
zvalue = -2 * weight / d + 1
number_of_divisions = n_choose_k(d, weight)
weight_order = bit_string_index(key)
if weight_order >= number_of_divisions / 2:
com_key = compliment(key)
weight_order_temp = bit_string_index(com_key)
weight_order = np.floor(
number_of_divisions / 2) + weight_order_temp + 1
print(key + " " + str(weight_order))
angle = (weight_order) * 2 * np.pi / number_of_divisions
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
linewidth = 5 * data.get(key) / total_values
print([xvalue, yvalue, zvalue])
a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue], mutation_scale=20,
lw=linewidth, arrowstyle="->", color="k")
ax.add_artist(a)
for weight in range(d + 1):
theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
z = -2 * weight / d + 1
if weight == 0:
z = z - 0.001
if weight == d:
z = z + 0.001
r = np.sqrt(1 - z**2)
x = r * np.cos(theta)
y = r * np.sin(theta)
ax.plot(x, y, z, 'k')
plt.show()
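# Illustrative usage sketch (not part of the original module): a three-qubit,
# GHZ-like distribution with made-up counts. Note that number_to_keep is
# accepted but never used by this function. Uncomment to render the figure.
# plot_qsphere({'000': 480, '111': 470, '101': 50}, number_to_keep=3,
#              number_of_qubits=3)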
# Functions used for plotting tomography.
def plot_bloch_vector(bloch, title=""):
"""Plot a Bloch vector.
Plot a sphere, axes, the Bloch vector, and its projections onto each axis.
bloch is a 3-tuple (x, y, z)
title is a string, the plot title
"""
# Set arrow lengths
arlen = 1.3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect("equal")
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, color="b", alpha=0.1)
# Plot arrows (axes, Bloch vector, its projections)
xa = Arrow3D([0, arlen], [0, 0], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
ya = Arrow3D([0, 0], [0, arlen], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
za = Arrow3D([0, 0], [0, 0], [0, arlen], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
a = Arrow3D([0, bloch[0]], [0, bloch[1]], [0, bloch[2]], mutation_scale=20,
lw=2, arrowstyle="simple", color="k")
bax = Arrow3D([0, bloch[0]], [0, 0], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="r")
bay = Arrow3D([0, 0], [0, bloch[1]], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="g")
baz = Arrow3D([0, 0], [0, 0], [0, bloch[2]], mutation_scale=20, lw=2,
arrowstyle="-", color="b")
arrowlist = [xa, ya, za, a, bax, bay, baz]
for arr in arrowlist:
ax.add_artist(arr)
# Rotate the view
ax.view_init(30, 30)
# Annotate the axes, shifts are ad-hoc for this (30, 30) view
xp, yp, _ = proj3d.proj_transform(arlen, 0, 0, ax.get_proj())
plt.annotate("x", xy=(xp, yp), xytext=(-3, -8),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, arlen, 0, ax.get_proj())
plt.annotate("y", xy=(xp, yp), xytext=(6, -5),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, 0, arlen, ax.get_proj())
plt.annotate("z", xy=(xp, yp), xytext=(2, 0),
textcoords='offset points', ha='right', va='bottom')
plt.title(title)
plt.show()
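# Illustrative usage sketch (not part of the original module): a unit Bloch
# vector on the x-z great circle. Uncomment to render the figure.
# plot_bloch_vector((1 / np.sqrt(2), 0., 1 / np.sqrt(2)),
#                   title='example state')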
# Functions used by randomized benchmarking.
def plot_rb_data(xdata, ydatas, yavg, fit, survival_prob):
"""Plot randomized benchmarking data.
xdata = list of subsequence lengths
ydatas = list of lists of survival probabilities for each sequence
yavg = mean of the survival probabilities at each sequence length
fit = list of fitting parameters [a, b, alpha]
survival_prob = function that computes survival probability
"""
# Plot the result for each sequence
for ydata in ydatas:
plt.plot(xdata, ydata, 'rx')
# Plot the mean
plt.plot(xdata, yavg, 'bo')
# Plot the fit
plt.plot(xdata, survival_prob(xdata, *fit), 'b-')
plt.show()
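# Illustrative usage sketch (not part of the original module): synthetic decay
# data following the standard RB model a * alpha**m + b. rb_model and all the
# numbers below are made up for the demonstration. Uncomment to render.
# def rb_model(m, a, b, alpha):
#     return a * np.power(alpha, m) + b
# lengths = np.array([1, 2, 4, 8, 16, 32])
# seqs = [rb_model(lengths, 0.5, 0.5, 0.95)
#         + np.random.normal(0., 0.01, lengths.size) for _ in range(5)]
# plot_rb_data(lengths, seqs, np.mean(seqs, axis=0), [0.5, 0.5, 0.95],
#              rb_model)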
| 35.286232
| 79
| 0.591745
|
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
import numpy as np
from collections import Counter
from functools import reduce
def plot_histogram(data, number_to_keep=None):
if number_to_keep is not None:
data_temp = dict(Counter(data).most_common(number_to_keep))
data_temp["rest"] = sum(data.values()) - sum(data_temp.values())
data = data_temp
labels = sorted(data)
values = np.array([data[key] for key in labels], dtype=float)
pvalues = values / sum(values)
numelem = len(values)
    ind = np.arange(numelem)
    width = 0.35
    fig, ax = plt.subplots()
rects = ax.bar(ind, pvalues, width, color='seagreen')
ax.set_ylabel('Probabilities', fontsize=12)
ax.set_xticks(ind)
ax.set_xticklabels(labels, fontsize=12)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%f' % float(height),
ha='center', va='bottom')
plt.show()
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def compliment(value):
return ''.join(COMPLEMENT[x] for x in value)
COMPLEMENT = {'1': '0', '0': '1'}
def n_choose_k(n, k):
if n == 0:
return 0.0
else:
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
def lex_index(n, k, lst):
assert len(lst) == k, "list should have length k"
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
m = dualm
return int(m)
def bit_string_index(s):
n = len(s)
k = s.count("1")
assert s.count("0") == n - k, "s must be a string of 0 and 1"
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones)
def plot_qsphere(data, number_to_keep, number_of_qubits):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(-1.0, 1.0)
ax.axes.set_ylim3d(-1.0, 1.0)
ax.axes.set_zlim3d(-1.0, 1.0)
ax.set_aspect("equal")
ax.axes.grid(False)
u = np.linspace(0, 2 * np.pi, 25)
v = np.linspace(0, np.pi, 25)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k', alpha=0.05,
linewidth=0)
d = number_of_qubits
total_values = sum(data.values())
for key in data:
weight = key.count("1")
zvalue = -2 * weight / d + 1
number_of_divisions = n_choose_k(d, weight)
weight_order = bit_string_index(key)
if weight_order >= number_of_divisions / 2:
com_key = compliment(key)
weight_order_temp = bit_string_index(com_key)
weight_order = np.floor(
number_of_divisions / 2) + weight_order_temp + 1
print(key + " " + str(weight_order))
angle = (weight_order) * 2 * np.pi / number_of_divisions
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
linewidth = 5 * data.get(key) / total_values
print([xvalue, yvalue, zvalue])
a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue], mutation_scale=20,
lw=linewidth, arrowstyle="->", color="k")
ax.add_artist(a)
for weight in range(d + 1):
theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
z = -2 * weight / d + 1
if weight == 0:
z = z - 0.001
if weight == d:
z = z + 0.001
r = np.sqrt(1 - z**2)
x = r * np.cos(theta)
y = r * np.sin(theta)
ax.plot(x, y, z, 'k')
plt.show()
def plot_bloch_vector(bloch, title=""):
arlen = 1.3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect("equal")
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, color="b", alpha=0.1)
xa = Arrow3D([0, arlen], [0, 0], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
ya = Arrow3D([0, 0], [0, arlen], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
za = Arrow3D([0, 0], [0, 0], [0, arlen], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
a = Arrow3D([0, bloch[0]], [0, bloch[1]], [0, bloch[2]], mutation_scale=20,
lw=2, arrowstyle="simple", color="k")
bax = Arrow3D([0, bloch[0]], [0, 0], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="r")
bay = Arrow3D([0, 0], [0, bloch[1]], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="g")
baz = Arrow3D([0, 0], [0, 0], [0, bloch[2]], mutation_scale=20, lw=2,
arrowstyle="-", color="b")
arrowlist = [xa, ya, za, a, bax, bay, baz]
for arr in arrowlist:
ax.add_artist(arr)
ax.view_init(30, 30)
xp, yp, _ = proj3d.proj_transform(arlen, 0, 0, ax.get_proj())
plt.annotate("x", xy=(xp, yp), xytext=(-3, -8),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, arlen, 0, ax.get_proj())
plt.annotate("y", xy=(xp, yp), xytext=(6, -5),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, 0, arlen, ax.get_proj())
plt.annotate("z", xy=(xp, yp), xytext=(2, 0),
textcoords='offset points', ha='right', va='bottom')
plt.title(title)
plt.show()
def plot_rb_data(xdata, ydatas, yavg, fit, survival_prob):
for ydata in ydatas:
plt.plot(xdata, ydata, 'rx')
plt.plot(xdata, yavg, 'bo')
plt.plot(xdata, survival_prob(xdata, *fit), 'b-')
plt.show()
| true
| true
|