hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6903a4248a7ead61e31446b710de4750a3b54c9a | 2,225 | py | Python | tensorflow_datasets/public_api.py | facaiy/datasets | 830a663e996b7b95c45acddd58ba6e6532599c0c | [
"Apache-2.0"
] | 1 | 2019-07-23T22:05:16.000Z | 2019-07-23T22:05:16.000Z | tensorflow_datasets/public_api.py | facaiy/datasets | 830a663e996b7b95c45acddd58ba6e6532599c0c | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/public_api.py | facaiy/datasets | 830a663e996b7b95c45acddd58ba6e6532599c0c | [
"Apache-2.0"
] | 1 | 2019-04-29T11:44:16.000Z | 2019-04-29T11:44:16.000Z | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API of tfds, without the registered dataset."""
# pylint: disable=unused-import,g-import-not-at-top,g-bad-import-order,wrong-import-position
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
from tensorflow_datasets import core
from tensorflow_datasets.core import download
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import features
from tensorflow_datasets.core import file_format_adapter as file_adapter
from tensorflow_datasets.core import units
from tensorflow_datasets.core.dataset_utils import as_numpy
from tensorflow_datasets.core.download import GenerateMode
from tensorflow_datasets.core.registered import builder
from tensorflow_datasets.core.registered import list_builders
from tensorflow_datasets.core.registered import load
from tensorflow_datasets.core.splits import percent
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.utils.gcs_utils import is_dataset_on_gcs
from tensorflow_datasets.core.utils.tqdm_utils import disable_progress_bar
from tensorflow_datasets.version import __version__
# Names re-exported as the stable, public `tfds.*` API surface.
__all__ = [
    "core",
    "as_numpy",
    "decode",
    "download",
    "features",
    "file_adapter",
    "units",
    "GenerateMode",
    "builder",
    "list_builders",
    "load",
    "percent",
    "Split",
    "testing",
    "disable_progress_bar",
    "is_dataset_on_gcs",
]
def _import_testing():
  """Import and return the `tensorflow_datasets.testing` module.

  Kept as a helper so the import happens only after
  `tf_compat.ensure_tf_install()` has run above.  The previous
  `try/except: raise` wrapper re-raised everything unchanged and was dead
  code (its own pylint `unreachable` disable acknowledged this), so it has
  been removed; any import error still propagates to the caller.
  """
  from tensorflow_datasets import testing  # pylint: disable=redefined-outer-name
  return testing
testing = _import_testing()
| 32.246377 | 92 | 0.784719 |
77595d1075e692ffe42d5f21d040606e8f8b0298 | 1,151 | py | Python | user/filter.py | emreoztoklu/DSP_in_python | 3b917dc753a820dd699df0172ae90028f44c64fc | [
"Unlicense"
] | null | null | null | user/filter.py | emreoztoklu/DSP_in_python | 3b917dc753a820dd699df0172ae90028f44c64fc | [
"Unlicense"
] | null | null | null | user/filter.py | emreoztoklu/DSP_in_python | 3b917dc753a820dd699df0172ae90028f44c64fc | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 26 16:24:52 2022
@author: emreo
"""
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
t = np.linspace(-1, 1, 201)
f1 = 0.75
w1 = 2*np.pi*f1
f2 = 1.25
w2 = 2*np.pi*f2
f3 = 3.85
w3 = 2*np.pi*f3
s1= np.sin(w1 * t *(1.5-t))
s2= 0.1*np.sin(w2 * t + 1)
s3= 0.18*np.cos(w3 * t)
s4_noise = 0.08*np.random.randn(len(t))
"""---------------------------------------------
"""
x = (s1+s2+s3)
xn = x + s4_noise
"""---------------------------------------------
"""
b, a = signal.butter(3, 0.6)
zi = signal.lfilter_zi(b, a)
z1, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
z2, _ = signal.lfilter(b, a, z1, zi=zi*xn[0])
y = signal.filtfilt(b, a, xn)
"""---------------------------------------------
"""
plt.figure
plt.plot(t, xn, 'b', alpha =0.75)
plt.plot(t, z1, 'r--', alpha =0.75)
plt.plot(t, z2, 'r', alpha =0.75)
plt.plot(t, y, 'k', alpha =0.75)
#plt.plot(t, z1, 'g', t, z2, 'r', t, y, 'k')
plt.legend(('noisy signal', 'lfilter, once','lfilter, twice', 'filtfilt'), loc='best')
plt.grid(True)
plt.show()
"""---------------------------------------------
""" | 18.564516 | 86 | 0.470895 |
62f069cf9f0837dffd198107a37944d6c144825e | 6,248 | py | Python | PM.py | thautwarm/Stardust | 3fa3927792958c02e51e4e5a6a5c74b6b2ecf37d | [
"Apache-2.0"
] | 2 | 2017-08-02T03:57:26.000Z | 2017-10-07T01:57:42.000Z | PM.py | thautwarm/Stardust | 3fa3927792958c02e51e4e5a6a5c74b6b2ecf37d | [
"Apache-2.0"
] | 2 | 2017-06-05T12:13:24.000Z | 2018-08-31T13:04:22.000Z | PM.py | thautwarm/Stardust | 3fa3927792958c02e51e4e5a6a5c74b6b2ecf37d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
thautwarm committed on 5 Jun 2017
@author: misakawa
"""
from collections import Iterable,defaultdict
import re
PMRegex=re.compile('__.*__')
def DefaultReturn(RetDeal=None):
    """Decorator factory: call the wrapped function and return ``RetDeal``
    instead of propagating if the call raises.

    Fix: catch ``Exception`` rather than using a bare ``except:`` so that
    system-exiting exceptions (KeyboardInterrupt, SystemExit) are no longer
    silently converted into the default return value.
    """
    def outw(func):
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                return RetDeal
        return wrapper
    return outw
# Marker type: a wildcard pattern element that matches a single value.
class Any:pass
# Marker type: a wildcard pattern element that may match a run of values.
class Seq:pass
class tAny(Any):
    """Type wildcard: compares equal to any value that is an instance of
    ``family``, and to any other ``Any`` pattern built over the same family."""

    def __init__(self, family=object):
        self.family = family

    def __eq__(self, var):
        # Pattern-vs-pattern comparison checks the family; pattern-vs-value
        # comparison degenerates to an isinstance test.
        return (var.family == self.family
                if isinstance(var, Any)
                else isinstance(var, self.family))

    def __hash__(self):
        # The Ellipsis tag keeps the hash distinct from a plain tuple of the family.
        return hash((self.family, ...))
class tSeq(tAny):
    """Sequence wildcard: like :class:`tAny`, but may absorb a run of
    consecutive items, requiring at least ``atleast`` of them to match."""

    def __init__(self, family=object, atleast=0):
        super().__init__(family)  # sets self.family, same as the base class
        self.least = atleast
class fAny(Any):
    """Predicate wildcard: equal to ``var`` whenever ``func(v, var)`` is
    truthy.  The default predicate is plain equality, so ``fAny(x) == y``
    behaves like ``x == y``."""

    def __init__(self, v, func=lambda x, y: x == y):
        self.v = v
        self.func = func

    def __eq__(self, var):
        return self.func(self.v, var)

    def __hash__(self):
        return hash((self.func, self.v, ...))
class fSeq(fAny):
    """Predicate sequence wildcard: an :class:`fAny` that may match a run of
    items, requiring at least ``atleast`` consecutive matches."""

    def __init__(self, v, func=lambda x, y: x == y, atleast=0):
        super().__init__(v, func)  # sets self.v and self.func
        self.least = atleast
def AlgebraDiv(iterator, func):
    """Partition *iterator* into sets of items sharing the same ``func(item)``.

    Returns a ``defaultdict(set)`` mapping each key to the set of items that
    produced it (so looking up a missing key yields an empty set).
    """
    partition = defaultdict(set)
    for element in iterator:
        partition[func(element)].add(element)
    return partition
#@DefaultReturn(RetDeal=False)
def patMatch(val,var,partial = False):
    """Structurally match pattern `val` against value `var`.

    Sets, dicts and other iterables are matched element-wise (with support
    for the `Any`/`Seq` wildcard markers); scalars by equality; arbitrary
    objects by comparing their non-dunder attributes.  With `partial=True`,
    `var` may contain extra items beyond the pattern.

    NOTE(review): any exception raised in the container branch is swallowed
    by the broad `except` below, which prints and returns the class
    `BaseException` — a truthy non-bool — so callers that truth-test the
    result will see that failure as a match.
    """
    if isinstance(val,Iterable) and not issubclass(val.__class__,str):
        try:
            #============================================================
            # Set
            if issubclass(val.__class__,set):
                # Split the pattern set into concrete items and Any-wildcards.
                subStructures=AlgebraDiv(val,
                            lambda item: isinstance(item,Any))
                NormalDefined,GeneralDefined =subStructures[False],subStructures[True]
                # Every concrete pattern item must literally be present in var.
                judge_one= len(NormalDefined&var)== len(NormalDefined)
                if not judge_one:return False
                for idx,item in enumerate(var):
                    # `None.__class__` (i.e. NoneType) is used as a "not found" sentinel.
                    toRemove=None.__class__
                    for val_i in val:
                        if patMatch(val_i,item,partial=partial):
                            toRemove=val_i
                            break
                    if toRemove!=None.__class__:
                        # NOTE(review): this mutates the caller's pattern set.
                        val.remove(val_i)
                        continue
                    #there is not any instance of "Any", however there is atleast 1 item left in "var".
                    if not GeneralDefined:return True if partial else False
                    toRemove=None.__class__
                    for genItem in GeneralDefined:
                        if genItem==item:
                            toRemove=genItem
                            break
                    if toRemove!=None.__class__:
                        GeneralDefined.remove(toRemove)
                    else:
                        # An item does not match any instance of "Any" left,
                        # which means the "val" not equaled with "var".
                        if not partial:
                            return False
                # All wildcards must be consumed; for exact matches the loop
                # must also have covered every element of var.
                return not GeneralDefined and (True if partial else (idx+1)==len(var))
            #=============================================================
            #Dict
            elif issubclass(val.__class__,dict):
                # NOTE(review): `var.key()` looks like a typo for `var.keys()`;
                # this branch raises AttributeError, which the broad except
                # below converts into the BaseException return value.
                if not partial and len(val.keys())!=len(var.key()):
                    return False
                for key in val.keys():
                    if not patMatch(val[key],var[key],partial=partial):
                        return False
                return True
            #=============================================================
            # Iterator Except str
            else:
                if len(val)==0 :
                    # print(1,val,var)
                    # An empty pattern only matches an empty value sequence.
                    return len(val)==0 and len(var)==0
                elif isinstance(val[0],Seq):
                    # Greedily absorb leading items matched by the Seq pattern.
                    catchNum = 0
                    for var_i in var:
                        if patMatch(val[0], var_i,partial=partial):
                            catchNum += 1
                        else:break
                    if catchNum>=val[0].least:
                        # print(2.1)
                        return patMatch(val[1:],var[catchNum:],partial=partial)
                    else:
                        # print(2.2)
                        return False
                elif isinstance(val[0],Iterable) and not issubclass(val[0].__class__,str):
                    # print(3)
                    # Head is itself a container: recurse on head and tail.
                    return patMatch(val[0],var[0],partial=partial) and patMatch(val[1:],var[1:],partial=partial)
                else:
                    # print(4)
                    return False if not patMatch(val[0],var[0],partial=partial) \
                        else patMatch(val[1:],var[1:],partial=partial)
            #=====================================================
        except:
            # NOTE(review): debugging leftovers — prints the operands and
            # returns the BaseException class itself instead of a bool.
            print(val,var)
            return BaseException
        pass
    else:
        # Scalar pattern: basic types (and Any wildcards) compare by equality.
        for type_i in (str,int,float,bool,bytes,complex,Any):
            if issubclass(val.__class__,type_i):
                return val==var
        # Otherwise compare objects attribute-by-attribute, skipping dunders.
        attrs=filter(lambda x:not PMRegex.findall(x) ,dir(val))
        if not partial:
            # NOTE(review): compares a set to an int (always unequal) and
            # exhausts the `attrs` iterator, so the loop below sees nothing —
            # the strict branch is effectively broken.
            attrsVar=list(filter(lambda x:not PMRegex.findall(x) ,dir(var)))
            if set(attrs)&set(attrsVar)!= len(attrs):
                return False
        for attr in attrs:
            if not (hasattr(var,attr) and (getattr(var,attr)==getattr(val,attr) or
                    getattr(var,attr).__class__==getattr(val,attr).__class__) ):
                return False
        return True
class PatternMatching:
    """Thin object wrapper around :func:`patMatch` that stores the pattern."""

    def __init__(self, matchvalue):
        # The pattern candidate values are compared against.
        self.matchvalue = matchvalue

    def match(self, value, partial=True):
        """Return the result of matching `value` against the stored pattern."""
        return patMatch(value, self.matchvalue, partial)


# Short convenience alias.
PM = PatternMatching
# Demo record type used to exercise attribute-based object matching.
if True:
    class sample:
        """Holder with three public attributes `a`, `b` and `c`."""
        def __init__(self,a,b,c):
            self.a=a
            self.b=b
            self.c=c
def dosome(self):pass | 33.772973 | 112 | 0.481594 |
76e8b6c3bff4bb330699523ee113e04890119813 | 7,577 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20200501/security_partner_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20200501/security_partner_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20200501/security_partner_provider.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityPartnerProvider']
class SecurityPartnerProvider(pulumi.CustomResource):
    """Security Partner Provider resource (Azure network, API 2020-05-01).

    Generated Pulumi wrapper; see `__init__` for the accepted arguments.
    """

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 security_partner_provider_name: Optional[pulumi.Input[str]] = None,
                 security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Security Partner Provider resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] security_partner_provider_name: The name of the Security Partner Provider.
        :param pulumi.Input[Union[str, 'SecurityProviderName']] security_provider_name: The security provider name.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The virtualHub to which the Security Partner Provider belongs.
        """
        # Legacy keyword aliases, kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the input property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['id'] = id
            __props__['location'] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['security_partner_provider_name'] = security_partner_provider_name
            __props__['security_provider_name'] = security_provider_name
            __props__['tags'] = tags
            __props__['virtual_hub'] = virtual_hub
            # Output-only properties are unknown until the provider responds.
            __props__['connection_status'] = None
            __props__['etag'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Other API versions of this resource type alias to this one so that
        # switching versions does not cause a replace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/latest:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/v20200701:SecurityPartnerProvider"), pulumi.Alias(type_="azure-nextgen:network/v20200801:SecurityPartnerProvider")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SecurityPartnerProvider, __self__).__init__(
            'azure-nextgen:network/v20200501:SecurityPartnerProvider',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityPartnerProvider':
        """
        Get an existing SecurityPartnerProvider resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # No inputs are set when adopting by ID; state is read back from Azure.
        __props__ = dict()

        return SecurityPartnerProvider(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> pulumi.Output[str]:
        """
        The connection status with the Security Partner Provider.
        """
        return pulumi.get(self, "connection_status")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the Security Partner Provider resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="securityProviderName")
    def security_provider_name(self) -> pulumi.Output[Optional[str]]:
        """
        The security provider name.
        """
        return pulumi.get(self, "security_provider_name")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualHub")
    def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The virtualHub to which the Security Partner Provider belongs.
        """
        return pulumi.get(self, "virtual_hub")

    # Map camelCase provider property names to snake_case Python attributes
    # (and back) for the Pulumi engine.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.329609 | 593 | 0.655668 |
42adec05338b553c89c8db2d17855ea196615b30 | 2,254 | py | Python | venv/Lib/site-packages/IPython/html/widgets/widget_selectioncontainer.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | 1 | 2017-12-30T20:43:28.000Z | 2017-12-30T20:43:28.000Z | venv/Lib/site-packages/IPython/html/widgets/widget_selectioncontainer.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | venv/Lib/site-packages/IPython/html/widgets/widget_selectioncontainer.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | null | null | null | """SelectionContainer class.
Represents a multipage container that can be used to group other widgets into
pages.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget_box import Box, register
from IPython.utils.traitlets import Unicode, Dict, CInt
from IPython.utils.warn import DeprecatedClass
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class _SelectionContainer(Box):
    """Base class used to display multiple child widgets."""
    # Titles of the pages, keyed by page index; synced with the frontend.
    _titles = Dict(help="Titles of the pages", sync=True)
    # Index of the currently selected page; synced with the frontend.
    selected_index = CInt(0, sync=True)

    # Public methods
    def set_title(self, index, title):
        """Sets the title of a container page.

        Parameters
        ----------
        index : int
            Index of the container page
        title : unicode
            New title"""
        self._titles[index] = title
        # Explicitly push the titles to the frontend: mutating the Dict in
        # place does not trigger a trait-change notification by itself.
        self.send_state('_titles')

    def get_title(self, index):
        """Gets the title of a container pages.

        Parameters
        ----------
        index : int
            Index of the container page"""
        # Returns None when no title was set for this page.
        if index in self._titles:
            return self._titles[index]
        else:
            return None
@register('IPython.Accordion')
class Accordion(_SelectionContainer):
    """Displays children each on a separate accordion page."""
    # Name of the Backbone view used on the frontend; synced to the browser.
    _view_name = Unicode('AccordionView', sync=True)
@register('IPython.Tab')
class Tab(_SelectionContainer):
    """Displays children each on a separate accordion tab."""
    # Name of the Backbone view used on the frontend; synced to the browser.
    _view_name = Unicode('TabView', sync=True)
# Remove in IPython 4.0
# Deprecated aliases kept for backwards compatibility with the pre-3.0 names.
AccordionWidget = DeprecatedClass(Accordion, 'AccordionWidget')
TabWidget = DeprecatedClass(Tab, 'TabWidget')
| 32.666667 | 78 | 0.528394 |
cef60469b67ef61ce45ba209bdcab0f37066142e | 455 | py | Python | test/test_math_stuff.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 52 | 2015-06-23T10:30:24.000Z | 2021-07-28T20:50:31.000Z | test/test_math_stuff.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 72 | 2015-10-22T18:57:08.000Z | 2022-03-01T00:04:45.000Z | test/test_math_stuff.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 27 | 2015-09-14T07:24:04.000Z | 2021-12-17T14:31:33.000Z | def test_variance():
    data = [4, 7, 13, 16]

    def naive_var(data):
        # Textbook one-pass expansion of the sample variance:
        # (sum x_i^2 - (sum x_i)^2 / n) / (n - 1).  Numerically fragile for
        # data with a large offset, which the second assert below probes.
        n = len(data)
        return ((
            sum(di**2 for di in data)
            - sum(data)**2/n)
            / (n-1))

    from pytools import variance
    orig_variance = variance(data, entire_pop=False)
    assert abs(naive_var(data) - orig_variance) < 1e-15

    # Adding a large constant must leave the sample variance unchanged; this
    # catches catastrophic-cancellation implementations.
    data = [1e9 + x for x in data]
    assert abs(variance(data, entire_pop=False) - orig_variance) < 1e-15
| 25.277778 | 72 | 0.569231 |
8470ca7b4c73884308e3d400198de46e7f9554fa | 1,189 | py | Python | spec/puzzle/examples/mim/p3_2_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | 2 | 2020-08-18T18:43:09.000Z | 2020-08-18T20:05:59.000Z | spec/puzzle/examples/mim/p3_2_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | null | null | null | spec/puzzle/examples/mim/p3_2_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | null | null | null | import astor
from data import warehouse
from puzzle.examples.mim import p3_2
from puzzle.problems import logic_problem
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
# mamba-style spec: `_description`/`_it` variants are disabled examples.
with _description('p3_2'):
    with before.all:
        # Snapshot the warehouse and boot the production config once for
        # all examples in this description block.
        warehouse.save()
        prod_config.init()
        self.puzzle = p3_2.get()

    with after.all:
        prod_config.reset()
        warehouse.restore()

    with description('solution'):
        with it('scores the source as a LogicProblem'):
            expect(logic_problem.LogicProblem.score(
                p3_2.SOURCE.split('\n'))).to(equal(1))

        with it('identifies puzzle type'):
            problems = self.puzzle.problems()
            expect(problems).to(have_len(1))
            problem = problems[0]
            expect(problem).to(be_a(logic_problem.LogicProblem))

        # Disabled: parsing/modeling are exercised only manually for now.
        with _it('parses puzzle'):
            node = logic_problem._parse(p3_2.SOURCE.split('\n'))
            print(astor.to_source(node))

        with _it('models puzzle'):
            model = logic_problem._model(p3_2.SOURCE.split('\n'))
            model.add(model.dimension_constraints())
            print(str(model))

        with it('exports a solution'):
            problem = self.puzzle.problems()[0]
            expect(problem.solution).to(look_like(p3_2.SOLUTION))
| 28.309524 | 59 | 0.686291 |
6ac1326545a5ce1d88ed657a144515569602b0de | 6,531 | py | Python | homeassistant/helpers/check_config.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | 2 | 2017-08-29T15:40:44.000Z | 2017-08-30T21:40:12.000Z | homeassistant/helpers/check_config.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | 3 | 2021-09-08T03:25:06.000Z | 2022-03-12T00:58:54.000Z | homeassistant/helpers/check_config.py | lkollar/home-assistant | f4f7c25f744c0678b12acb2cc905894cca9f46ef | [
"Apache-2.0"
] | 1 | 2020-05-12T13:35:56.000Z | 2020-05-12T13:35:56.000Z | """Helper to check the configuration file."""
from collections import OrderedDict
from typing import List, NamedTuple, Optional
import attr
import voluptuous as vol
from homeassistant import loader
from homeassistant.config import (
CONF_CORE,
CONF_PACKAGES,
CORE_CONFIG_SCHEMA,
_format_config_error,
config_per_platform,
extract_domain_configs,
find_config_file,
load_yaml_config_file,
merge_packages_config,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.requirements import (
RequirementsNotFound,
async_get_integration_with_requirements,
)
import homeassistant.util.yaml.loader as yaml_loader
class CheckConfigError(NamedTuple):
    """Configuration check error."""

    # Human-readable description of the problem.
    message: str
    # Domain the error belongs to (e.g. "light" or
    # "homeassistant.packages.<pkg>.<component>"), or None when not tied to one.
    domain: Optional[str]
    # The offending configuration fragment, if available.
    config: Optional[ConfigType]
@attr.s
class HomeAssistantConfig(OrderedDict):
    """Validated configuration mapping that also records check errors."""

    errors: List[CheckConfigError] = attr.ib(default=attr.Factory(list))

    def add_error(
        self,
        message: str,
        domain: Optional[str] = None,
        config: Optional[ConfigType] = None,
    ) -> "HomeAssistantConfig":
        """Record a single error; returns self so calls can be chained."""
        entry = CheckConfigError(str(message), domain, config)
        self.errors.append(entry)
        return self

    @property
    def error_str(self) -> str:
        """All recorded error messages, one per line."""
        return "\n".join(err.message for err in self.errors)
async def async_check_ha_config_file(hass: HomeAssistant) -> HomeAssistantConfig:
    """Load and check if Home Assistant configuration file is valid.

    This method is a coroutine.

    Returns a HomeAssistantConfig holding the validated configuration plus
    any errors collected along the way (it never raises for config problems).
    """
    config_dir = hass.config.config_dir
    result = HomeAssistantConfig()

    def _pack_error(
        package: str, component: str, config: ConfigType, message: str
    ) -> None:
        """Handle errors from packages: _log_pkg_error."""
        message = f"Package {package} setup failed. Component {component} {message}"
        domain = f"homeassistant.packages.{package}.{component}"
        pack_config = core_config[CONF_PACKAGES].get(package, config)
        result.add_error(message, domain, pack_config)

    def _comp_error(ex: Exception, domain: str, config: ConfigType) -> None:
        """Handle errors from components: async_log_exception."""
        result.add_error(_format_config_error(ex, domain, config), domain, config)

    # Load configuration.yaml
    try:
        # File I/O runs in the executor so the event loop is not blocked.
        config_path = await hass.async_add_executor_job(find_config_file, config_dir)
        if not config_path:
            return result.add_error("File configuration.yaml not found.")
        config = await hass.async_add_executor_job(load_yaml_config_file, config_path)
    except FileNotFoundError:
        return result.add_error(f"File not found: {config_path}")
    except HomeAssistantError as err:
        return result.add_error(f"Error loading {config_path}: {err}")
    finally:
        # Always drop cached !secret values, even on early return.
        yaml_loader.clear_secret_cache()

    # Extract and validate core [homeassistant] config
    try:
        core_config = config.pop(CONF_CORE, {})
        core_config = CORE_CONFIG_SCHEMA(core_config)
        result[CONF_CORE] = core_config
    except vol.Invalid as err:
        result.add_error(err, CONF_CORE, core_config)
        core_config = {}

    # Merge packages
    await merge_packages_config(
        hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error
    )
    core_config.pop(CONF_PACKAGES, None)

    # Filter out repeating config sections
    # ("light", "light 2", ... all map to the "light" domain).
    components = set(key.split(" ")[0] for key in config.keys())

    # Process and validate config
    for domain in components:
        try:
            integration = await async_get_integration_with_requirements(hass, domain)
        except (RequirementsNotFound, loader.IntegrationNotFound) as ex:
            result.add_error(f"Component error: {domain} - {ex}")
            continue

        try:
            component = integration.get_component()
        except ImportError as ex:
            result.add_error(f"Component error: {domain} - {ex}")
            continue

        config_schema = getattr(component, "CONFIG_SCHEMA", None)
        if config_schema is not None:
            try:
                # CONFIG_SCHEMA validates (and may rewrite) the full config.
                config = config_schema(config)
                result[domain] = config[domain]
            except vol.Invalid as ex:
                _comp_error(ex, domain, config)
                continue

        component_platform_schema = getattr(
            component,
            "PLATFORM_SCHEMA_BASE",
            getattr(component, "PLATFORM_SCHEMA", None),
        )

        if component_platform_schema is None:
            continue

        platforms = []
        for p_name, p_config in config_per_platform(config, domain):
            # Validate component specific platform schema
            try:
                p_validated = component_platform_schema(p_config)
            except vol.Invalid as ex:
                _comp_error(ex, domain, config)
                continue

            # Not all platform components follow same pattern for platforms
            # So if p_name is None we are not going to validate platform
            # (the automation component is one of them)
            if p_name is None:
                platforms.append(p_validated)
                continue

            try:
                p_integration = await async_get_integration_with_requirements(
                    hass, p_name
                )
                platform = p_integration.get_platform(domain)
            except (
                loader.IntegrationNotFound,
                RequirementsNotFound,
                ImportError,
            ) as ex:
                result.add_error(f"Platform error {domain}.{p_name} - {ex}")
                continue

            # Validate platform specific schema
            platform_schema = getattr(platform, "PLATFORM_SCHEMA", None)
            if platform_schema is not None:
                try:
                    p_validated = platform_schema(p_validated)
                except vol.Invalid as ex:
                    _comp_error(ex, f"{domain}.{p_name}", p_validated)
                    continue

            platforms.append(p_validated)

        # Remove config for current component and add validated config back in.
        for filter_comp in extract_domain_configs(config, domain):
            del config[filter_comp]
        result[domain] = platforms

    return result
| 34.373684 | 86 | 0.64569 |
de50bb1dc761deadeb4f511feaa679e125e49080 | 991 | py | Python | src/transformers/meetup.py | muxer-dev/event-pipeline | 06ba7dd05a9ece3b7dcae5bc40599b3c4c371ccb | [
"MIT"
] | 1 | 2019-03-14T20:18:56.000Z | 2019-03-14T20:18:56.000Z | src/transformers/meetup.py | muxer-dev/event-pipeline | 06ba7dd05a9ece3b7dcae5bc40599b3c4c371ccb | [
"MIT"
] | 29 | 2020-02-04T15:02:07.000Z | 2020-03-18T12:02:30.000Z | src/transformers/meetup.py | muxer-dev/event-pipeline | 06ba7dd05a9ece3b7dcae5bc40599b3c4c371ccb | [
"MIT"
] | null | null | null | import datetime
def transform(events, location):
    """Map raw Meetup API event dicts onto the pipeline's common event shape.

    Returns a list of plain dicts; `location` is stamped onto every event.
    """

    def _fmt(ms_timestamp):
        # Meetup timestamps are milliseconds since the epoch.
        return datetime.datetime.fromtimestamp(int(ms_timestamp / 1000)).strftime(
            "%Y-%m-%d %H:%M:%SZ"
        )

    transformed = []
    for raw in events:
        stamp = _fmt(raw["time"])
        transformed.append(
            {
                "name": raw["name"],
                "description": raw["description"],
                "url": raw["event_url"],
                "start": stamp,
                # TODO update to end time
                "end": stamp,
                # TODO calculate duration fo event
                "duration": 10000,
                "topics": [],
                "entry": ["free"],
                "category": raw["group"]["name"],
                "source": "meetup",
                "location": location,
            }
        )
    return transformed
| 27.527778 | 84 | 0.468214 |
873bf7b6703ba4dbb10e4e683cc4941deb5aa003 | 602 | py | Python | test/test_spider.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | [
"MIT"
] | 6 | 2021-08-09T01:35:44.000Z | 2022-02-15T08:14:29.000Z | test/test_spider.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | [
"MIT"
] | null | null | null | test/test_spider.py | coco369/fastspider | 464ba47176c005ed97005a79c5c4eee0bf0740b6 | [
"MIT"
] | 4 | 2021-08-13T06:41:13.000Z | 2021-12-07T15:53:56.000Z | # encoding=utf-8
import time
import fastspider
class TestSpider(fastspider.LightSpider):
    """Demo spider: crawl the Douban Top-250 movies page and print each
    movie's link and title."""

    douban_url = "https://movie.douban.com/top250"

    def start_requests(self):
        # Seed the crawl with the single Top-250 listing page.
        yield fastspider.Request(url=self.douban_url)
        # yield fastspider.Request(url=self.douban_url)

    def parser(self, request, response):
        """Extract (href, title) for every movie entry on the page."""
        movies = response.xpath('//*[@id="content"]/div/div[1]/ol/li')
        for movie in movies:
            href = movie.xpath('./div/div[2]/div[1]/a/@href')[0].get()
            title = movie.xpath('./div/div[2]/div[1]/a/span[1]/text()')[0].get()
            print(href, title)
if __name__ == "__main__":
TestSpider(3).start()
| 24.08 | 71 | 0.686047 |
fa9670d2d093ebe558aaf467ecc3d30e78091c2f | 2,373 | py | Python | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-xeio01 | 9a6b9f852e51e371a603045ef8753012056b7570 | [
"Apache-2.0"
] | null | null | null | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-xeio01 | 9a6b9f852e51e371a603045ef8753012056b7570 | [
"Apache-2.0"
] | null | null | null | test/solutions/test_checkout.py | DPNT-Sourcecode/CHK-xeio01 | 9a6b9f852e51e371a603045ef8753012056b7570 | [
"Apache-2.0"
] | null | null | null | import unittest
from lib.solutions.checkout import checkout
class TestCheckout(unittest.TestCase):
    """Behavioural tests for `checkout`: baskets are encoded as strings of
    SKU letters; the expected totals encode the multi-buy and cross-item
    promotion rules."""

    def test_illegal_input_returns_minus_one(self):
        # Unknown SKU characters make the whole basket invalid.
        self.assertEqual(
            -1,
            checkout('1')
        )

    def test_promo_applied_to_a(self):
        self.assertEqual(
            250,
            checkout('AAAAAA')
        )

    def test_promo_applied_to_b(self):
        self.assertEqual(
            90,
            checkout('BBBB')
        )

    def test_promo_applied_to_a_and_b(self):
        self.assertEqual(
            175,
            checkout('AAABB')
        )

    def test_promo_applied_to_e_and_b(self):
        # Buying E's gives B's free (cross-item offer).
        self.assertEqual(
            110,
            checkout('EEBB')
        )

    def test_multiple_promo_applied_to_a(self):
        self.assertEqual(
            330,
            checkout('AAAAAAAA')
        )

    def test_multiple_promo_applied_to_b(self):
        self.assertEqual(
            125,
            checkout('EEBBB')
        )

    def test_promo_applied_to_F(self):
        self.assertEqual(
            20,
            checkout('FFF')
        )

    def test_promo_applied_to_f_sanely(self):
        # The "3 for the price of 2" F offer applies only once here.
        self.assertEqual(
            30,
            checkout('FFFF')
        )

    # should change the tests to instead pass in a test config with loads
    # of items and promos of the expected format
    # but maybe later
    def test_valid_input_returns_correct_amount(self):
        self.assertEqual(
            130,
            checkout('ABCDD')
        )

    def test_t_returns_correct_amount(self):
        self.assertEqual(
            20,
            checkout('T')
        )

    def test_x_returns_correct_amount(self):
        self.assertEqual(
            17,
            checkout('X')
        )

    def test_y_returns_correct_amount(self):
        self.assertEqual(
            20,
            checkout('Y')
        )

    def test_z_returns_correct_amount(self):
        self.assertEqual(
            21,
            checkout('Z')
        )

    def test_weird_promo_returns_correct_amount(self):
        # Group discount: any three of the S/T/X/Y/Z family for 45.
        self.assertEqual(
            45,
            checkout('STX')
        )

    def test_weird_promo_returns_correct_amount_two(self):
        self.assertEqual(
            90,
            checkout('STXSTX')
        )
if __name__ == '__main__':
    # Allow running this file directly: `python test_checkout.py`.
    unittest.main()
| 21.770642 | 73 | 0.542351 |
0e83289648c918844fd7c741d08479f4835f30e5 | 12,352 | py | Python | start-training.py | staceysv/safelife | 9577a8f5bece55a71ea9d069f24e6fc2c0b4fd8d | [
"Apache-2.0"
] | null | null | null | start-training.py | staceysv/safelife | 9577a8f5bece55a71ea9d069f24e6fc2c0b4fd8d | [
"Apache-2.0"
] | null | null | null | start-training.py | staceysv/safelife | 9577a8f5bece55a71ea9d069f24e6fc2c0b4fd8d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Main entry point for starting a training job.
"""
import argparse
import logging
import os
import platform
import shutil
import subprocess
import sys
import time
import json
import torch
import numpy as np
# Shared logger for this entry point; handlers are attached later by
# training.logging_setup.setup_logging().
logger = logging.getLogger('training')

# Absolute path of the directory containing this script; used to locate
# setup.py, run-notes.txt, and the default data/ output directory.
safety_dir = os.path.realpath(os.path.dirname(__file__))
def parse_args(argv=None):
    """Parse command-line arguments for a training run.

    :param argv: Argument list to parse. Defaults to ``sys.argv[1:]``,
        read at *call* time. (The previous default ``argv=sys.argv[1:]``
        was evaluated once at import, so any later change to ``sys.argv``
        was silently ignored.)
    :return: The parsed :class:`argparse.Namespace`, with ``extra_params``
        decoded from JSON into a dict when supplied.
    """
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(description="""
    Run agent training using proximal policy optimization.

    This will set up the data/log directories, optionally install any needed
    dependencies, start tensorboard, configure loggers, and start the actual
    training loop. If the data directory already exists, it will prompt for
    whether the existing data should be overwritten or appended. The latter
    allows for training to be restarted if interrupted.
    """)
    parser.add_argument('data_dir', nargs='?',
        help="the directory in which to store this run's data")
    parser.add_argument('--run-type', choices=('train', 'benchmark', 'inspect'),
        default='train',
        help="What to do once the algorithm and environments have been loaded. "
        "If 'train', train the model. If 'benchmark', run the model on testing "
        "environments. If 'inspect', load an ipython prompt for interactive "
        "debugging.")
    parser.add_argument('--algo', choices=('ppo', 'dqn'), default='ppo')
    parser.add_argument('-e', '--env-type', default='append-spawn')
    parser.add_argument('-s', '--steps', type=float, default=6e6,
        help='Length of training in steps (default: 6e6).')
    parser.add_argument('--seed', default=None, type=int)
    parser.add_argument('--deterministic', action="store_true",
        help="If set, uses deterministic cudnn routines. This may slow "
        "down training, but it should make the results reproducable.")
    parser.add_argument('--port', type=int,
        help="Port on which to run tensorboard.")
    parser.add_argument('-w', '--wandb', action='store_true',
        help='Use wandb for analytics.')
    parser.add_argument('--project', default="stacey/saferlife",
        help='[Entity and] project for wandb. '
        'Eg: "safelife/multiagent" or "multiagent"')
    parser.add_argument('--shutdown', action="store_true",
        help="Shut down the system when the job is complete"
        "(helpful for running remotely).")
    parser.add_argument('--ensure-gpu', action='store_true',
        help="Check that the machine we're running on has CUDA support")
    parser.add_argument('-x', '--extra-params', default=None,
        help="Extra config values/hyperparameters. Should be loadable as JSON.")
    args = parser.parse_args(argv)

    if args.extra_params:
        try:
            args.extra_params = json.loads(args.extra_params)
            assert isinstance(args.extra_params, dict)
        except (json.JSONDecodeError, AssertionError):
            print(f"'{args.extra_params}' is not a valid JSON dictionary. "
                  "Make sure to escape your quotes!")
            # sys.exit rather than the site-module `exit` helper, which may
            # not exist when running without the site module.
            sys.exit(1)

    assert args.wandb or args.data_dir or args.run_type == 'inspect', (
        "Either a data directory must be set or the wandb flag must be set. "
        "If wandb is set but there is no data directory, then a run name will be "
        "picked automatically.")

    if args.ensure_gpu:
        assert torch.cuda.is_available(), "CUDA support requested but not available!"

    return args
def build_c_extensions():
    """Compile SafeLife's C extensions in place (``setup.py build_ext``).

    Raises :class:`subprocess.CalledProcessError` if the build fails.
    Previously a failed build was silently ignored and training would
    later crash with a confusing ImportError instead.
    """
    subprocess.run([
        "python3", os.path.join(safety_dir, "setup.py"),
        "build_ext", "--inplace"
    ], check=True)
def setup_config_and_wandb(args):
    """
    Setup wandb, the logging directory, and update the global config object.

    There is a two-way sync between wandb and the parameter config.
    Any parameters that are set in args are passed to wandb, and any parameters
    that are set in wandb (if, e.g., this run is part of a parameter sweep) are
    loaded back into the config.

    Parameters
    ----------
    args : Namespace
        Values returned from parse_args

    Returns
    -------
    config : GlobalConfig
        The global configuration object. This is a singleton and can be
        imported directly, but it's helpful in this file to pass it around and
        so that function inputs are made explicit.
    job_name : str
    data_dir : str
        The directory where data gets written.
    """
    from training import logging_setup
    from training.global_config import config

    # Check to see if the data directory is already in use.
    # If it is, prompt the user if they want to overwrite it.
    if args.data_dir is not None:
        data_dir = os.path.realpath(args.data_dir)
        job_name = os.path.basename(data_dir)
        if os.path.exists(data_dir) and args.run_type == 'train':
            print("The directory '%s' already exists." % data_dir)
            print("Would you like to overwrite the old data, append to it, or abort?")
            # Directories named tmp* are treated as scratch space and are
            # clobbered without prompting.
            response = 'overwrite' if job_name.startswith('tmp') else None
            while response not in ('overwrite', 'append', 'abort'):
                response = input("(overwrite / append / abort) > ")
            if response == 'overwrite':
                print("Overwriting old data.")
                shutil.rmtree(data_dir)
            elif response == 'abort':
                print("Aborting.")
                exit()
            # 'append' falls through: keep the directory and add to it.
    else:
        job_name = data_dir = None

    # Remove some args from the config, just to make things neater.
    # These args don't actually affect the run output.
    base_config = {
        k: v for k, v in vars(args).items() if k not in
        ['port', 'wandb', 'ensure_gpu', 'project', 'shutdown', 'extra_params']
    }

    # tag any hyperparams from the commandline
    if args.extra_params is not None:
        config.add_hyperparams(args.extra_params)

    if args.wandb:
        import wandb
        if wandb.login():
            # Optional free-form notes file attached to the wandb run.
            run_notes = os.path.join(safety_dir, 'run-notes.txt')
            if os.path.exists(run_notes):
                run_notes = open(run_notes).read()
            else:
                run_notes = None
            # --project may be "entity/project" or just "project".
            if args.project and '/' in args.project:
                entity, project = args.project.split("/", 1)
            elif args.project:
                entity, project = None, args.project
            else:
                entity = project = None  # use values from wandb/settings
            wandb.init(
                name=job_name, notes=run_notes, project=project, entity=entity,
                config=base_config)
            # Note that wandb config can contain different and/or new keys that
            # aren't in the command-line arguments. This is especially true for
            # wandb sweeps.
            config.update(wandb.config._items)
            # Save the environment type to the wandb summary data.
            # This allows env_type show up in the benchmark table.
            wandb.run.summary['env_type'] = config['env_type']
            # With no explicit data_dir, derive one from the wandb run id.
            if job_name is None:
                job_name = wandb.run.name
                data_dir = os.path.join(
                    safety_dir, 'data', time.strftime("%Y-%m-%d-") + wandb.run.id)
            logging_setup.save_code_to_wandb()
    else:
        config.update(base_config)

    if data_dir is not None:
        os.makedirs(data_dir, exist_ok=True)
    logging_setup.setup_logging(
        data_dir, debug=(config['run_type'] == 'inspect'))
    logger.info("COMMAND ARGUMENTS: %s", ' '.join(sys.argv))
    logger.info("TRAINING RUN: %s", job_name)
    logger.info("ON HOST: %s", platform.node())

    return config, job_name, data_dir
def set_global_seed(config):
    """Seed safelife's RNG and torch from config['seed'].

    A random seed is drawn when none was supplied. The seed is capped at
    2**53 so it survives a round-trip through JSON/JavaScript floats.
    """
    from safelife.random import set_rng

    # Make sure the seed can be represented by floating point exactly.
    # This is just because we want to pass it over the web, and javascript
    # doesn't have 64 bit integers.
    if config.get('seed') is None:
        config['seed'] = np.random.randint(2**53)
    seed_seq = np.random.SeedSequence(config['seed'])
    logger.info("SETTING GLOBAL SEED: %i", seed_seq.entropy)
    set_rng(np.random.default_rng(seed_seq))
    # torch.manual_seed only accepts a 32-bit-safe value here.
    torch.manual_seed(seed_seq.entropy & (2**31 - 1))
    if config['deterministic']:
        # Note that this may slow down performance
        # See https://pytorch.org/docs/stable/notes/randomness.html#cudnn
        torch.backends.cudnn.deterministic = True
def launch_tensorboard(job_name, data_dir, port):
    """
    Launch tensorboard as a subprocess.

    Returns the Popen handle, or None when no port or no data directory
    was given. Note that this process must be killed before the script
    exits.
    """
    if not port or data_dir is None:
        return None
    return subprocess.Popen([
        "tensorboard", "--logdir_spec",
        job_name + ':' + data_dir, '--port', str(port)])
algo_args = {}
def launch_training(config, data_dir):
    """Build the environments and the chosen algorithm, then run the
    configured job (train / benchmark / inspect).

    Side effect: stores the algorithm's constructor kwargs in the module
    global ``algo_args`` so cleanup() can reach the data logger later.
    """
    from training import logging_setup
    from training import models
    from training.env_factory import build_environments

    envs = build_environments(config, data_dir)
    obs_shape = envs['training'][0].observation_space.shape

    global algo_args
    algo_args = {
        'training_envs': envs['training'],
        'testing_envs': envs.get('validation'),
        'data_logger': logging_setup.setup_data_logger(data_dir, 'training'),
    }

    # Pick the RL algorithm; each needs differently-shaped model kwargs.
    if config['algo'] == 'ppo':
        from training.ppo import PPO as algo_cls
        algo_args['model'] = models.SafeLifePolicyNetwork(obs_shape)
    elif config['algo'] == 'dqn':
        from training.dqn import DQN as algo_cls
        algo_args['training_model'] = models.SafeLifeQNetwork(obs_shape)
        algo_args['target_model'] = models.SafeLifeQNetwork(obs_shape)
    else:
        logger.error("Unexpected algorithm type '%s'", config['algo'])
        raise ValueError("unexpected algorithm type")

    algo = algo_cls(**algo_args)

    if config.get('_wandb') is not None:
        # Before we start running things, save the config object back to wandb.
        import wandb
        config2 = config.copy()
        config2.pop('_wandb', None)
        wandb.config.update(config2, allow_val_change=True)

    print("")
    logger.info("Hyperparameters: %s", config)
    config.check_for_unused_hyperparams()
    print("")

    if config['run_type'] == "train":
        algo.train(int(config['steps']))
        # After training, score the final model on the benchmark levels.
        if 'benchmark' in envs:
            algo.run_episodes(envs['benchmark'], num_episodes=1000)
    elif config['run_type'] == "benchmark" and "benchmark" in envs:
        algo.run_episodes(envs['benchmark'], num_episodes=1000)
    elif config['run_type'] == "inspect":
        # Drop into an interactive shell with everything constructed.
        from IPython import embed
        print('')
        embed()
def cleanup(config, data_dir, tb_proc, shutdown):
    """Summarize the run, close wandb, kill tensorboard, and optionally
    schedule a machine shutdown.

    :param tb_proc: Popen handle from launch_tensorboard(), or None.
    :param shutdown: If True, schedule `shutdown +3` (leaves time to
        recover if the job crashed right at the start).
    """
    from safelife.safelife_logger import summarize_run
    if config.get('_wandb') is not None:
        import wandb
        wandb_run = wandb.run
    else:
        wandb_run = None
    try:
        if config['run_type'] in ['train', 'benchmark']:
            summarize_run(data_dir, wandb_run, algo_args["data_logger"])
    except Exception:  # noqa
        import traceback
        # Fixes two bugs in the old call
        # `logger.warn("Exception during summarization:\n", format_exc())`:
        # logger.warn is deprecated, and the message had no %s placeholder,
        # so the traceback argument was never actually rendered.
        logger.warning("Exception during summarization:\n%s",
                       traceback.format_exc())
    if wandb_run is not None:
        wandb_run.finish()
    if tb_proc is not None:
        tb_proc.kill()
    if shutdown:
        # Shutdown in 3 minutes.
        # Enough time to recover if it crashed at the start.
        subprocess.run("sudo shutdown +3", shell=True)
        logger.critical("Shutdown commenced, but keeping ssh available...")
        subprocess.run("sudo rm -f /run/nologin", shell=True)
def main():
    """Top-level driver: parse args, build extensions, configure the run,
    train, and always clean up (even on crash or Ctrl-C)."""
    # We'll be importing safelife modules, so we need to make
    # sure that the safelife package is on the import path
    sys.path.insert(1, safety_dir)

    args = parse_args()
    build_c_extensions()
    config, job_name, data_dir = setup_config_and_wandb(args)
    set_global_seed(config)
    tb_proc = launch_tensorboard(job_name, data_dir, args.port)

    try:
        launch_training(config, data_dir)
    except KeyboardInterrupt:
        logger.critical("Keyboard Interrupt. Ending early.\n")
    except Exception:
        logger.exception("Ran into an unexpected error. Aborting training.\n")
    finally:
        # Always summarize/close out, whatever happened above.
        cleanup(config, data_dir, tb_proc, args.shutdown)


if __name__ == "__main__":
    main()
| 36.116959 | 86 | 0.643701 |
ffed86bf13bda84ac5e2586ca3ec7f812d3be6b1 | 1,183 | py | Python | setup.py | torms3/DataTools | 8144a9485ca69dc2208bbcc20f59132def977b7a | [
"MIT"
] | null | null | null | setup.py | torms3/DataTools | 8144a9485ca69dc2208bbcc20f59132def977b7a | [
"MIT"
] | null | null | null | setup.py | torms3/DataTools | 8144a9485ca69dc2208bbcc20f59132def977b7a | [
"MIT"
] | 2 | 2018-05-29T18:27:18.000Z | 2018-06-01T13:36:24.000Z | from Cython.Build import cythonize
# from distutils.core import Extension
from distutils.sysconfig import get_python_inc
from setuptools import setup, Extension
import numpy
import os
include_dirs = [
'./datatools',
'./datatools/backend',
os.path.dirname(get_python_inc()),
numpy.get_include(),
]
extensions = [
Extension(
'datatools._frontend',
sources = ['datatools/*.pyx', 'datatools/c_frontend.cpp'],
include_dirs=include_dirs,
language='c++',
extra_link_args=['-std=c++11'],
extra_compile_args=['-std=c++11', '-w']
),
]
setup(
name='datatools',
version='0.0.1',
description='A C++ extension to DataProvider.',
url='https://github.com/torms3/DataTools',
author='Kisuk Lee',
author_email='kisuklee@mit.edu',
license='MIT',
requires=['cython','numpy'],
packages=['datatools','datatools.cremi'],
package_data={
'': [
'datatools/*.h',
'datatools/*.cpp',
'datatools/*.pyx',
'datatools/backend/*.hpp',
]
},
include_package_data=True,
zip_safe=False,
ext_modules=cythonize(extensions)
)
| 22.75 | 66 | 0.610313 |
665e47cb7fcf43393dde27abaeda050b080b1cb8 | 11,614 | py | Python | lbry/scripts/wallet_server_monitor.py | JupyterJones/lbry-sdk | be89436fa869e1b4b9f05c3faa5c126ebcfe6e57 | [
"MIT"
] | null | null | null | lbry/scripts/wallet_server_monitor.py | JupyterJones/lbry-sdk | be89436fa869e1b4b9f05c3faa5c126ebcfe6e57 | [
"MIT"
] | null | null | null | lbry/scripts/wallet_server_monitor.py | JupyterJones/lbry-sdk | be89436fa869e1b4b9f05c3faa5c126ebcfe6e57 | [
"MIT"
] | null | null | null | import sys
import json
import random
import asyncio
import argparse
import traceback
from time import time
from datetime import datetime
# Third-party requirements are imported up front so we can print a friendly
# install hint instead of a bare ImportError traceback.
try:
    import aiohttp
    import psycopg2
    import slack
except ImportError:
    print(f"To run {sys.argv[0]} you need to install aiohttp, psycopg2 and slackclient:")
    print(f"")
    print(f" $ pip install aiohttp psycopg2 slackclient")
    print("")
    sys.exit(1)

# Dict insertion order matters (guaranteed from 3.7): the INSERT column list
# and the value list are built from the same dict in handle_analytics_event.
if not sys.version_info >= (3, 7):
    print("Please use Python 3.7 or higher, this script expects that dictionary keys preserve order.")
    sys.exit(1)
async def handle_slow_query(cursor, server, command, queries):
    """Record each slow/interrupted query in wallet_server_slow_queries."""
    insert_sql = """
        INSERT INTO wallet_server_slow_queries (server, command, query, event_time) VALUES (%s,%s,%s,%s);
    """
    for slow_query in queries:
        cursor.execute(insert_sql, (server, command, slow_query, datetime.now()))
async def handle_analytics_event(cursor, event, server):
    """Flatten one wallet-server analytics event into the stats tables.

    Writes the session count to wallet_server_stats, then one row per API
    command to wallet_server_command_stats. Eight-element timing lists are
    expanded into *_avg ... *_max columns; interrupted queries are logged
    individually via handle_slow_query().
    """
    # Column suffixes matching the 8 percentile slots of a timing list.
    percentile_suffixes = ('_avg', '_min', '_five', '_twenty_five',
                           '_fifty', '_seventy_five', '_ninety_five', '_max')

    cursor.execute("""
        INSERT INTO wallet_server_stats (server, sessions, event_time) VALUES (%s,%s,%s);
    """, (server, event['status']['sessions'], datetime.now()))

    for command, stats in event["api"].items():
        row = {
            'server': server,
            'command': command,
            'event_time': datetime.now()
        }
        for key, value in stats.items():
            if key.endswith("_queries"):
                # All *_queries lists are excluded from the aggregate row;
                # interrupted ones additionally get logged one-by-one.
                if key == "interrupted_queries":
                    await handle_slow_query(cursor, server, command, value)
                continue
            if isinstance(value, list):
                for i, suffix in enumerate(percentile_suffixes):
                    row[key + suffix] = value[i]
            else:
                row[key] = value
        # Column names and placeholders are derived from the same dict, so
        # insertion order lines them up with list(row.values()).
        cursor.execute(f"""
            INSERT INTO wallet_server_command_stats ({','.join(row)})
            VALUES ({','.join('%s' for _ in row)});
        """, list(row.values()))
# Global slack client; set in the __main__ block when a token is supplied.
SLACKCLIENT = None


async def boris_says(what_boris_says):
    """Post a message to #tech-sdk as "boris the wallet monitor", or just
    print it when no slack client is configured."""
    if not SLACKCLIENT:
        print(what_boris_says)
        return
    await SLACKCLIENT.chat_postMessage(
        username="boris the wallet monitor",
        icon_emoji=":boris:",
        channel='#tech-sdk',
        text=what_boris_says
    )
async def monitor(db, server):
    """Watch one wallet server's analytics websocket forever.

    Reconnects with a linearly growing backoff (30s, 60s, ...) when the
    server stops responding, announces status changes via boris_says(),
    and raises the alarm when no new block arrives for over 10 minutes.
    Returns only if the very first connection attempt fails.
    """
    c = db.cursor()
    delay = 30
    # (last seen block height, timestamp when it last changed)
    height_changed = None, time()
    height_change_reported = False
    first_attempt = True
    while True:
        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(10)) as session:
                try:
                    ws = await session.ws_connect(server)
                except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
                    # A server that's down on the very first try is assumed
                    # permanently dead: report once and give up on it.
                    if first_attempt:
                        print(f"failed connecting to {server}")
                        await boris_says(random.choice([
                            f"{server} is not responding, probably dead, will not connect again.",
                        ]))
                        return
                    raise
                if first_attempt:
                    await boris_says(f"{server} is online")
                else:
                    await boris_says(f"{server} is back online")
                # Successful connect resets the reconnect backoff.
                delay = 30
                first_attempt = False
                print(f"connected to {server}")
                async for msg in ws:
                    event = json.loads(msg.data)
                    height = event['status']['height']
                    height_change_time = int(time()-height_changed[1])
                    if height_changed[0] != height:
                        height_changed = (height, time())
                        # If we previously complained about a stall, announce
                        # recovery (and how long the stall lasted).
                        if height_change_reported:
                            await boris_says(
                                f"Server {server} received new block after {height_change_time / 60:.1f} minutes.",
                            )
                            height_change_reported = False
                    elif height_change_time > 10*60:
                        # Stalled >10 min: complain once, then re-complain
                        # roughly every 2 minutes while the stall lasts.
                        if not height_change_reported or height_change_time % (2*60) == 0:
                            await boris_says(
                                f"It's been {height_change_time/60:.1f} minutes since {server} received a new block.",
                            )
                            height_change_reported = True
                    await handle_analytics_event(c, event, server)
                    db.commit()
        except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
            # Connection dropped after having been up: warn and retry with
            # a longer delay each time.
            await boris_says(random.choice([
                f"<!channel> Guys, we have a problem! Nobody home at {server}. Will check on it again in {delay} seconds.",
                f"<!channel> Something wrong with {server}. I think dead. Will poke it again in {delay} seconds.",
                f"<!channel> Don't hear anything from {server}, maybe dead. Will try it again in {delay} seconds.",
            ]))
            await asyncio.sleep(delay)
            delay += 30
async def main(dsn, servers):
    """Ensure the analytics schema exists, announce startup, then monitor
    every server concurrently until interrupted."""
    db = ensure_database(dsn)
    greetings = [
        "No fear, Boris is here! I will monitor the servers now and will try not to fall asleep again.",
        "Comrad the Cat and Boris are here now, monitoring wallet servers.",
    ]
    await boris_says(random.choice(greetings))
    watchers = [asyncio.create_task(monitor(db, server)) for server in servers]
    await asyncio.gather(*watchers)
def ensure_database(dsn):
    """Connect to PostgreSQL with the given DSN dict and create any of the
    three analytics tables that don't exist yet; returns the connection.

    to_regclass() returns NULL for a missing relation, which is how we test
    for each table's existence.
    """
    db = psycopg2.connect(**dsn)
    c = db.cursor()
    c.execute("SELECT to_regclass('wallet_server_stats');")
    if c.fetchone()[0] is None:
        print("creating table 'wallet_server_stats'...")
        c.execute("""
        CREATE TABLE wallet_server_stats (
            server text,
            sessions integer,
            event_time timestamp
        );
        """)
    c.execute("SELECT to_regclass('wallet_server_slow_queries');")
    if c.fetchone()[0] is None:
        print("creating table 'wallet_server_slow_queries'...")
        c.execute("""
        CREATE TABLE wallet_server_slow_queries (
            server text,
            command text,
            query text,
            event_time timestamp
        );
        """)
    c.execute("SELECT to_regclass('wallet_server_command_stats');")
    if c.fetchone()[0] is None:
        print("creating table 'wallet_server_command_stats'...")
        c.execute("""
        CREATE TABLE wallet_server_command_stats (
            server text,
            command text,
            event_time timestamp,
            -- total requests received during event window
            receive_count integer,
            -- sum of these is total responses made
            cache_response_count integer,
            query_response_count integer,
            intrp_response_count integer,
            error_response_count integer,
            -- millisecond timings for non-cache responses (response_*, interrupt_*, error_*)
            response_avg float,
            response_min float,
            response_five float,
            response_twenty_five float,
            response_fifty float,
            response_seventy_five float,
            response_ninety_five float,
            response_max float,
            interrupt_avg float,
            interrupt_min float,
            interrupt_five float,
            interrupt_twenty_five float,
            interrupt_fifty float,
            interrupt_seventy_five float,
            interrupt_ninety_five float,
            interrupt_max float,
            error_avg float,
            error_min float,
            error_five float,
            error_twenty_five float,
            error_fifty float,
            error_seventy_five float,
            error_ninety_five float,
            error_max float,
            -- response, interrupt and error each also report the python, wait and sql stats
            python_avg float,
            python_min float,
            python_five float,
            python_twenty_five float,
            python_fifty float,
            python_seventy_five float,
            python_ninety_five float,
            python_max float,
            wait_avg float,
            wait_min float,
            wait_five float,
            wait_twenty_five float,
            wait_fifty float,
            wait_seventy_five float,
            wait_ninety_five float,
            wait_max float,
            sql_avg float,
            sql_min float,
            sql_five float,
            sql_twenty_five float,
            sql_fifty float,
            sql_seventy_five float,
            sql_ninety_five float,
            sql_max float,
            -- extended timings for individual sql executions
            individual_sql_avg float,
            individual_sql_min float,
            individual_sql_five float,
            individual_sql_twenty_five float,
            individual_sql_fifty float,
            individual_sql_seventy_five float,
            individual_sql_ninety_five float,
            individual_sql_max float,
            individual_sql_count integer
        );
        """)
    db.commit()
    return db
def get_dsn(args):
    """Build a psycopg2 connection dict from the pg_* CLI options,
    dropping any that are unset or empty."""
    fields = ('dbname', 'user', 'password', 'host', 'port')
    pairs = ((name, getattr(args, 'pg_' + name)) for name in fields)
    return {name: value for name, value in pairs if value}
def get_servers(args):
    """Expand --server-range ('3' or '1..5', inclusive) into a list of
    URLs built from the --server-url '{}' template."""
    first, sep, last = args.server_range.partition('..')
    if not sep:
        last = first
    lo, hi = int(first), int(last)
    return [args.server_url.format(n) for n in range(lo, hi + 1)]
def get_slack_client(args):
    """Return an async slack WebClient, or None when no token was given."""
    token = args.slack_token
    if not token:
        return None
    return slack.WebClient(token=token, run_async=True)
def get_args(argv=None):
    """Parse command-line options for the monitor.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``
        (argparse's own default when passed None). Accepting an explicit
        list makes the parser testable without touching the process argv;
        existing ``get_args()`` calls behave exactly as before.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--pg-dbname", default="analytics", help="PostgreSQL database name")
    parser.add_argument("--pg-user", help="PostgreSQL username")
    parser.add_argument("--pg-password", help="PostgreSQL password")
    parser.add_argument("--pg-host", default="localhost", help="PostgreSQL host")
    parser.add_argument("--pg-port", default="5432", help="PostgreSQL port")
    parser.add_argument("--server-url", default="http://spv{}.lbry.com:50005", help="URL with '{}' placeholder")
    parser.add_argument("--server-range", default="1..5", help="Range of numbers or single number to use in URL placeholder")
    parser.add_argument("--slack-token")
    return parser.parse_args(argv)
if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    args = get_args()
    # Rebind the module-global so boris_says() talks to slack when possible.
    SLACKCLIENT = get_slack_client(args)
    try:
        loop.run_until_complete(main(get_dsn(args), get_servers(args)))
    except KeyboardInterrupt as e:
        # Ctrl-C is a normal way to stop the monitor; exit quietly.
        pass
    except Exception as e:
        # Report the crash to slack before the farewell message below.
        loop.run_until_complete(boris_says("<!channel> I crashed with the following exception:"))
        loop.run_until_complete(boris_says(traceback.format_exc()))
    finally:
        loop.run_until_complete(
            boris_says(random.choice([
                "Wallet servers will have to watch themselves, I'm leaving now.",
                "I'm going to go take a nap, hopefully nothing blows up while I'm gone.",
                "Babushka is calling, I'll be back later, someone else watch the servers while I'm gone.",
            ]))
        )
| 34.981928 | 125 | 0.572585 |
efff51da9c8609287140cfb126779e9e0c1e4e23 | 723 | py | Python | StackApp/env/lib/python2.7/site-packages/pylint/test/input/func_first_arg.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | virtual/lib/python3.6/site-packages/pylint/test/input/func_first_arg.py | evantoh/patient-management-system | 6637eb1344775633759165260ed99843581c0e72 | [
"Unlicense"
] | 32 | 2018-05-01T05:24:43.000Z | 2022-03-11T23:20:39.000Z | virtual/lib/python3.6/site-packages/pylint/test/input/func_first_arg.py | evantoh/patient-management-system | 6637eb1344775633759165260ed99843581c0e72 | [
"Unlicense"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | # pylint: disable=C0111, W0232
"""check for methods first arguments
"""
__revision__ = 0
class Obj(object):
# C0202, classmethod
def __new__(something):
pass
# C0202, classmethod
def class1(cls):
pass
class1 = classmethod(class1)
def class2(other):
pass
class2 = classmethod(class2)
class Meta(type):
# C0204, metaclass __new__
def __new__(other, name, bases, dct):
pass
# C0203, metaclass method
def method1(cls):
pass
def method2(other):
pass
# C0205, metaclass classmethod
def class1(mcs):
pass
class1 = classmethod(class1)
def class2(other):
pass
class2 = classmethod(class2)
| 16.813953 | 41 | 0.608575 |
df5bf95c3bf4348ed2b8a8a2ee595205e45af97c | 9,132 | py | Python | salt/modules/win_pkg.py | mika/salt | 8430482c7177356964c894d161830c94d09f1cab | [
"Apache-2.0"
] | 1 | 2016-09-16T17:12:11.000Z | 2016-09-16T17:12:11.000Z | salt/modules/win_pkg.py | mika/salt | 8430482c7177356964c894d161830c94d09f1cab | [
"Apache-2.0"
] | null | null | null | salt/modules/win_pkg.py | mika/salt | 8430482c7177356964c894d161830c94d09f1cab | [
"Apache-2.0"
] | null | null | null | '''
A module to manage software on Windows
'''
# pywin32 is only present on Windows; __virtual__() below keeps the module
# from loading elsewhere, so a failed import here is expected and ignored.
# Narrowed from a bare `except:` which also masked unrelated errors
# (including KeyboardInterrupt/SystemExit).
try:
    import pythoncom
    import win32com.client
    import win32api
    import win32con
except ImportError:
    pass
def __virtual__():
    '''
    Set the virtual pkg module if the os is Windows
    '''
    if __grains__['os'] == 'Windows':
        return 'pkg'
    return False
def _list_removed(old, new):
'''
List the packages which have been removed between the two package objects
'''
pkgs = []
for pkg in old:
if pkg not in new:
pkgs.append(pkg)
return pkgs
def available_version(name):
    '''
    The available version of the package in the repository

    CLI Example::

        salt '*' pkg.available_version <package name>
    '''
    # Placeholder: repository queries are not implemented for Windows yet;
    # callers receive this literal string instead of a version.
    return 'Not implemented on Windows yet'


def upgrade_available(name):
    '''
    Check whether or not an upgrade is available for a given package

    CLI Example::

        salt '*' pkg.upgrade_available <package name>
    '''
    # Placeholder -- see available_version().
    return 'Not implemented on Windows yet'


def list_upgrades():
    '''
    List all available package upgrades on this system

    CLI Example::

        salt '*' pkg.list_upgrades
    '''
    # Placeholder -- see available_version().
    return 'Not implemented on Windows yet'
def version(name):
    '''
    Returns a version if the package is installed, else returns an empty string

    CLI Example::

        salt '*' pkg.version <package name>
    '''
    # dict.get replaces the membership test + second lookup of the
    # original `if name in pkgs: return pkgs[name]` pattern.
    return list_pkgs().get(name, '')
def list_pkgs(*args):
    '''
    List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    CLI Example::

        salt '*' pkg.list_pkgs
    '''
    # COM must be initialized on this thread before the WMI queries below.
    # NOTE(review): if a helper raises, CoUninitialize is skipped -- a
    # try/finally around the body would be safer; confirm before changing.
    pythoncom.CoInitialize()
    if len(args) == 0:
        # No filter: merge registry- and MSI-discovered software. On a key
        # collision the MSI entry wins (it comes later in the merge).
        pkgs = dict(
            list(_get_reg_software().items()) +
            list(_get_msi_software().items()))
    else:
        # get package version for each package in *args
        pkgs = {}
        for arg in args:
            pkgs.update(_search_software(arg))
    pythoncom.CoUninitialize()
    return pkgs
def _search_software(target):
    '''
    This searches the msi product databases for name matches
    of the list of target products, it will return a dict with
    values added to the list passed in
    '''
    software = dict(
        list(_get_reg_software().items()) +
        list(_get_msi_software().items()))
    needle = target.lower()
    return {
        name: ver for name, ver in software.items()
        if name is not None and needle in name.lower()
    }
def _get_msi_software():
    '''
    This searches the msi product databases and returns a dict keyed
    on the product name and all the product properties in another dict
    '''
    win32_products = {}
    this_computer = "."
    # Query the local WMI service for all MSI-installed products.
    wmi_service = win32com.client.Dispatch("WbemScripting.SWbemLocator")
    swbem_services = wmi_service.ConnectServer(this_computer,"root\cimv2")
    products = swbem_services.ExecQuery("Select * from Win32_Product")
    for product in products:
        # Non-ASCII characters in names/versions are dropped rather than
        # raising on encode.
        prd_name = product.Name.encode('ascii', 'ignore')
        prd_ver = product.Version.encode('ascii', 'ignore')
        win32_products[prd_name] = prd_ver
    return win32_products
def _get_reg_software():
    '''
    This searches the uninstall keys in the registry to find
    a match in the sub keys, it will return a dict with the
    display name as the key and the version as the value
    '''
    reg_software = {}
    #This is a list of default OS reg entries that don't seem to be installed
    #software and no version information exists on any of these items
    ignore_list = ['AddressBook',
                   'Connection Manager',
                   'DirectDrawEx',
                   'Fontcore',
                   'IE40',
                   'IE4Data',
                   'IE5BAKEX',
                   'IEData',
                   'MobileOptionPack',
                   'SchedulingAgent',
                   'WIC'
                   ]
    #attempt to corral the wild west of the multiple ways to install
    #software in windows
    reg_entries = dict(list(_get_user_keys().items()) +
                       list(_get_machine_keys().items()))
    for reg_hive, reg_keys in reg_entries.items():
        for reg_key in reg_keys:
            try:
                reg_handle = win32api.RegOpenKeyEx(
                    reg_hive,
                    reg_key,
                    0,
                    win32con.KEY_READ)
            except:
                pass
            #Unsinstall key may not exist for all users
            # NOTE(review): if RegOpenKeyEx raises, reg_handle is either
            # unbound (NameError on first iteration) or stale from the
            # previous key -- a `continue` in the except branch would be
            # safer; confirm intended behavior before changing.
            for name, num, blank, time in win32api.RegEnumKeyEx(reg_handle):
                # NOTE(review): `break` stops enumerating this key entirely
                # at the first GUID-named ({...}) subkey; `continue` may have
                # been intended to just skip GUID entries -- verify.
                if name[0] == '{':
                    break
                prd_uninst_key = "\\".join([reg_key, name])
                #These reg values aren't guaranteed to exist
                prd_name = _get_reg_value(
                    reg_hive,
                    prd_uninst_key,
                    "DisplayName")
                prd_ver = _get_reg_value(
                    reg_hive,
                    prd_uninst_key,
                    "DisplayVersion")
                # Skip the built-in OS entries and anything without a
                # readable display name.
                if not name in ignore_list:
                    if not prd_name == 'Not Found':
                        reg_software[prd_name] = prd_ver
    return reg_software
def _get_machine_keys():
    '''
    This will return the hive 'const' value and some registry keys where
    installed software information has been known to exist for the
    HKEY_LOCAL_MACHINE hive
    '''
    uninstall_keys = [
        "Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall",
        "Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
    ]
    return {win32con.HKEY_LOCAL_MACHINE: uninstall_keys}
def _get_user_keys():
    '''
    This will return the hive 'const' value and some registry keys where
    installed software information has been known to exist for the
    HKEY_USERS hive
    '''
    user_hive_and_keys = {}
    user_keys = []
    users_hive = win32con.HKEY_USERS
    #skip some built in and default users since software information in these
    #keys is limited
    skip_users = ['.DEFAULT',
                  'S-1-5-18',
                  'S-1-5-19',
                  'S-1-5-20']
    sw_uninst_key = "Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
    # Enumerate every loaded user SID under HKEY_USERS.
    reg_handle = win32api.RegOpenKeyEx(
        users_hive,
        '',
        0,
        win32con.KEY_READ)
    for name, num, blank, time in win32api.RegEnumKeyEx(reg_handle):
        #this is some identical key of a sid that contains some software names
        #but no detailed information about the software installed for that user
        # NOTE(review): `break` assumes *_Classes keys sort after all regular
        # SID keys, so the first one ends the scan -- verify that ordering.
        if '_Classes' in name:
            break
        if name not in skip_users:
            usr_sw_uninst_key = "\\".join([name, sw_uninst_key])
            user_keys.append(usr_sw_uninst_key)
    user_hive_and_keys[users_hive] = user_keys
    return user_hive_and_keys
def _get_reg_value(reg_hive, reg_key, value_name=''):
    '''
    Read one value from Windows registry.
    If 'value_name' is an empty string, reads the key's default value.

    Returns the value data, or the string 'Not Found' when the key or
    value cannot be read.
    '''
    key_handle = None
    try:
        key_handle = win32api.RegOpenKeyEx(
            reg_hive, reg_key, 0, win32con.KEY_ALL_ACCESS)
        value_data, _value_type = win32api.RegQueryValueEx(key_handle,
                                                           value_name)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt); any registry failure is reported via the
        # 'Not Found' sentinel, as before.
        value_data = 'Not Found'
    finally:
        # Always release the handle; the old code leaked it whenever
        # RegQueryValueEx raised after a successful RegOpenKeyEx.
        if key_handle is not None:
            try:
                win32api.RegCloseKey(key_handle)
            except Exception:
                pass
    return value_data
def refresh_db():
    '''
    Just recheck the repository and return a dict::

        {'<database name>': Bool}

    CLI Example::

        salt '*' pkg.refresh_db
    '''
    # Placeholder: repository support is not implemented for Windows yet.
    return 'Not implemented on Windows yet'


def install(name, refresh=False, **kwargs):
    '''
    Install the passed package

    Return a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>']}

    CLI Example::

        salt '*' pkg.install <package name>
    '''
    # Placeholder -- see refresh_db().
    return 'Not implemented on Windows yet'


def upgrade():
    '''
    Run a full system upgrade

    Return a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>']}

    CLI Example::

        salt '*' pkg.upgrade
    '''
    # Placeholder -- see refresh_db().
    return 'Not implemented on Windows yet'


def remove(name):
    '''
    Remove a single package

    Return a list containing the removed packages.

    CLI Example::

        salt '*' pkg.remove <package name>
    '''
    # Placeholder -- see refresh_db().
    return 'Not implemented on Windows yet'


def purge(name):
    '''
    Recursively remove a package and all dependencies which were installed
    with it

    Return a list containing the removed packages.

    CLI Example::

        salt '*' pkg.purge <package name>
    '''
    # Placeholder -- see refresh_db().
    return 'Not implemented on Windows yet'
| 28.448598 | 79 | 0.585633 |
dd4a2666d351424876e9f4ec11510640567a1128 | 21,310 | py | Python | sphinx_js/jsdoc.py | nemetris/sphinx-js | 479b398c82aa40fdd8abe73e4207768d8e2c6fad | [
"MIT"
] | null | null | null | sphinx_js/jsdoc.py | nemetris/sphinx-js | 479b398c82aa40fdd8abe73e4207768d8e2c6fad | [
"MIT"
] | null | null | null | sphinx_js/jsdoc.py | nemetris/sphinx-js | 479b398c82aa40fdd8abe73e4207768d8e2c6fad | [
"MIT"
] | null | null | null | """JavaScript analyzer
Analyzers run jsdoc or typedoc or whatever, squirrel away their output, and
then lazily constitute IR objects as requested.
"""
from codecs import getreader, getwriter
from collections import defaultdict
from errno import ENOENT
from json import load, dumps
from os.path import join, normpath, relpath, splitext, sep
import subprocess
from tempfile import TemporaryFile
from sphinx.errors import SphinxError
from .analyzer_utils import cache_to_file, Command, is_explicitly_rooted
from .ir import (Attribute, Class, Exc, Function, Module,
Namespace, NO_DEFAULT, OPTIONAL, Param, Pathname, Return)
from .parsers import path_and_formal_params, PathVisitor
from .suffix_tree import SuffixTree
class Analyzer:
"""A runner of a langauge-specific static analysis tool and translator of
the results to our IR
"""
def __init__(self, json, base_dir):
"""Index and squirrel away the JSON for later lazy conversion to IR
objects.
:arg app: Sphinx application
:arg json: The loaded JSON output from jsdoc
:arg base_dir: Resolve paths in the JSON relative to this directory.
This must be an absolute pathname.
"""
self._base_dir = base_dir
# 2 doclets are made for classes, and they are largely redundant: one
# for the class itself and another for the constructor. However, the
# constructor one gets merged into the class one and is intentionally
# marked as undocumented, even if it isn't. See
# https://github.com/jsdoc3/jsdoc/issues/1129.
doclets = [doclet for doclet in json if doclet.get('comment') and
not doclet.get('undocumented')]
# Build table for lookup by name, which most directives use:
self._doclets_by_path = SuffixTree()
self._doclets_by_path.add_many((full_path_segments(d, base_dir), d)
for d in doclets)
# Build lookup table for autoclass's :members: option. This will also
# pick up members of functions (inner variables), but it will instantly
# filter almost all of them back out again because they're
# undocumented. We index these by unambiguous full path. Then, when
# looking them up by arbitrary name segment, we disambiguate that first
# by running it through the suffix tree above. Expect trouble due to
# jsdoc's habit of calling things (like ES6 class methods)
# "<anonymous>" in the memberof field, even though they have names.
# This will lead to multiple methods having each other's members. But
# if you don't have same-named inner functions or inner variables that
# are documented, you shouldn't have trouble.
self._doclets_by_class = defaultdict(lambda: [])
self._doclets_by_namespace = defaultdict(lambda: [])
self._doclets_by_module = defaultdict(lambda: [])
self._doclets_by_location = defaultdict(lambda: [])
for d in doclets:
of = d.get('memberof')
folder_segments = system_path_segments(d, base_dir)
if not of:
self._doclets_by_location[tuple(folder_segments)].append(d)
else:
if 'module' in of and '~' not in of:
path_segments = full_path_segments(d, base_dir, longname_field='memberof')
self._doclets_by_module[tuple(path_segments)].append(d)
self._doclets_by_location[tuple(folder_segments)].append(d)
else:
path_segments = full_path_segments(d, base_dir, longname_field='memberof')
self._doclets_by_class[tuple(path_segments)].append(d)
self._doclets_by_namespace[tuple(path_segments)].append(d)
@classmethod
def from_disk(cls, abs_source_paths, app, base_dir):
json = jsdoc_output(getattr(app.config, 'jsdoc_cache', None),
abs_source_paths,
base_dir,
app.confdir,
getattr(app.config, 'jsdoc_config_path', None))
return cls(json, base_dir)
def get_object(self, path_suffix, as_type):
"""Return the IR object with the given path suffix.
If helpful, use the ``as_type`` hint, which identifies which autodoc
directive the user called.
"""
# Design note: Originally, I had planned to eagerly convert all the
# doclets to the IR. But it's hard to tell unambiguously what kind
# each doclet is, at least in the case of jsdoc. If instead we lazily
# convert each doclet as it's referenced by an autodoc directive, we
# can use the hint we previously did: the user saying "this is a
# function (by using autofunction on it)", "this is a class", etc.
# Additionally, being lazy lets us avoid converting unused doclets
# altogether.
try:
doclet_as_whatever = {
'function': self._doclet_as_function,
'class': self._doclet_as_class,
'namespace': self._doclet_as_namespace,
'attribute': self._doclet_as_attribute,
'module': self._doclet_as_module}[as_type]
except KeyError:
raise NotImplementedError('Unknown autodoc directive: auto%s' % as_type)
doclet, full_path = self._doclets_by_path.get_with_path(path_suffix)
return doclet_as_whatever(doclet, full_path)
def _doclet_as_module(self, doclet, full_path):
members = []
for member_doclet in self._doclets_by_module[tuple(full_path)]:
kind = member_doclet.get('kind')
member_full_path = full_path_segments(member_doclet, self._base_dir)
# Typedefs should still fit into function-shaped holes:
if (kind == 'class'):
doclet_as_whatever = self._doclet_as_class
elif (kind == 'namespace'):
doclet_as_whatever = self._doclet_as_namespace
elif (kind == 'function' or kind == 'typedef'):
doclet_as_whatever = self._doclet_as_function
else:
doclet_as_whatever = self._doclet_as_attribute
member = doclet_as_whatever(member_doclet, member_full_path)
members.append(member)
return Module(
authors=doclet.get('author'), # can be a list of authors
version=doclet.get('version'),
license_information=doclet.get('license'),
description=doclet.get('description'),
members=members,
exported_from=None,
**top_level_properties(doclet, full_path))
def _doclet_as_class(self, doclet, full_path):
# This is an instance method so it can get at the base dir.
members = []
for member_doclet in self._doclets_by_class[tuple(full_path)]:
kind = member_doclet.get('kind')
member_full_path = full_path_segments(member_doclet, self._base_dir)
# Typedefs should still fit into function-shaped holes:
doclet_as_whatever = self._doclet_as_function if (kind == 'function' or kind == 'typedef') else self._doclet_as_attribute
member = doclet_as_whatever(member_doclet, member_full_path)
members.append(member)
return Class(
description=doclet.get('classdesc', ''),
supers=[], # Could implement for JS later.
exported_from=None, # Could implement for JS later.
is_abstract=False,
interfaces=[],
# Right now, a class generates several doclets, all but one of
# which are marked as undocumented. In the one that's left, most of
# the fields are about the default constructor:
constructor=self._doclet_as_function(doclet, full_path),
members=members,
**top_level_properties(doclet, full_path))
def _doclet_as_namespace(self, doclet, full_path):
# This is an instance method so it can get at the base dir.
members = []
for member_doclet in self._doclets_by_namespace[tuple(full_path)]:
kind = member_doclet.get('kind')
member_full_path = full_path_segments(member_doclet, self._base_dir)
# Typedefs should still fit into function-shaped holes:
doclet_as_whatever = self._doclet_as_function if (kind == 'function' or kind == 'typedef') else self._doclet_as_attribute
member = doclet_as_whatever(member_doclet, member_full_path)
members.append(member)
return Namespace(
description=doclet.get('description', ''),
members=members,
exported_from=None,
**top_level_properties(doclet, full_path))
@staticmethod
def _doclet_as_function(doclet, full_path):
return Function(
description=description(doclet),
exported_from=None,
is_abstract=False,
is_optional=False,
is_static=is_static(doclet),
is_private=is_private(doclet),
exceptions=exceptions_to_ir(doclet.get('exceptions', [])),
returns=returns_to_ir(doclet.get('returns', [])),
params=params_to_ir(doclet),
**top_level_properties(doclet, full_path))
@staticmethod
def _doclet_as_attribute(doclet, full_path):
return Attribute(
description=description(doclet),
exported_from=None,
is_abstract=False,
is_optional=False,
is_static=False,
is_private=is_private(doclet),
type=get_type(doclet),
**top_level_properties(doclet, full_path)
)
def resolve_name(self, segments):
"""Return a tuple containing a list of path segments that points to exaclty
one location of js modules
:arg segments: path segments, eq. directive content
"""
partial_path = ''.join(segments)
system_paths = self._doclets_by_location.keys()
# TODO lazy resolving is not enough. walk down segments as we do in SuffixTree.get_with_path()
# to find an exact match-up
paths = [path for path in system_paths if partial_path in ''.join(list(path))]
if not paths:
raise PathError(segments)
elif len(paths) > 1:
raise MultiPathError(segments, paths)
# collect ir of js modules in path
modules = []
for module_doclet in self._doclets_by_location[paths[0]]:
kind = module_doclet.get('kind')
module_full_path = full_path_segments(module_doclet, self._base_dir)
# Typedefs should still fit into function-shaped holes:
if (kind == 'module'):
doclet_as_whatever = self._doclet_as_module
else:
continue # ignore everything else at module level
module = doclet_as_whatever(module_doclet, module_full_path)
modules.append(module)
return modules
def is_private(doclet):
    """Return True if the doclet is marked ``@access private``."""
    access = doclet.get('access')
    return access == 'private'
def is_static(doclet):
    """Return True if the doclet documents a static (class-level) member."""
    scope = doclet.get('scope')
    return scope == 'static'
def full_path_segments(d, base_dir, longname_field='longname'):
    """Return the full, unambiguous list of path segments that points to an
    entity described by a doclet.

    Example: ``['./', 'dir/', 'dir/', 'file.', 'object.', 'object#', 'object']``

    :arg d: The doclet
    :arg base_dir: Absolutized value of the root_for_relative_js_paths option
    :arg longname_field: The field to look in at the top level of the doclet
        for the long name of the object to emit a path to
    """
    meta = d['meta']
    # Make the source file's folder relative to base_dir, and normalize the
    # separator to '/' regardless of OS:
    rel = relpath(meta['path'], base_dir)
    rel = '/'.join(rel.split(sep))
    rooted_rel = rel if is_explicitly_rooted(rel) else './%s' % rel
    # Building up a string and then parsing it back down again is probably
    # not the fastest approach, but it means knowledge of path format is in
    # one place: the parser.
    path = '%s/%s.%s' % (rooted_rel,
                         splitext(meta['filename'])[0],
                         d[longname_field])
    return PathVisitor().visit(
        path_and_formal_params['path'].parse(path))
def system_path_segments(d, base_dir):
    """Return list of path segments that points to a folder with js modules.

    :arg d: The doclet
    :arg base_dir: Absolutized value of the root_for_relative_js_paths option
    """
    ignore_paths = ['.', '..']
    meta = d['meta']
    rel = relpath(meta['path'], base_dir)
    rel = '/'.join(rel.split(sep))
    rooted_rel = rel if is_explicitly_rooted(rel) else './%s' % rel
    path = rooted_rel
    # NOTE(review): for '.' and '..' this returns the raw string rather than
    # a list of segments, unlike the normal case below -- callers appear to
    # tuple() the result either way; confirm before changing.
    if path in ignore_paths:
        return path
    return PathVisitor().visit(
        path_and_formal_params['path'].parse(path))
@cache_to_file(lambda cache, *args: cache)
def jsdoc_output(cache, abs_source_paths, base_dir, sphinx_conf_dir, config_path=None):
    """Run ``jsdoc -X`` over the given source paths and return its parsed
    JSON output (cached to *cache* if given, via the decorator).

    :arg cache: Path of the cache file, or None to disable caching
    :arg abs_source_paths: Absolute paths of the JS sources to analyze
    :arg base_dir: Unused here; kept for the cache key / call symmetry
    :arg sphinx_conf_dir: Directory to run jsdoc in (conf.py's dir)
    :arg config_path: Optional jsdoc config file, relative to sphinx_conf_dir
    :raises SphinxError: if jsdoc is not installed or emits no JSON
    """
    command = Command('jsdoc')
    command.add('-X', *abs_source_paths)
    if config_path:
        command.add('-c', normpath(join(sphinx_conf_dir, config_path)))
    # Use a temporary file to handle large output volume. JSDoc defaults to
    # utf8-encoded output.
    with getwriter('utf-8')(TemporaryFile(mode='w+b')) as temp:
        try:
            p = subprocess.Popen(command.make(), cwd=sphinx_conf_dir, stdout=temp)
        except OSError as exc:
            # ENOENT means the jsdoc executable itself is missing:
            if exc.errno == ENOENT:
                raise SphinxError('%s was not found. Install it using "npm install -g jsdoc".' % command.program)
            else:
                raise
        p.wait()
        # Once output is finished, move back to beginning of file and load it:
        temp.seek(0)
        try:
            return load(getreader('utf-8')(temp))
        except ValueError:
            raise SphinxError('jsdoc found no JS files in the directories %s. Make sure js_source_path is set correctly in conf.py. It is also possible (though unlikely) that jsdoc emitted invalid JSON.' % abs_source_paths)
def format_default_according_to_type_hints(value, declared_types, first_type_is_string):
    """Return the default value for a param, formatted as a string
    ready to be used in a formal parameter list.

    JSDoc is a mess at extracting default values: only a few simple types
    come out of the signature unambiguously, so we lean on the declared
    types to resolve the ambiguity.

    :arg value: The extracted value, which may be of the right or wrong type
    :arg declared_types: The types declared in the doclet for this param
        (e.g. for ``{string|number}``, ['string', 'number'])
    :arg first_type_is_string: Whether the first declared type for this param
        is string. A string-typed JSON value with a non-string first declared
        type is treated as spurious (an arrow function, a variable name, ...)
        and passed through verbatim; so if you want an ambiguously documented
        default like ``@param {string|Array} [foo=[]]`` treated as a string,
        make sure "string" comes first.
    """
    if isinstance(value, str):
        # JSDoc handed us a string in the JSON.
        if declared_types and not first_type_is_string:
            # It's a spurious string, like ``() => 5`` or a variable name.
            # Let it through verbatim.
            return value
        # It's a real string; escape any contained quotes.
        return dumps(value)
    # It came in as a non-string (int, null, bool, ...).
    if first_type_is_string:
        # Convert it back to a (quoted) string.
        return '"%s"' % (dumps(value),)
    # It's fine as the type it is.
    return dumps(value)
def description(obj):
    """Return the free-text ``description`` field of a jsdoc JSON object,
    defaulting to '' when the key is absent."""
    return obj.get('description', '')
def get_type(props):
    """Given an arbitrary object from a jsdoc-emitted JSON file, render its
    ``type`` property as text.

    Union types come out pipe-separated (``Foo|Bar``). Return None if no
    type names are declared.
    """
    type_names = props.get('type', {}).get('names', [])
    if not type_names:
        return None
    return '|'.join(type_names)
def top_level_properties(doclet, full_path):
    """Extract information common to complex entities, and return it as a dict.

    Specifically, pull out the information needed to parametrize TopLevel's
    constructor.

    :arg doclet: The jsdoc doclet
    :arg full_path: Unambiguous path segments for the entity, as produced by
        :func:`full_path_segments`
    """
    return dict(
        name=doclet['name'],
        path=Pathname(full_path),
        filename=doclet['meta']['filename'],
        # description's source varies depending on whether the doclet is a
        # class, so it gets filled out elsewhere.
        line=doclet['meta']['lineno'],
        deprecated=doclet.get('deprecated', False),
        examples=doclet.get('examples', []),
        see_alsos=doclet.get('see', []),
        properties=properties_to_ir(doclet.get('properties', [])))
def properties_to_ir(properties):
    """Turn jsdoc-emitted properties JSON into a list of Attribute IR
    objects (one per ``@property`` entry)."""
    return [Attribute(type=get_type(p),
                      name=p['name'],
                      # We can get away with setting null values for these
                      # because we never use them for anything:
                      path=Pathname([]),
                      filename='',
                      description=description(p),
                      line=0,
                      deprecated=False,
                      examples=[],
                      see_alsos=[],
                      properties=[],
                      exported_from=None,
                      is_abstract=False,
                      is_optional=False,
                      is_static=False,
                      is_private=False)
            for p in properties]
def first_type_is_string(type):
    """Return True if the first declared type name in *type* is "string".

    :arg type: A jsdoc ``type`` JSON object, e.g. ``{"names": ["string"]}``.
        An object with no type names yields False.
    """
    # NOTE: the parameter shadows the builtin ``type``; kept as-is for
    # interface compatibility with existing callers.
    type_names = type.get('names', [])
    # bool() keeps the return type consistent: the original returned the
    # empty list itself when no names were declared.
    return bool(type_names) and type_names[0] == 'string'
def params_to_ir(doclet):
    """Extract the parameters of a function or class, and return a list of
    Param instances.

    Formal param fallback philosophy:
    1. If the user puts a formal param list in the RST explicitly, use that.
    2. Else, if they've @param'd anything, show just those args. This gives the
       user full control from the code, so they can use autoclass without
       having to manually write each function signature in the RST.
    3. Else, extract a formal param list from the meta field, which will lack
       descriptions.

    Param list:
    * Don't show anything without a description or at least a type. It adds
      nothing.

    Our extraction to IR thus follows our formal param philosophy, and the
    renderer caps it off by checking for descriptions and types while building
    the param+description list.

    :arg doclet: A JSDoc doclet representing a function or class
    """
    ret = []
    # First, go through the explicitly documented params:
    for p in doclet.get('params', []):
        type = get_type(p)
        default = p.get('defaultvalue', NO_DEFAULT)
        # 'dummy' is a placeholder: ``has_default`` (below) tells the
        # renderer whether to look at ``default`` at all.
        # NOTE(review): ``type`` here is the rendered string (or None), while
        # format_default_according_to_type_hints documents a *list* of
        # declared types -- it only truth-tests the argument, so this works;
        # confirm before tightening either side.
        formatted_default = (
            'dummy' if default is NO_DEFAULT else
            format_default_according_to_type_hints(
                default,
                type,
                first_type_is_string(p.get('type', {}))))
        is_optional = p.get('optional', False)
        ret.append(Param(
            name=p['name'],
            description=description(p),
            has_default=default is not NO_DEFAULT,
            default=formatted_default,
            is_variadic=p.get('variable', False),
            is_optional=is_optional,
            optional='dummy' if not is_optional else OPTIONAL,
            type=get_type(p)))
    # Use params from JS code if there are no documented @params.
    if not ret:
        ret = [Param(name=p) for p in
               doclet['meta']['code'].get('paramnames', [])]
    return ret
def exceptions_to_ir(exceptions):
    """Turn jsdoc's JSON-formatted exceptions into a list of Exc IR objects.

    :arg exceptions: the ``exceptions`` array of a jsdoc doclet
    """
    return [Exc(type=get_type(e),
                description=description(e))
            for e in exceptions]
def returns_to_ir(returns):
    """Turn jsdoc's JSON-formatted ``returns`` array into a list of Return
    IR objects."""
    return [Return(type=get_type(r),
                   description=description(r))
            for r in returns]
class PathError(Exception):
    """Raised when a partial module path resolves to no known location."""

    def __init__(self, segments):
        self.partial_path = ''.join(segments)
        self.message = 'No path found'

    def __str__(self):
        return '%s: %s' % (self.message, self.partial_path)
class MultiPathError(PathError):
    """Raised when a partial module path matches more than one location."""

    def __init__(self, segments, paths):
        self.partial_path = ''.join(segments)
        self.paths = ', '.join(''.join(p) for p in paths)
        self.message = 'Multiple paths found'

    def __str__(self):
        return '%s: %s: %s' % (self.message, self.partial_path, self.paths)
| 41.378641 | 223 | 0.629564 |
0c47d07c176658db36bcda66502601e1a651c17e | 6,243 | py | Python | plumbing/database.py | svetlyak40wt/plumbing | 928e7900d8b55f3ae031c3acfeeb374150df268a | [
"MIT"
] | null | null | null | plumbing/database.py | svetlyak40wt/plumbing | 928e7900d8b55f3ae031c3acfeeb374150df268a | [
"MIT"
] | null | null | null | plumbing/database.py | svetlyak40wt/plumbing | 928e7900d8b55f3ae031c3acfeeb374150df268a | [
"MIT"
] | 1 | 2019-06-10T16:14:24.000Z | 2019-06-10T16:14:24.000Z | # Built-in modules #
import sqlite3
# Internal modules #
from common import get_next_item
from color import Color
from autopaths import FilePath
from cache import property_cached
################################################################################
def convert_to_sql(source, dest, keys, values, sql_field_types=None):
if sql_field_types is None: sql_field_types = {}
with sqlite3.connect(dest) as connection:
# Prepare #
cursor = connection.cursor()
fields = ','.join(['"' + f + '"' + ' ' + sql_field_types.get(f, 'text') for f in keys])
cursor.execute("CREATE table 'data' (%s)" % fields)
question_marks = '(' + ','.join(['?' for x in keys]) + ')'
sql_command = "INSERT into 'data' values " + question_marks
# Main loop #
try:
cursor.executemany(sql_command, values)
except (ValueError, sqlite3.OperationalError, sqlite3.ProgrammingError, sqlite3.InterfaceError) as err:
first_elem = get_next_item(values)
message1 = "The command <%s%s%s> on the database '%s' failed with error:\n %s%s%s"
message1 = message1 % (Color.cyn, sql_command, Color.end, dest, Color.u_red, err, Color.end)
message2 = "\n * %sThe bindings (%i) %s: %s \n * %sYou gave%s: %s"
message2 = message2 % (Color.b_ylw, len(keys), Color.end, keys, Color.b_ylw, Color.end, values)
message3 = "\n * %sFirst element (%i)%s: %s \n"
message3 = message3 % (Color.b_ylw, len(first_elem) if first_elem else 0, Color.end, first_elem)
raise Exception(message1 + message2 + message3)
except KeyboardInterrupt as err:
print "You interrupted the creation of the database. Committing everything done up to this point."
connection.commit()
cursor.close()
raise err
# Index #
try:
cursor.execute("CREATE INDEX if not exists 'data_index' on 'data' (id)")
except KeyboardInterrupt as err:
print "You interrupted the creation of the index. Committing everything done up to this point."
connection.commit()
cursor.close()
raise err
# Close #
connection.commit()
cursor.close()
################################################################################
class Database(FilePath):
    """A thin wrapper around an SQLite file with a 'data' table.

    Supports iteration, ``in``, ``len()``, indexing by row number or by the
    'id' column, and use as a context manager. Two cursors/connections are
    kept: the public one honors *factory* (row factory); the private
    underscore one always yields plain tuples for internal queries.
    """

    def __init__(self, path, factory=None):
        """
        :param path: Path of the SQLite file.
        :param factory: Optional sqlite3 row factory for the public cursor.
        """
        self.path = path
        self.factory = factory

    def __repr__(self):
        """Called when evaluating ``print seqs``."""
        return '<%s object on "%s">' % (self.__class__.__name__, self.path)

    def __enter__(self):
        """Called when entering the 'with' statement."""
        return self

    def __exit__(self, errtype, value, traceback):
        """Called when exiting the 'with' statement.
        Enables us to close the database properly, even when
        exceptions are raised."""
        self.close()

    def __iter__(self):
        """Called when evaluating ``for x in seqs: pass``."""
        self.cursor.execute("SELECT * from '%s'" % self.main_table)
        return self.cursor

    def __contains__(self, key):
        """Called when evaluating ``"P81239A" in seqs``."""
        # NOTE(review): *key* is interpolated into the SQL unescaped here
        # (unlike __getitem__) -- injection-prone for untrusted keys.
        self._cursor.execute("SELECT EXISTS(SELECT 1 FROM '%s' WHERE id=='%s' LIMIT 1);" % (self.main_table, key))
        # BUGFIX: fetchone() returns a one-tuple like (0,) or (1,), which is
        # always truthy; test the contained value, not the row.
        return bool(self._cursor.fetchone()[0])

    def __len__(self):
        """Called when evaluating ``len(seqs)``."""
        self._cursor.execute("SELECT COUNT(1) FROM '%s';" % self.main_table)
        # BUGFIX: fetchone() returns a one-tuple; int() of a tuple raises
        # TypeError, so take the first element.
        return int(self._cursor.fetchone()[0])

    def __nonzero__(self):
        """Called when evaluating ``if seqs: pass``. (Python 2 protocol.)"""
        return True if len(self) != 0 else False

    def __getitem__(self, key):
        """Called when evaluating ``seqs[0] or seqs['P81239A']``."""
        if isinstance(key, int):
            self.cursor.execute("SELECT * from '%s' LIMIT 1 OFFSET %i;" % (self.main_table, key))
        else:
            # Escape single quotes so the interpolated string stays valid SQL:
            key = key.replace("'", "''")
            self.cursor.execute("SELECT * from '%s' where id=='%s' LIMIT 1;" % (self.main_table, key))
        return self.cursor.fetchone()

    @property
    def main_table(self):
        """Name of the table all queries run against; fail early if the
        file doesn't contain it."""
        if not 'data' in self.tables:
            raise Exception("The file '" + self.path + "' does not contain any 'data' table.")
        return 'data'

    def check_format(self):
        """Raise if the file on disk doesn't carry the SQLite magic header."""
        with open(self.path, 'r') as f: header = f.read(15)
        if header != 'SQLite format 3':
            raise Exception("The file '" + self.path + "' is not an SQLite database.")

    @property_cached
    def connection(self):
        """Lazily-opened connection using the user-supplied row factory."""
        self.check_format()
        con = sqlite3.connect(self.path)
        con.row_factory = self.factory
        return con

    @property_cached
    def cursor(self):
        """Cursor on the public (factory-aware) connection."""
        return self.connection.cursor()

    @property_cached
    def _connection(self):
        """Second connection without a row factory, for internal queries."""
        self.check_format()
        return sqlite3.connect(self.path)

    @property_cached
    def _cursor(self):
        """Cursor on the internal plain-tuple connection."""
        return self._connection.cursor()

    @property
    def tables(self):
        """The complete list of SQL tables."""
        self.connection.row_factory = sqlite3.Row
        self._cursor.execute("select name from sqlite_master where type='table'")
        result = [x[0].encode('ascii') for x in self._cursor.fetchall()]
        self.connection.row_factory = self.factory
        return result

    @property
    def fields(self):
        """The list of fields available for every entry."""
        return self.get_fields_of_table(self.main_table)

    @property
    def first(self):
        """Just the first entry"""
        return self[0]

    def get_fields_of_table(self, table):
        """Return the list of fields for a particular table
        by querying the SQL for the complete list of column names."""
        # Check the table exists #
        if not table in self.tables: return []
        # A PRAGMA statement will implicitly issue a commit, don't use #
        self._cursor.execute("SELECT * from '%s' LIMIT 1" % table)
        fields = [x[0] for x in self._cursor.description]
        self.cursor.fetchall()
        return fields

    def close(self):
        """Close the public cursor and connection."""
        self.cursor.close()
        self.connection.close()
f12333a518a594663621ac78f99839630d069702 | 416 | py | Python | wastecoinapp/migrations/0002_auto_20200603_1731.py | BuildForSDG/wastecoinBackup | 32899d9b3d2f36eb44ded52a341cf9d4e227f8cc | [
"MIT"
] | null | null | null | wastecoinapp/migrations/0002_auto_20200603_1731.py | BuildForSDG/wastecoinBackup | 32899d9b3d2f36eb44ded52a341cf9d4e227f8cc | [
"MIT"
] | 7 | 2020-06-03T19:08:42.000Z | 2021-09-22T19:08:32.000Z | wastecoinapp/migrations/0002_auto_20200603_1731.py | BuildForSDG/wastecoinBackup | 32899d9b3d2f36eb44ded52a341cf9d4e227f8cc | [
"MIT"
] | 1 | 2020-06-03T19:22:39.000Z | 2020-06-03T19:22:39.000Z | # Generated by Django 2.2.4 on 2020-06-03 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wastecoinapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='otp',
name='otp_reset_code',
field=models.IntegerField(default='0000', verbose_name='Reset Code'),
),
]
| 21.894737 | 81 | 0.608173 |
9b5b3c7a825d40d5fec975266da0f1cc9d966c18 | 1,317 | py | Python | baseline/sota_semantic_hashing/get_best_sem_acc_stterror.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 32 | 2020-01-03T09:53:03.000Z | 2021-09-07T07:23:26.000Z | baseline/sota_semantic_hashing/get_best_sem_acc_stterror.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | null | null | null | baseline/sota_semantic_hashing/get_best_sem_acc_stterror.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 6 | 2020-01-21T06:50:21.000Z | 2021-01-22T08:04:00.000Z |
# Collect the best and average semantic-hashing accuracy over `runs` runs for
# each TTS->STT pipeline and dataset, and write a small summary file per
# pipeline. Each per-run file holds "label: value" lines; the per-run score
# is the maximum value found in the file.
dir_path = './results/results_trigram_hash_stterror_768_10runs/'
runs = 10
for tts_stt in ["gtts_witai", "macsay_witai"]:
    str_write = ''
    for dataset_name in ['chatbot', 'snips']:
        str_write += dataset_name + '\n'
        acc_avg = 0
        acc_max = 0
        for run in range(1, runs + 1):
            acc = 0
            subdir_path = dir_path + 'inc_{}_run{}/'.format(tts_stt, run)
            filename = subdir_path + '{}_f1.txt.txt'.format(dataset_name)
            # BUGFIX: use a context manager -- the original leaked one open
            # file handle per run.
            with open(filename, 'r') as f:
                lines = f.read().split('\n')
            # Get max acc from 1 run
            for l in lines:
                # BUGFIX: was `l is not ""` -- identity comparison with a
                # literal; equality is what's meant.
                if l != "":
                    l_split = l.split(': ')
                    l_acc = float(l_split[1])
                    print(l_acc)
                    if l_acc >= acc:
                        acc = l_acc
            # Get average acc
            acc_avg += acc
            # Get max acc
            if acc >= acc_max:
                acc_max = acc
        acc_avg /= runs
        str_write += ' Avg-{}: {:.2f}\n'.format(runs, acc_avg * 100)
        str_write += ' Best-{}: {:.2f}\n\n'.format(runs, acc_max * 100)
    print("Saving to: {}".format(dir_path + 'inc_{}'.format(tts_stt)))
    with open(dir_path + 'inc_{}'.format(tts_stt), 'w') as f_out:
        f_out.write(str_write)
af902054e01d415904c7aa772259191b98ceebf1 | 10,436 | py | Python | generators/cppevent.py | Lomadriel/xpp | d2ff2aaba6489f606bbcc090c0a78a8a3f9fcd1f | [
"MIT"
] | null | null | null | generators/cppevent.py | Lomadriel/xpp | d2ff2aaba6489f606bbcc090c0a78a8a3f9fcd1f | [
"MIT"
] | null | null | null | generators/cppevent.py | Lomadriel/xpp | d2ff2aaba6489f606bbcc090c0a78a8a3f9fcd1f | [
"MIT"
] | null | null | null | import sys # stderr
from utils import \
get_namespace, \
get_ext_name, \
_n_item, \
_ext, \
_reserved_keywords
from resource_classes import _resource_classes
# C++ snippet for an explicit template specialization of a field accessor.
# Placeholders: return type, class name, method name, specialized type,
# returned member. (Kept for completeness; not referenced in this module.)
_field_accessor_template_specialization = \
'''\
template<typename Connection>
template<>
%s
%s<Connection>::%s<%s>(void) const
{
return %s;
}\
'''

# Registry of reusable C++ code templates, keyed by purpose.
_templates = {}

# Generic accessor: wraps a raw event field in an object built through the
# xpp factory. Placeholders: default return type, method name, member (twice).
_templates['field_accessor_template'] = \
'''\
template<typename ReturnType = %s, typename ... Parameter>
ReturnType
%s(Parameter && ... parameter) const
{
using make = xpp::generic::factory::make<Connection,
decltype((*this)->%s),
ReturnType,
Parameter ...>;
return make()(this->m_c,
(*this)->%s,
std::forward<Parameter>(parameter) ...);
}\
'''


def _field_accessor_template(c_type, method_name, member):
    # Fill the field-accessor template: the member name is substituted twice
    # (once in the decltype, once in the factory call).
    return _templates['field_accessor_template'] % \
        ( c_type
        , method_name
        , member
        , member
        )
# Skeleton of the generated ``xpp::<ns>::event::dispatcher`` class.
# Placeholders, in order: typedefs, constructors, handler parameter, event
# parameter, switch body, member declarations.
_templates['event_dispatcher_class'] = \
'''\
namespace event {

template<typename Connection>
class dispatcher
{
public:
%s\
%s\
template<typename Handler>
bool
operator()(Handler%s,
const std::shared_ptr<xcb_generic_event_t> &%s) const
{\
%s
return false;
}

%s\
}; // class dispatcher

} // namespace event
'''


def _event_dispatcher_class(typedef, ctors, switch, members, has_events):
    # When the extension defines no events, the handler/event parameters and
    # the switch body are omitted so the generated operator() stays valid.
    return _templates['event_dispatcher_class'] % \
        ( typedef
        , ctors
        , " handler" if has_events else ""
        , " event" if has_events else ""
        , switch if has_events else ""
        , members
        )
def event_dispatcher_class(namespace, cppevents):
    """Generate the C++ source of the event dispatcher class for one
    extension/namespace.

    :arg namespace: The XCB module/namespace object (``is_ext`` decides the
        constructor set and opcode arithmetic)
    :arg cppevents: The CppEvent instances to dispatch on
    """
    ns = get_namespace(namespace)
    ctor_name = "dispatcher"
    typedef = []
    ctors = []
    members = []
    # Core protocol: the opcode is the response type with the "sent" bit
    # masked off.
    opcode_switch = "event->response_type & ~0x80"
    typedef = [ "typedef xpp::%s::extension extension;\n" % ns ]
    members = \
        [ "protected:"
        , " Connection m_c;"
        ]
    ctors = \
        [ "template<typename C>"
        , "%s(C && c)" % ctor_name
        , " : m_c(std::forward<C>(c))"
        , "{}"
        ]
    # >>> if begin <<<
    if namespace.is_ext:
        # XXX: The xkb extension contains the sub-event in the member pad0
        if ns == "xkb":
            opcode_switch = "event->pad0"
        else:
            # Extension events are offset by the dynamically assigned
            # first_event value.
            opcode_switch = "(event->response_type & ~0x80) - m_first_event"
        members += [ " uint8_t m_first_event;" ]
        ctors = \
            [ "template<typename C>"
            , "%s(C && c, uint8_t first_event)" % (ctor_name)
            , " : m_c(std::forward<C>(c))"
            , " , m_first_event(first_event)"
            , "{}"
            , ""
            , "template<typename C>"
            , "%s(C && c, const xpp::%s::extension & extension)" % (ctor_name, ns)
            , " : %s(std::forward<C>(c), extension->first_event)" % ctor_name
            , "{}"
            ]
    # >>> if end <<<
    if len(typedef) > 0:
        typedef = "\n".join(map(lambda s: " " + s, typedef)) + "\n"
    else:
        typedef = ""
    if len(ctors) > 0:
        ctors = "\n".join(map(lambda s: (" " if len(s) > 0 else "") + s, ctors)) + "\n"
    else:
        ctors = ""
    if len(members) > 0:
        members = "\n".join(map(lambda s: " " + s, members)) + "\n"
    else:
        members = ""
    switch = event_switch_cases(cppevents, opcode_switch, "handler", "event", namespace)
    return _event_dispatcher_class(typedef,
                                   ctors,
                                   switch,
                                   members,
                                   len(cppevents) > 0)
def event_switch_cases(cppevents, arg_switch, arg_handler, arg_event, ns):
    """Generate the C++ switch statement(s) dispatching events to
    *arg_handler*.

    Events that compare equal are split across separate switch statements so
    that no switch ends up with a duplicated case label.
    """
    first_event_arg = ", m_first_event" if ns.is_ext else ""
    case_lines = [
        " case %s:",
        (" %s(" % arg_handler) + "%s<Connection>" + ("(m_c%s, %s));" % (first_event_arg, arg_event)),
        " return true;",
        "",
        "",
    ]
    case_template = "\n".join(case_lines)

    # Partition the events into buckets so that no bucket holds the same
    # event twice; each bucket becomes one switch statement.
    buckets = [[]]
    for event in cppevents:
        for bucket in buckets:
            if event not in bucket:
                bucket.append(event)
                break
        else:
            # Every existing bucket already holds this event: open a new one.
            buckets.append([event])

    if not cppevents:
        return ""

    rendered = ""
    for bucket in buckets:
        rendered += "\n switch (%s) {\n\n" % arg_switch
        for event in bucket:
            rendered += case_template % (event.opcode_name, event.scoped_name())
        rendered += " };\n"
    return rendered
########## EVENT ##########
class CppEvent(object):
    """One X protocol event, with enough metadata to emit the C++ wrapper
    class for it.

    NOTE(review): Python 2 code -- ``map()`` results are subscripted below
    (``self.names[0:-1]``), which only works where map returns a list.
    """
    def __init__(self, opcode, opcode_name, c_name, namespace, name, fields):
        self.opcode = opcode
        self.opcode_name = opcode_name
        self.c_name = c_name
        self.namespace = namespace
        self.fields = fields
        # Split the trailing name into lowercase components; all but the last
        # become nested C++ namespaces.
        self.names = map(str.lower, _n_item(name[-1], True))
        self.name = "_".join(map(str.lower, self.names))
        self.nssopen = ""
        self.nssclose = ""
        self.scope = []
        for name in self.names[0:-1]:
            # Suffix reserved C++ identifiers with '_':
            if name in _reserved_keywords: name += "_"
            self.nssopen += " namespace %s {" % name
            self.nssclose += " };"
            self.scope.append(name)

    def __cmp__(self, other):
        # Order events by opcode (Python 2 comparison protocol).
        if self.opcode == other.opcode:
            return 0
        elif self.opcode < other.opcode:
            return -1
        else:
            return 1

    def get_name(self):
        """Return the event's C++-safe name (reserved keywords remapped)."""
        return _reserved_keywords.get(self.name, self.name)

    def scoped_name(self):
        """Return the fully qualified C++ name, e.g. ``xpp::ns::event::foo``."""
        ns = get_namespace(self.namespace)
        return "xpp::" + ns + "::event::" + self.get_name()

    def make_class(self):
        """Emit the full C++ source of the wrapper class for this event."""
        member_accessors = []
        member_accessors_special = []
        for field in self.fields:
            # Only fields whose type is a known resource (window, atom, ...)
            # get a typed accessor.
            if field.field_type[-1] in _resource_classes:
                # NOTE(review): ``template_name`` is assigned but never used.
                template_name = field.field_name.capitalize()
                c_type = field.c_field_type
                method_name = field.field_name.lower()
                # Avoid clashing with the class name or C++ keywords:
                if (method_name == self.get_name()
                        or method_name in _reserved_keywords):
                    method_name += "_"
                member = field.c_field_name
                member_accessors.append(_field_accessor_template(c_type, method_name, member))
        ns = get_namespace(self.namespace)
        # NOTE(review): ``extension`` is assigned but never used.
        extension = "xpp::%s::extension" % ns
        # Core-protocol constructor: connection + raw event.
        ctor = \
            [ "template<typename C>"
            , "%s(C && c," % self.get_name()
            , (" " * len(self.get_name())) + " const std::shared_ptr<xcb_generic_event_t> & event)"
            , " : base(event)"
            , " , m_c(std::forward<C>(c))"
            , "{}"
            ]
        m_first_event = ""
        typedef = [ "typedef xpp::%s::extension extension;" % ns ]
        description = \
            [ "static std::string description(void)"
            , "{"
            , " return std::string(\"%s\");" % self.opcode_name
            , "}"
            ]
        opcode_accessor = \
            [ "static uint8_t opcode(void)"
            , "{"
            , " return %s;" % self.opcode_name
            , "}"
            ]
        first_event = []
        if self.namespace.is_ext:
            # Extension events: opcodes are offset by the extension's
            # dynamically assigned first_event, so add overloads and store it.
            opcode_accessor += \
                [ ""
                , "static uint8_t opcode(uint8_t first_event)"
                , "{"
                , " return first_event + opcode();"
                , "}"
                , ""
                , "static uint8_t opcode(const xpp::%s::extension & extension)" % ns
                , "{"
                , " return opcode(extension->first_event);"
                , "}"
                ]
            first_event = \
                [ "uint8_t first_event(void)"
                , "{"
                , " return m_first_event;"
                , "}"
                ]
            ctor = \
                [ "template<typename C>"
                , "%s(C && c," % self.get_name()
                , (" " * len(self.get_name())) + " uint8_t first_event,"
                , (" " * len(self.get_name())) + " const std::shared_ptr<xcb_generic_event_t> & event)"
                , " : base(event)"
                , " , m_c(std::forward<C>(c))"
                , " , m_first_event(first_event)"
                , "{}"
                ]
            m_first_event = " const uint8_t m_first_event;\n"
        # Join the line lists into indented source fragments:
        if len(opcode_accessor) > 0:
            opcode_accessor = "\n".join(map(lambda s: " " + s, opcode_accessor)) + "\n"
        else:
            opcode_accessor = ""
        if len(ctor) > 0:
            ctor = "\n".join(map(lambda s: " " + s, ctor)) + "\n"
        else:
            ctor = ""
        if len(typedef) > 0:
            typedef = "\n".join(map(lambda s: " " + s, typedef)) + "\n\n"
        else:
            typedef = ""
        if len(member_accessors) > 0:
            member_accessors = "\n" + "\n\n".join(member_accessors) + "\n\n"
            member_accessors_special = "\n" + "\n\n".join(member_accessors_special) + "\n\n"
        else:
            member_accessors = ""
            member_accessors_special = ""
        if len(description) > 0:
            description = "\n" + "\n".join(map(lambda s: " " + s, description)) + "\n"
        else:
            description = ""
        if len(first_event) > 0:
            first_event = "\n" + "\n".join(map(lambda s: " " + s, first_event)) + "\n"
        else:
            first_event = ""
        return \
'''
namespace event {
template<typename Connection>
class %s
: public xpp::generic::event<%s>
{
public:
%s\
typedef xpp::generic::event<%s> base;
%s\
virtual ~%s(void) {}
%s\
%s\
%s\
%s\
protected:
Connection m_c;
%s\
}; // class %s
%s\
} // namespace event
''' % (self.get_name(),  # class %s
       self.c_name,  # %s>
       typedef,
       self.c_name,  # typedef xpp::generic::event<%s>::base;
       ctor,
       self.get_name(),  # virtual ~%s(void) {}
       opcode_accessor,
       description,
       first_event,
       member_accessors,
       m_first_event,
       self.get_name(),  # // class %s
       member_accessors_special)
3e55297e77b69676733cdacad032163a9b15713c | 12,506 | py | Python | aries_cloudagent/issuer/indy.py | BoschDigitalSolutions/aries-cloudagent-python | 1b7d209cc70f01593c5c78be512af00c2cc1c9a8 | [
"Apache-2.0"
] | 1 | 2020-07-02T12:36:32.000Z | 2020-07-02T12:36:32.000Z | aries_cloudagent/issuer/indy.py | BoschDigitalSolutions/aries-cloudagent-python | 1b7d209cc70f01593c5c78be512af00c2cc1c9a8 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/issuer/indy.py | BoschDigitalSolutions/aries-cloudagent-python | 1b7d209cc70f01593c5c78be512af00c2cc1c9a8 | [
"Apache-2.0"
] | null | null | null | """Indy issuer implementation."""
import json
import logging
from typing import Sequence, Tuple
import indy.anoncreds
import indy.blob_storage
from indy.error import AnoncredsRevocationRegistryFullError, IndyError, ErrorCode
from ..messaging.util import encode
from .base import (
BaseIssuer,
IssuerError,
IssuerRevocationRegistryFullError,
DEFAULT_CRED_DEF_TAG,
DEFAULT_ISSUANCE_TYPE,
DEFAULT_SIGNATURE_TYPE,
)
from ..indy import create_tails_reader, create_tails_writer
from ..indy.error import IndyErrorHandler
class IndyIssuer(BaseIssuer):
    """Indy issuer class."""
    def __init__(self, wallet):
        """
        Initialize an IndyIssuer instance.
        Args:
            wallet: IndyWallet instance
        """
        # Module-scoped logger; wallet provides the indy wallet handle used below.
        self.logger = logging.getLogger(__name__)
        self.wallet = wallet
    def make_schema_id(
        self, origin_did: str, schema_name: str, schema_version: str
    ) -> str:
        """Derive the ID for a schema."""
        # Indy schema id format: <did>:2:<name>:<version>
        return f"{origin_did}:2:{schema_name}:{schema_version}"
    async def create_and_store_schema(
        self,
        origin_did: str,
        schema_name: str,
        schema_version: str,
        attribute_names: Sequence[str],
    ) -> Tuple[str, str]:
        """
        Create a new credential schema and store it in the wallet.
        Args:
            origin_did: the DID issuing the credential definition
            schema_name: the schema name
            schema_version: the schema version
            attribute_names: a sequence of schema attribute names
        Returns:
            A tuple of the schema ID and JSON
        """
        # IndyErrorHandler converts low-level IndyError into IssuerError.
        with IndyErrorHandler("Error when creating schema", IssuerError):
            schema_id, schema_json = await indy.anoncreds.issuer_create_schema(
                origin_did, schema_name, schema_version, json.dumps(attribute_names),
            )
        return (schema_id, schema_json)
    def make_credential_definition_id(
        self, origin_did: str, schema: dict, signature_type: str = None, tag: str = None
    ) -> str:
        """Derive the ID for a credential definition."""
        # Indy cred def id format: <did>:3:<sig type>:<schema seq no>:<tag>
        signature_type = signature_type or DEFAULT_SIGNATURE_TYPE
        tag = tag or DEFAULT_CRED_DEF_TAG
        return f"{origin_did}:3:{signature_type}:{str(schema['seqNo'])}:{tag}"
    async def credential_definition_in_wallet(
        self, credential_definition_id: str
    ) -> bool:
        """
        Check whether a given credential definition ID is present in the wallet.
        Args:
            credential_definition_id: The credential definition ID to check
        """
        # Probe by attempting to create an offer: indy has no direct
        # "exists" call, so a successful offer implies the cred def exists.
        try:
            await indy.anoncreds.issuer_create_credential_offer(
                self.wallet.handle, credential_definition_id
            )
            return True
        except IndyError as error:
            if error.error_code not in (
                ErrorCode.CommonInvalidStructure,
                ErrorCode.WalletItemNotFound,
            ):
                raise IndyErrorHandler.wrap_error(
                    error,
                    "Error when checking wallet for credential definition",
                    IssuerError,
                ) from error
            # recognized error signifies no such cred def in wallet: pass
            return False
    async def create_and_store_credential_definition(
        self,
        origin_did: str,
        schema: dict,
        signature_type: str = None,
        tag: str = None,
        support_revocation: bool = False,
    ) -> Tuple[str, str]:
        """
        Create a new credential definition and store it in the wallet.
        Args:
            origin_did: the DID issuing the credential definition
            schema: the schema used as a basis
            signature_type: the credential definition signature type (default 'CL')
            tag: the credential definition tag
            support_revocation: whether to enable revocation for this credential def
        Returns:
            A tuple of the credential definition ID and JSON
        """
        with IndyErrorHandler("Error when creating credential definition", IssuerError):
            (
                credential_definition_id,
                credential_definition_json,
            ) = await indy.anoncreds.issuer_create_and_store_credential_def(
                self.wallet.handle,
                origin_did,
                json.dumps(schema),
                tag or DEFAULT_CRED_DEF_TAG,
                signature_type or DEFAULT_SIGNATURE_TYPE,
                json.dumps({"support_revocation": support_revocation}),
            )
        return (credential_definition_id, credential_definition_json)
    async def create_credential_offer(self, credential_definition_id: str) -> str:
        """
        Create a credential offer for the given credential definition id.
        Args:
            credential_definition_id: The credential definition to create an offer for
        Returns:
            The created credential offer
        """
        with IndyErrorHandler("Exception when creating credential offer", IssuerError):
            credential_offer_json = await indy.anoncreds.issuer_create_credential_offer(
                self.wallet.handle, credential_definition_id
            )
        return credential_offer_json
    async def create_credential(
        self,
        schema: dict,
        credential_offer: dict,
        credential_request: dict,
        credential_values: dict,
        revoc_reg_id: str = None,
        tails_file_path: str = None,
    ) -> Tuple[str, str]:
        """
        Create a credential.
        Args
            schema: Schema to create credential for
            credential_offer: Credential Offer to create credential for
            credential_request: Credential request to create credential for
            credential_values: Values to go in credential
            revoc_reg_id: ID of the revocation registry
            tails_file_path: Path to the local tails file
        Returns:
            A tuple of created credential and revocation id
        """
        # Each attribute is stored twice: its raw string form and the
        # integer encoding required by the anoncreds signature scheme.
        encoded_values = {}
        schema_attributes = schema["attrNames"]
        for attribute in schema_attributes:
            # Ensure every attribute present in schema to be set.
            # Extraneous attribute names are ignored.
            try:
                credential_value = credential_values[attribute]
            except KeyError:
                raise IssuerError(
                    "Provided credential values are missing a value "
                    + f"for the schema attribute '{attribute}'"
                )
            encoded_values[attribute] = {}
            encoded_values[attribute]["raw"] = str(credential_value)
            encoded_values[attribute]["encoded"] = encode(credential_value)
        # A tails reader is only needed for revocable credentials.
        tails_reader_handle = (
            await create_tails_reader(tails_file_path)
            if tails_file_path is not None
            else None
        )
        try:
            (
                credential_json,
                credential_revocation_id,
                _,  # rev_reg_delta_json only figures if rev reg is ISSUANCE_ON_DEMAND
            ) = await indy.anoncreds.issuer_create_credential(
                self.wallet.handle,
                json.dumps(credential_offer),
                json.dumps(credential_request),
                json.dumps(encoded_values),
                revoc_reg_id,
                tails_reader_handle,
            )
        except AnoncredsRevocationRegistryFullError:
            # Surface a dedicated error so callers can roll over to a new registry.
            self.logger.error(
                f"Revocation registry {revoc_reg_id} is full: cannot create credential"
            )
            raise IssuerRevocationRegistryFullError(
                f"Revocation registry {revoc_reg_id} is full"
            )
        except IndyError as error:
            raise IndyErrorHandler.wrap_error(
                error, "Error when issuing credential", IssuerError
            ) from error
        return credential_json, credential_revocation_id
    async def revoke_credentials(
        self, revoc_reg_id: str, tails_file_path: str, cred_revoc_ids: Sequence[str]
    ) -> (str, Sequence[str]):
        """
        Revoke a set of credentials in a revocation registry.
        Args:
            revoc_reg_id: ID of the revocation registry
            tails_file_path: path to the local tails file
            cred_revoc_ids: sequences of credential indexes in the revocation registry
        Returns:
            Tuple with the combined revocation delta, list of cred rev ids not revoked
        """
        # Revoke one id at a time, folding each delta into a single
        # combined delta; ids that fail are collected, not fatal.
        failed_crids = []
        tails_reader_handle = await create_tails_reader(tails_file_path)
        result_json = None
        for cred_revoc_id in cred_revoc_ids:
            with IndyErrorHandler("Exception when revoking credential", IssuerError):
                try:
                    delta_json = await indy.anoncreds.issuer_revoke_credential(
                        self.wallet.handle,
                        tails_reader_handle,
                        revoc_reg_id,
                        cred_revoc_id,
                    )
                except IndyError as error:
                    if error.error_code == ErrorCode.AnoncredsInvalidUserRevocId:
                        # Already revoked (or never issued): log and move on.
                        self.logger.error(
                            "Abstaining from revoking credential on "
                            f"rev reg id {revoc_reg_id}, cred rev id={cred_revoc_id}: "
                            "already revoked or not yet issued"
                        )
                    else:
                        self.logger.error(
                            IndyErrorHandler.wrap_error(
                                error, "Revocation error", IssuerError
                            ).roll_up
                        )
                    failed_crids.append(cred_revoc_id)
                    continue
                if result_json:
                    result_json = await self.merge_revocation_registry_deltas(
                        result_json, delta_json
                    )
                else:
                    result_json = delta_json
        return (result_json, failed_crids)
    async def merge_revocation_registry_deltas(
        self, fro_delta: str, to_delta: str
    ) -> str:
        """
        Merge revocation registry deltas.
        Args:
            fro_delta: original delta in JSON format
            to_delta: incoming delta in JSON format
        Returns:
            Merged delta in JSON format
        """
        return await indy.anoncreds.issuer_merge_revocation_registry_deltas(
            fro_delta, to_delta
        )
    async def create_and_store_revocation_registry(
        self,
        origin_did: str,
        cred_def_id: str,
        revoc_def_type: str,
        tag: str,
        max_cred_num: int,
        tails_base_path: str,
        issuance_type: str = None,
    ) -> Tuple[str, str, str]:
        """
        Create a new revocation registry and store it in the wallet.
        Args:
            origin_did: the DID issuing the revocation registry
            cred_def_id: the identifier of the related credential definition
            revoc_def_type: the revocation registry type (default CL_ACCUM)
            tag: the unique revocation registry tag
            max_cred_num: the number of credentials supported in the registry
            tails_base_path: where to store the tails file
            issuance_type: optionally override the issuance type
        Returns:
            A tuple of the revocation registry ID, JSON, and entry JSON
        """
        # The tails writer persists the registry's tails file under tails_base_path.
        tails_writer = await create_tails_writer(tails_base_path)
        with IndyErrorHandler(
            "Exception when creating revocation registry", IssuerError
        ):
            (
                revoc_reg_id,
                revoc_reg_def_json,
                revoc_reg_entry_json,
            ) = await indy.anoncreds.issuer_create_and_store_revoc_reg(
                self.wallet.handle,
                origin_did,
                revoc_def_type,
                tag,
                cred_def_id,
                json.dumps(
                    {
                        "max_cred_num": max_cred_num,
                        "issuance_type": issuance_type or DEFAULT_ISSUANCE_TYPE,
                    }
                ),
                tails_writer,
            )
        return (revoc_reg_id, revoc_reg_def_json, revoc_reg_entry_json)
| 34.738889 | 88 | 0.589717 |
d4843b9fca3844663ce331ec5e6e5404bb121d07 | 2,194 | py | Python | tests/test_custom_backend.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 21 | 2015-01-01T02:06:25.000Z | 2022-03-19T13:57:42.000Z | tests/test_custom_backend.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 2 | 2015-01-07T07:38:53.000Z | 2015-01-07T07:39:01.000Z | tests/test_custom_backend.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 7 | 2015-01-03T19:45:14.000Z | 2022-03-19T13:57:44.000Z | import os
import unittest
from unittest import mock
from alligator.backends.sqlite_backend import Client as SQLiteClient
from alligator.constants import ALL
from alligator.gator import Gator, Options
from alligator.tasks import Task
def add(a, b):
    """Return the sum of ``a`` and ``b`` (the callable queued as a task)."""
    total = a + b
    return total
class CustomBackendTestCase(unittest.TestCase):
    """Exercise Gator with an explicitly supplied SQLite backend class."""

    def setUp(self):
        super().setUp()
        self.conn_string = "sqlite:///tmp/alligator_test.db"
        # Start from a clean slate; the DB file may not exist yet.
        try:
            os.unlink("/tmp/alligator_test.db")
        except OSError:
            pass
        self.gator = Gator(self.conn_string, backend_class=SQLiteClient)
        self.gator.backend.setup_tables()

    def test_everything(self):
        backend = self.gator.backend
        self.assertEqual(backend.len(ALL), 0)

        queued = [
            self.gator.task(add, 1, 3),
            self.gator.task(add, 5, 7),
            self.gator.task(add, 3, 13),
            self.gator.task(add, 9, 4),
        ]
        self.assertEqual(backend.len(ALL), 4)

        # First pop executes the first queued task (1 + 3).
        self.assertEqual(self.gator.pop().result, 4)
        # Fetch the third task directly by its id (3 + 13).
        self.assertEqual(self.gator.get(queued[2].task_id).result, 16)
        # Next pop now runs the second queued task (5 + 7).
        self.assertEqual(self.gator.pop().result, 12)

        self.assertEqual(backend.len(ALL), 1)
        backend.drop_all(ALL)
        self.assertEqual(backend.len(ALL), 0)

    @mock.patch("time.time")
    def test_delay_until(self, mock_time):
        mock_time.return_value = 12345678
        self.assertEqual(self.gator.backend.len(ALL), 0)

        # Queue three delayed tasks followed by one with no delay.
        for delay, (a, b) in zip(
            (12345777, 12345999, 12345678),
            ((2, 2), (3, 8), (4, 11)),
        ):
            with self.gator.options(delay_until=delay):
                self.gator.task(add, a, b)
        with self.gator.options():
            self.gator.task(add, 7, 1)

        self.assertEqual(self.gator.backend.len(ALL), 4)
        self.assertEqual(self.gator.pop().result, 4)

        # Jump the clock far forward so every delayed task is eligible.
        mock_time.return_value = 123499999
        self.assertEqual(self.gator.pop().result, 11)
| 28.128205 | 72 | 0.638104 |
bb3ad575d98b4193ec3677662dd2a38683fa6742 | 1,674 | py | Python | flow/scenarios/loop/loop_scenario.py | lijunsun/flow | fb3f0d54e06b9e940b7a2ba8772395ee7ea0f17b | [
"MIT"
] | null | null | null | flow/scenarios/loop/loop_scenario.py | lijunsun/flow | fb3f0d54e06b9e940b7a2ba8772395ee7ea0f17b | [
"MIT"
] | null | null | null | flow/scenarios/loop/loop_scenario.py | lijunsun/flow | fb3f0d54e06b9e940b7a2ba8772395ee7ea0f17b | [
"MIT"
] | 1 | 2020-08-24T07:41:49.000Z | 2020-08-24T07:41:49.000Z | from flow.scenarios.base_scenario import Scenario
from flow.core.params import InitialConfig
from flow.core.traffic_lights import TrafficLights
# Network parameters that LoopScenario requires in
# net_params.additional_params (values shown are typical defaults).
ADDITIONAL_NET_PARAMS = {
    # length of the ring road
    "length": 230,
    # number of lanes
    "lanes": 1,
    # speed limit for all edges
    "speed_limit": 30,
    # resolution of the curves on the ring
    "resolution": 40
}
class LoopScenario(Scenario):
    """Scenario describing a single circular ("loop") road."""

    def __init__(self,
                 name,
                 generator_class,
                 vehicles,
                 net_params,
                 initial_config=InitialConfig(),
                 traffic_lights=TrafficLights()):
        """Initializes a loop scenario.
        Requires from net_params:
        - length: length of the circle
        - lanes: number of lanes in the circle
        - speed_limit: max speed limit of the circle
        - resolution: number of nodes resolution
        See Scenario.py for description of params.
        """
        # Fail fast on the first required ring-road parameter that is absent.
        missing = [key for key in ADDITIONAL_NET_PARAMS.keys()
                   if key not in net_params.additional_params]
        if missing:
            raise KeyError(
                'Network parameter "{}" not supplied'.format(missing[0]))

        self.length = net_params.additional_params["length"]
        self.lanes = net_params.additional_params["lanes"]

        super().__init__(name, generator_class, vehicles, net_params,
                         initial_config, traffic_lights)

    def specify_edge_starts(self):
        """See parent class."""
        # The ring consists of four equal-length edges, in this order.
        quarter = self.length / 4
        return [("bottom", 0),
                ("right", quarter),
                ("top", 2 * quarter),
                ("left", 3 * quarter)]
| 30.436364 | 79 | 0.599164 |
f500d0d79758fd599f53516496c0c752844a95ac | 4,590 | py | Python | python/create-hole-example.py | ianksalter/shv | e0df96a21060df224ebcf1679d1f956bf9462445 | [
"MIT"
] | null | null | null | python/create-hole-example.py | ianksalter/shv | e0df96a21060df224ebcf1679d1f956bf9462445 | [
"MIT"
] | null | null | null | python/create-hole-example.py | ianksalter/shv | e0df96a21060df224ebcf1679d1f956bf9462445 | [
"MIT"
] | null | null | null | import bpy
# Remove Blender's default cube so the scene starts empty.
bpy.ops.object.delete(use_global=False)
# Ensure we are using the metric system.
bpy.context.scene.unit_settings.system = 'METRIC'
# Wall placement: location is the cube's center, scale its half-extents.
wall_1_location = (0, 0, 1.5)
wall_1_scale = (2.5, 0.1, 1.5)
hole_1_x_start = 1 # x distance from the wall's least-x vertex
hole_1_z_start = 0 # z distance from the wall's least-z vertex
hole_1_length = 1
hole_1_height = 2
hole_2_x_start = 2.1
hole_2_z_start = 1
hole_2_length = 1
hole_2_height = 1
# --- Wall 1 -----------------------------------------------------------
bpy.ops.mesh.primitive_cube_add(
    enter_editmode=False,
    align='WORLD',
    location=wall_1_location,
    scale=wall_1_scale)
bpy.context.object.name = "Wall 1"
# Keep a reference to the wall object for later use.
wall_1 = bpy.context.scene.objects['Wall 1']
# --- Holes ------------------------------------------------------------
# NOTE(review): an earlier version placed holes in world coordinates
# rather than relative to the wall. The superseded code:
#hole_1_location = (hole_1_x_start + hole_1_length / 2,
#                   0,
#                   hole_1_z_start + hole_1_height / 2)
#
#hole_1_scale = (hole_1_length / 2, 0.1, hole_1_height / 2)
# It was replaced with the wall-relative computation below.
wall_1_x_start = wall_1.location[0] - wall_1.dimensions[0]/2 # vector components index from 0
wall_1_y_start = wall_1.location[1] - wall_1.dimensions[1]/2
wall_1_z_start = wall_1.location[2] - wall_1.dimensions[2]/2
hole_1_location = (wall_1_x_start + hole_1_x_start + hole_1_length / 2,
                   wall_1.location[1],
                   wall_1_z_start + hole_1_z_start + hole_1_height / 2)
hole_1_scale = (hole_1_length / 2, wall_1.dimensions[1]/2 , hole_1_height / 2)
# NOTE(review): this still ignores the wall's rotation — the offsets are
# computed along world axes. The wall's rotation is available via
# wall_1.rotation_euler; handling it requires understanding Blender
# rotation modes, see:
# https://docs.blender.org/manual/en/latest/advanced/appendices/rotations.html
# Open question: how much of this computation should live in R instead,
# since the wall start vector is already known in the R data.
bpy.ops.mesh.primitive_cube_add(
    enter_editmode=False,
    align='WORLD',
    location=hole_1_location,
    scale=hole_1_scale)
bpy.context.object.name = "Hole 1"
# Keep a reference to the hole object.
hole_1 = bpy.context.scene.objects['Hole 1']
# Using the variable replaces repeated bpy.context.object accesses, i.e.
# bpy.context.object.display_type = 'WIRE'
# bpy.context.object.hide_render = True
# become the two lines below.
hole_1.display_type = 'WIRE'
hole_1.hide_render = True
bpy.context.view_layer.objects.active = wall_1 # make wall_1 active so the modifier attaches to it
bpy.ops.object.modifier_add(type='BOOLEAN') # add the modifier to the active object
bpy.context.object.modifiers["Boolean"].name = "hole_1_mod" # rename so a second modifier won't clash
modifier_1 = bpy.context.object.modifiers["hole_1_mod"] # keep a reference to the modifier
modifier_1.operation = 'DIFFERENCE' # subtract the hole from the wall
modifier_1.object = hole_1 # the cutter object
# NOTE: the modifier_1 variable replaces the longer forms below; renaming
# the modifier also avoids a clash when the second hole adds another one.
# bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
# bpy.context.object.modifiers["Boolean"].object = hole_1
# --- Hole 2: same procedure as hole 1 ---------------------------------
hole_2_location = (wall_1_x_start + hole_2_x_start + hole_2_length / 2,
                   wall_1.location[1],
                   wall_1_z_start + hole_2_z_start + hole_2_height / 2)
hole_2_scale = (hole_2_length / 2, wall_1.dimensions[1]/2 , hole_2_height / 2)
bpy.ops.mesh.primitive_cube_add(
    enter_editmode=False,
    align='WORLD',
    location=hole_2_location,
    scale=hole_2_scale)
bpy.context.object.name = "Hole 2"
hole_2 = bpy.context.scene.objects['Hole 2']
hole_2.display_type = 'WIRE'
hole_2.hide_render = True
bpy.context.view_layer.objects.active = wall_1
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].name = "hole_2_mod"
modifier_2 = bpy.context.object.modifiers["hole_2_mod"]
modifier_2.operation = 'DIFFERENCE'
modifier_2.object = hole_2
# Superseded selection code kept for reference:
#bpy.ops.outliner.item_activate(deselect_all=True)
#bpy.context.view_layer.objects.active = bpy.data.objects['Wall 1']
#bpy.data.objects['Wall 1'].select_set(True)
# Optionally save the generated file:
# bpy.ops.wm.save_as_mainfile(filepath="blender/create-hole-example.blend")
| 36.141732 | 114 | 0.720044 |
8c5ae485dad12246f288f49d02aaee0835ce3e60 | 2,132 | py | Python | arkane/encorr/__init__.py | tza0035/RMG-Py | 38c49f7107d1b19e4a534408a1040ddd313b8596 | [
"MIT"
] | 250 | 2015-06-06T23:32:00.000Z | 2022-03-22T16:45:16.000Z | arkane/encorr/__init__.py | tza0035/RMG-Py | 38c49f7107d1b19e4a534408a1040ddd313b8596 | [
"MIT"
] | 1,781 | 2015-05-26T23:52:00.000Z | 2022-03-31T19:07:54.000Z | arkane/encorr/__init__.py | tza0035/RMG-Py | 38c49f7107d1b19e4a534408a1040ddd313b8596 | [
"MIT"
] | 161 | 2015-06-02T14:28:59.000Z | 2022-03-02T19:37:14.000Z | #!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2021 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
initialize imports
"""
| 64.606061 | 79 | 0.463884 |
2f1882f06fe40999e75b82692f4409f879626948 | 99 | py | Python | pageNotFound/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | null | null | null | pageNotFound/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | null | null | null | pageNotFound/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class PagenotfoundConfig(AppConfig):
name = 'pageNotFound'
| 16.5 | 36 | 0.777778 |
5be419f807bd774ecdbc6ca019cc481d3d62db5f | 4,294 | py | Python | tests/conftest.py | luiscastilho/toute | 56207d3d92b8bc1066e115c285cf79f96be3e249 | [
"MIT"
] | 3 | 2021-01-04T02:12:33.000Z | 2021-09-22T14:41:39.000Z | tests/conftest.py | luiscastilho/toute | 56207d3d92b8bc1066e115c285cf79f96be3e249 | [
"MIT"
] | 6 | 2020-08-04T22:34:12.000Z | 2020-10-10T12:40:14.000Z | tests/conftest.py | luiscastilho/toute | 56207d3d92b8bc1066e115c285cf79f96be3e249 | [
"MIT"
] | 1 | 2020-08-04T21:59:43.000Z | 2020-08-04T21:59:43.000Z | # content of conftest.py
import pytest
import elasticsearch.helpers as eh_original
from toute import Document
from toute.fields import IntegerField, KeywordField, FloatField
DOUBLE_ID_FIELD = "double_id"
_INDEX = 'index'
_DOC_TYPE = 'doc_type'
class ES(object):
    """Minimal stand-in for an Elasticsearch client.

    Each method asserts it was routed to the expected index/doctype/id and
    returns a canned response shaped like the real client's.
    """

    test_id = 100
    test_ids = [100, 101]

    def index(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        assert kwargs['id'] == self.test_id
        assert 'body' in kwargs
        # Echo the call back, decorated like an ES "created" response.
        kwargs['created'] = True
        kwargs['_id'] = self.test_id
        return kwargs

    def get(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        assert kwargs['id'] == self.test_id
        return {'_source': {'id': self.test_id}, '_id': self.test_id}

    def search(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        hits = [
            {'_source': {'id': doc_id}, '_id': doc_id, '_score': 1.0}
            for doc_id in self.test_ids
        ]
        return {'hits': {'hits': hits}}
class ES_fields(object):
    """Like ``ES``, but search hits also carry a script_fields payload."""

    test_id = 100
    test_ids = [100, 101]
    double_ids = [value * 2 for value in test_ids]

    def index(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        assert kwargs['id'] == self.test_id
        assert 'body' in kwargs
        # Echo the call back, decorated like an ES "created" response.
        kwargs['created'] = True
        kwargs['_id'] = self.test_id
        return kwargs

    def get(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        assert kwargs['id'] == self.test_id
        return {'_source': {'id': self.test_id}, '_id': self.test_id}

    def search(self, *args, **kwargs):
        assert kwargs['index'] == _INDEX
        assert kwargs['doc_type'] == _DOC_TYPE
        hits = [
            {
                '_source': {'id': doc_id},
                '_id': doc_id,
                '_score': 1.0,
                'fields': {
                    "double_id": doc_id * 2
                },
            }
            for doc_id in self.test_ids
        ]
        return {'hits': {'hits': hits}}
class D(Document):
    """Base test document bound to the module-level index and doctype."""
    _index = _INDEX
    _doctype = _DOC_TYPE
    id = IntegerField()
class DW(D):
    """Document with a default (mock) ES client and extra typed fields."""
    _es = ES()
    id = IntegerField() # id should be inherited from D; redeclared here explicitly
    document_id = KeywordField()
    house_number = IntegerField()
    height = FloatField()
# def pytest_runtest_setup(item):
# # called for running each test in 'a' directory
# print("setting up", item)
@pytest.fixture(scope="module")
def INDEX():
return 'index'
@pytest.fixture(scope="module")
def DOC_TYPE():
return 'doc_type'
@pytest.fixture(scope="module")
def QUERY():
return {
"query": {
"bool": {
"must": [
{"match": {"name": "Gonzo"}}
]
}
}
}
@pytest.fixture(scope="module")
def QUERY_SCRIPT_FIELDS():
return {
"query": {
"match_all": {}
},
"script_fields": {
DOUBLE_ID_FIELD: {"script": "doc[\"id\"]*2"}
}
}
@pytest.fixture(scope="module")
def FIELD_NAME():
return DOUBLE_ID_FIELD
@pytest.fixture(scope="module")
def MockES():
return ES
@pytest.fixture(scope="module")
def MockESf():
return ES_fields
@pytest.fixture(scope="module")
def eh():
def bulk(es, actions):
for action in actions:
assert action['_op_type'] in ['index', 'update', 'delete']
assert action['_index'] == _INDEX
assert action['_type'] == _DOC_TYPE
eh_original.bulk = bulk
return eh_original
@pytest.fixture(scope="module")
def Doc():
return D
@pytest.fixture(scope="module")
def DocWithDefaultClient():
return DW
| 22.134021 | 70 | 0.509781 |
a44ee8ece1aefc03a797169be151684cffc8336a | 194 | py | Python | oprogramowanie_1/Zadanie01.py | Pervicorn/oprogramowanie | c17d8511d4dd650694b291d7cb904831a2e5b0ea | [
"MIT"
] | null | null | null | oprogramowanie_1/Zadanie01.py | Pervicorn/oprogramowanie | c17d8511d4dd650694b291d7cb904831a2e5b0ea | [
"MIT"
] | null | null | null | oprogramowanie_1/Zadanie01.py | Pervicorn/oprogramowanie | c17d8511d4dd650694b291d7cb904831a2e5b0ea | [
"MIT"
] | 1 | 2020-10-21T11:31:11.000Z | 2020-10-21T11:31:11.000Z | # -*- coding: utf-8 -*-
# Task 1: convert a temperature from Celsius to Fahrenheit --------------
# The prompt string is user-facing and intentionally in Polish.
cel = float(input('Proszę podać temperaturę w °C: '))
# °F = °C * 9/5 + 32
fah = (cel * (9/5)) + 32
print('\n')
print(cel, '°C =', fah, '°F')
| 19.4 | 53 | 0.43299 |
9cc6a015fbaa1a299af1e67b872f86563cabd9c7 | 19,365 | py | Python | lib_pypy/pyrepl/unix_console.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | 4 | 2019-02-11T06:58:43.000Z | 2020-03-15T14:12:32.000Z | lib_pypy/pyrepl/unix_console.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib_pypy/pyrepl/unix_console.py | akercheval/espy | f8317d2f01ba726ed4f03cab081176c32ae4cac4 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | # Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Antonio Cuni
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import termios, select, os, struct, errno
import signal, re, time, sys
from fcntl import ioctl
from pyrepl import curses
from pyrepl.fancy_termios import tcgetattr, tcsetattr
from pyrepl.console import Console, Event
from pyrepl import unix_eventqueue
class InvalidTerminal(RuntimeError):
    """Raised when the terminal lacks a capability pyrepl requires."""
    pass
# Exception types that signal "this console cannot be used".
_error = (termios.error, curses.error, InvalidTerminal)
# there are arguments for changing this to "refresh"
SIGWINCH_EVENT = 'repaint'
# ioctl request numbers; None on platforms that do not define them.
FIONREAD = getattr(termios, "FIONREAD", None)
TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None)
def _my_getstr(cap, optional=0):
    """Look up the terminfo string capability *cap*.

    Returns the capability's escape sequence, or None when *optional* is
    true and the terminal lacks it; otherwise raises InvalidTerminal.
    """
    r = curses.tigetstr(cap)
    if not optional and r is None:
        # Instance-call raise form is valid on both Python 2 and 3,
        # unlike the old ``raise Class, args`` statement used before.
        raise InvalidTerminal(
            "terminal doesn't have the required '%s' capability" % cap)
    return r
# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH!
def maybe_add_baudrate(dict, rate):
    """Record termios' constant for *rate* in *dict*, if this platform defines it."""
    attr = 'B%d' % rate
    value = getattr(termios, attr, None)
    if value is not None:
        dict[value] = rate
# Map each baud-rate constant this platform defines back to its numeric rate.
ratedict = {}
for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400,
          2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]:
    maybe_add_baudrate(ratedict, r)
# The helper and loop variable are module-construction-only; drop them.
del r, maybe_add_baudrate
# Matches terminfo delay markers such as "$<5>" embedded in capability strings.
delayprog = re.compile("\\$<([0-9]+)((?:/|\\*){0,2})>")
# Use the real select.poll where available; otherwise emulate the tiny
# subset of the poll-object interface that this module relies on.
try:
    poll = select.poll
except AttributeError:
    # this is exactly the minimum necessary to support what we
    # do with poll objects
    class poll:
        def __init__(self):
            pass
        def register(self, fd, flag):
            # Only one fd is ever registered; the event flag is ignored.
            self.fd = fd
        def poll(self, timeout=None):
            r,w,e = select.select([self.fd],[],[],timeout)
            return r
POLLIN = getattr(select, "POLLIN", None)
class UnixConsole(Console):
def __init__(self, f_in=0, f_out=1, term=None, encoding=None):
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
if isinstance(f_in, int):
self.input_fd = f_in
else:
self.input_fd = f_in.fileno()
if isinstance(f_out, int):
self.output_fd = f_out
else:
self.output_fd = f_out.fileno()
self.pollob = poll()
self.pollob.register(self.input_fd, POLLIN)
curses.setupterm(term, self.output_fd)
self.term = term
self._bel = _my_getstr("bel")
self._civis = _my_getstr("civis", optional=1)
self._clear = _my_getstr("clear")
self._cnorm = _my_getstr("cnorm", optional=1)
self._cub = _my_getstr("cub", optional=1)
self._cub1 = _my_getstr("cub1", 1)
self._cud = _my_getstr("cud", 1)
self._cud1 = _my_getstr("cud1", 1)
self._cuf = _my_getstr("cuf", 1)
self._cuf1 = _my_getstr("cuf1", 1)
self._cup = _my_getstr("cup")
self._cuu = _my_getstr("cuu", 1)
self._cuu1 = _my_getstr("cuu1", 1)
self._dch1 = _my_getstr("dch1", 1)
self._dch = _my_getstr("dch", 1)
self._el = _my_getstr("el")
self._hpa = _my_getstr("hpa", 1)
self._ich = _my_getstr("ich", 1)
self._ich1 = _my_getstr("ich1", 1)
self._ind = _my_getstr("ind", 1)
self._pad = _my_getstr("pad", 1)
self._ri = _my_getstr("ri", 1)
self._rmkx = _my_getstr("rmkx", 1)
self._smkx = _my_getstr("smkx", 1)
## work out how we're going to sling the cursor around
if 0 and self._hpa: # hpa don't work in windows telnet :-(
self.__move_x = self.__move_x_hpa
elif self._cub and self._cuf:
self.__move_x = self.__move_x_cub_cuf
elif self._cub1 and self._cuf1:
self.__move_x = self.__move_x_cub1_cuf1
else:
raise RuntimeError, "insufficient terminal (horizontal)"
if self._cuu and self._cud:
self.__move_y = self.__move_y_cuu_cud
elif self._cuu1 and self._cud1:
self.__move_y = self.__move_y_cuu1_cud1
else:
raise RuntimeError, "insufficient terminal (vertical)"
if self._dch1:
self.dch1 = self._dch1
elif self._dch:
self.dch1 = curses.tparm(self._dch, 1)
else:
self.dch1 = None
if self._ich1:
self.ich1 = self._ich1
elif self._ich:
self.ich1 = curses.tparm(self._ich, 1)
else:
self.ich1 = None
self.__move = self.__move_short
self.event_queue = unix_eventqueue.EventQueue(self.input_fd)
self.partial_char = ''
self.cursor_visible = 1
def change_encoding(self, encoding):
self.encoding = encoding
def refresh(self, screen, cxy):
    """Repaint the terminal so it shows ``screen`` (a list of line strings)
    with the cursor at ``cxy``, updating only the lines that changed and
    scrolling (by hardware scroll if available) when the buffer is taller
    than the physical screen."""
    # this function is still too long (over 90 lines)
    if not self.__gone_tall:
        # Grow our notion of the screen one line at a time while we still
        # fit vertically, emitting real newlines so the terminal scrolls.
        while len(self.screen) < min(len(screen), self.height):
            self.__hide_cursor()
            self.__move(0, len(self.screen) - 1)
            self.__write("\n")
            self.__posxy = 0, len(self.screen)
            self.screen.append("")
    else:
        while len(self.screen) < len(screen):
            self.screen.append("")
    if len(screen) > self.height:
        # Buffer no longer fits: switch to absolute (cup) cursor motion.
        self.__gone_tall = 1
        self.__move = self.__move_tall
    px, py = self.__posxy
    old_offset = offset = self.__offset
    height = self.height
    # we make sure the cursor is on the screen, and that we're
    # using all of the screen if we can
    cx, cy = cxy
    if cy < offset:
        offset = cy
    elif cy >= offset + height:
        offset = cy - height + 1
    elif offset > 0 and len(screen) < offset + height:
        offset = max(len(screen) - height, 0)
        screen.append("")
    oldscr = self.screen[old_offset:old_offset + height]
    newscr = screen[offset:offset + height]
    # use hardware scrolling if we have it.
    if old_offset > offset and self._ri:
        # Scrolled up: push blank lines in at the top with 'ri'.
        self.__hide_cursor()
        self.__write_code(self._cup, 0, 0)
        self.__posxy = 0, old_offset
        for i in range(old_offset - offset):
            self.__write_code(self._ri)
            oldscr.pop(-1)
            oldscr.insert(0, "")
    elif old_offset < offset and self._ind:
        # Scrolled down: push blank lines in at the bottom with 'ind'.
        self.__hide_cursor()
        self.__write_code(self._cup, self.height - 1, 0)
        self.__posxy = 0, old_offset + self.height - 1
        for i in range(offset - old_offset):
            self.__write_code(self._ind)
            oldscr.pop(0)
            oldscr.append("")
    self.__offset = offset
    # Rewrite only the rows that actually differ.
    for _y, oldline, newline, in zip(range(offset, offset + height),
                                     oldscr,
                                     newscr):
        if oldline != newline:
            self.__write_changed_line(_y, oldline, newline, px)
    # Erase any leftover rows past the end of the new screen.
    _y = len(newscr)
    while _y < len(oldscr):
        self.__hide_cursor()
        self.__move(0, _y)
        self.__posxy = 0, _y
        self.__write_code(self._el)
        _y += 1
    self.__show_cursor()
    self.screen = screen
    self.move_cursor(cx, cy)
    self.flushoutput()
def __write_changed_line(self, _y, oldline, newline, px):
    """Minimally update row ``_y`` from ``oldline`` to ``newline``: skip the
    common prefix, then try cheap single-character insert/overwrite tricks
    before falling back to rewriting the tail of the line."""
    # this is frustrating; there's no reason to test (say)
    # self.dch1 inside the loop -- but alternative ways of
    # structuring this function are equally painful (I'm trying to
    # avoid writing code generators these days...)
    x = 0
    minlen = min(len(oldline), len(newline))
    #
    # reuse the oldline as much as possible, but stop as soon as we
    # encounter an ESCAPE, because it might be the start of an escape
    # sequence
    while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b':
        x += 1
    if oldline[x:] == newline[x+1:] and self.ich1:
        # Exactly one character was inserted; use the terminal's
        # insert-character capability instead of rewriting the tail.
        if ( _y == self.__posxy[1] and x > self.__posxy[0]
             and oldline[px:x] == newline[px+1:x+1] ):
            x = px
        self.__move(x, _y)
        self.__write_code(self.ich1)
        self.__write(newline[x])
        self.__posxy = x + 1, _y
    elif x < minlen and oldline[x + 1:] == newline[x + 1:]:
        # Exactly one character changed in place: overwrite just it.
        self.__move(x, _y)
        self.__write(newline[x])
        self.__posxy = x + 1, _y
    elif (self.dch1 and self.ich1 and len(newline) == self.width
          and x < len(newline) - 2
          and newline[x+1:-1] == oldline[x:-2]):
        # Full-width line with one char inserted: delete one char at the
        # right edge, then insert at x, avoiding a full-line rewrite.
        self.__hide_cursor()
        self.__move(self.width - 2, _y)
        self.__posxy = self.width - 2, _y
        self.__write_code(self.dch1)
        self.__move(x, _y)
        self.__write_code(self.ich1)
        self.__write(newline[x])
        self.__posxy = x + 1, _y
    else:
        # General case: rewrite everything from the first difference on.
        self.__hide_cursor()
        self.__move(x, _y)
        if len(oldline) > len(newline):
            self.__write_code(self._el)
        self.__write(newline[x:])
        self.__posxy = len(newline), _y
    if '\x1b' in newline:
        # ANSI escape characters are present, so we can't assume
        # anything about the position of the cursor. Moving the cursor
        # to the left margin should work to get to a known position.
        self.move_cursor(0, _y)
def __write(self, text):
    # Queue literal text for output; actually written by flushoutput().
    self.__buffer.append((text, 0))
def __write_code(self, fmt, *args):
    # Queue a tparm-expanded terminal control sequence; the flag 1 marks
    # it as a code so flushoutput() routes it through __tputs for padding.
    self.__buffer.append((curses.tparm(fmt, *args), 1))
def __maybe_write_code(self, fmt, *args):
    # Emit the capability only if the terminal actually provides it
    # (optional capabilities may be None).
    if fmt:
        self.__write_code(fmt, *args)
def __move_y_cuu1_cud1(self, _y):
    """Move the cursor vertically to row ``_y`` by repeating the
    single-step cud1/cuu1 capabilities."""
    dy = _y - self.__posxy[1]
    if dy > 0:
        self.__write_code(dy*self._cud1)
    elif dy < 0:
        self.__write_code((-dy)*self._cuu1)
def __move_y_cuu_cud(self, _y):
    """Move the cursor vertically to row ``_y`` using the parameterized
    cud (down) / cuu (up) capabilities."""
    delta = _y - self.__posxy[1]
    if delta == 0:
        return
    capability = self._cud if delta > 0 else self._cuu
    self.__write_code(capability, abs(delta))
def __move_x_hpa(self, x):
    """Move the cursor horizontally to column ``x`` with the absolute
    hpa capability (skipped when already there)."""
    if x != self.__posxy[0]:
        self.__write_code(self._hpa, x)
def __move_x_cub1_cuf1(self, x):
    """Move the cursor horizontally to column ``x`` by repeating the
    single-step cuf1/cub1 capabilities."""
    dx = x - self.__posxy[0]
    if dx > 0:
        self.__write_code(self._cuf1*dx)
    elif dx < 0:
        self.__write_code(self._cub1*(-dx))
def __move_x_cub_cuf(self, x):
    """Move the cursor horizontally to column ``x`` using the
    parameterized cuf (forward) / cub (backward) capabilities."""
    delta = x - self.__posxy[0]
    if delta == 0:
        return
    capability = self._cuf if delta > 0 else self._cub
    self.__write_code(capability, abs(delta))
def __move_short(self, x, _y):
    # Cursor-motion strategy while the buffer fits the screen:
    # independent relative moves on each axis.
    self.__move_x(x)
    self.__move_y(_y)
def __move_tall(self, x, _y):
    # Cursor-motion strategy once the buffer exceeds the screen:
    # absolute cup addressing relative to the current scroll offset.
    assert 0 <= _y - self.__offset < self.height, _y - self.__offset
    self.__write_code(self._cup, _y - self.__offset, x)
def move_cursor(self, x, _y):
    """Move the visible cursor to ``(x, _y)``; if the row is outside the
    currently visible window, queue a 'scroll' event instead of moving."""
    if _y < self.__offset or _y >= self.__offset + self.height:
        self.event_queue.insert(Event('scroll', None))
    else:
        self.__move(x, _y)
        self.__posxy = x, _y
        self.flushoutput()
def prepare(self):
    """Put the tty into raw-ish mode and reset screen bookkeeping.
    Called once per readline invocation; undone by restore()."""
    # per-readline preparations:
    self.__svtermstate = tcgetattr(self.input_fd)
    raw = self.__svtermstate.copy()
    # Disable input translation, output post-processing, parity; force
    # 8-bit characters; turn off canonical mode and echo.
    raw.iflag &=~ (termios.BRKINT | termios.INPCK |
                   termios.ISTRIP | termios.IXON)
    raw.oflag &=~ (termios.OPOST)
    raw.cflag &=~ (termios.CSIZE|termios.PARENB)
    raw.cflag |= (termios.CS8)
    raw.lflag &=~ (termios.ICANON|termios.ECHO|
                   termios.IEXTEN|(termios.ISIG*1))
    # Read returns after 1 byte, with no inter-byte timeout.
    raw.cc[termios.VMIN] = 1
    raw.cc[termios.VTIME] = 0
    tcsetattr(self.input_fd, termios.TCSADRAIN, raw)
    self.screen = []
    self.height, self.width = self.getheightwidth()
    self.__buffer = []
    self.__posxy = 0, 0
    self.__gone_tall = 0
    self.__move = self.__move_short
    self.__offset = 0
    # Enter keypad-transmit mode if the terminal supports it.
    self.__maybe_write_code(self._smkx)
    try:
        self.old_sigwinch = signal.signal(
            signal.SIGWINCH, self.__sigwinch)
    except ValueError:
        # signal() raises ValueError when not called from the main
        # thread; resize events are simply not tracked in that case.
        pass
def restore(self):
    """Undo prepare(): leave keypad mode, flush pending output, restore
    the saved termios state and the previous SIGWINCH handler."""
    self.__maybe_write_code(self._rmkx)
    self.flushoutput()
    tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate)
    if hasattr(self, 'old_sigwinch'):
        signal.signal(signal.SIGWINCH, self.old_sigwinch)
        del self.old_sigwinch
def __sigwinch(self, signum, frame):
    # Terminal was resized: re-query dimensions and queue a 'resize'
    # event for the main loop to act on.
    self.height, self.width = self.getheightwidth()
    self.event_queue.insert(Event('resize', None))
def push_char(self, char):
    """Accumulate one raw input byte; once the accumulated bytes decode
    cleanly in ``self.encoding``, push the decoded character onto the
    event queue. Incomplete multi-byte sequences are kept pending."""
    self.partial_char += char
    try:
        c = unicode(self.partial_char, self.encoding)
    except UnicodeError, e:
        if len(e.args) > 4 and \
               e.args[4] == 'unexpected end of data':
            # Incomplete multi-byte sequence: wait for more bytes.
            pass
        else:
            # was: "raise". But it crashes pyrepl, and by extension the
            # pypy currently running, in which we are e.g. in the middle
            # of some debugging session. Argh. Instead just print an
            # error message to stderr and continue running, for now.
            self.partial_char = ''
            sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e))
    else:
        self.partial_char = ''
        self.event_queue.push(c)
def get_event(self, block=1):
    """Return the next input event, reading and decoding bytes from the
    input fd until the queue is non-empty. With ``block`` false, at most
    one read attempt is made per empty check."""
    while self.event_queue.empty():
        while 1: # All hail Unix!
            try:
                self.push_char(os.read(self.input_fd, 1))
            except (IOError, OSError), err:
                if err.errno == errno.EINTR:
                    # read() was interrupted by a signal (e.g. SIGWINCH):
                    # if the handler queued an event, return it,
                    # otherwise retry the read.
                    if not self.event_queue.empty():
                        return self.event_queue.get()
                    else:
                        continue
                else:
                    raise
            else:
                break
        if not block:
            break
    return self.event_queue.get()
def wait(self):
    """Block until input is available on the input fd."""
    self.pollob.poll()
def set_cursor_vis(self, vis):
    """Show the cursor when ``vis`` is true, hide it otherwise."""
    if not vis:
        self.__hide_cursor()
    else:
        self.__show_cursor()
def __hide_cursor(self):
    # Emit civis (if available) only on a visible->hidden transition.
    if self.cursor_visible:
        self.__maybe_write_code(self._civis)
        self.cursor_visible = 0
def __show_cursor(self):
    # Emit cnorm (if available) only on a hidden->visible transition.
    if not self.cursor_visible:
        self.__maybe_write_code(self._cnorm)
        self.cursor_visible = 1
def repaint_prep(self):
    """Invalidate our notion of what is on screen (fill it with '\\000'
    padding) so the next refresh() repaints every line."""
    if not self.__gone_tall:
        self.__posxy = 0, self.__posxy[1]
        self.__write("\r")
        ns = len(self.screen)*['\000'*self.width]
        self.screen = ns
    else:
        self.__posxy = 0, self.__offset
        self.__move(0, self.__offset)
        ns = self.height*['\000'*self.width]
        self.screen = ns
if TIOCGWINSZ:
    def getheightwidth(self):
        """Return (rows, cols): LINES/COLUMNS environment override first,
        then the TIOCGWINSZ ioctl, falling back to 25x80."""
        try:
            return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
        except KeyError:
            height, width = struct.unpack(
                "hhhh", ioctl(self.input_fd, TIOCGWINSZ, "\000"*8))[0:2]
            # Some terminals report 0 rows; use the classic default.
            if not height: return 25, 80
            return height, width
else:
    def getheightwidth(self):
        """Return (rows, cols) from the environment, else 25x80 (no
        TIOCGWINSZ on this platform)."""
        try:
            return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
        except KeyError:
            return 25, 80
def forgetinput(self):
    """Discard any pending, unread input on the tty."""
    termios.tcflush(self.input_fd, termios.TCIFLUSH)
def flushoutput(self):
    """Write everything queued by __write/__write_code to the output fd
    and clear the buffer. Control codes go through __tputs so terminfo
    padding delays are honoured."""
    for text, iscode in self.__buffer:
        if iscode:
            self.__tputs(text)
        else:
            os.write(self.output_fd, text.encode(self.encoding, 'replace'))
    del self.__buffer[:]
def __tputs(self, fmt, prog=delayprog):
    """A Python implementation of the curses tputs function; the
    curses one can't really be wrapped in a sane manner.
    I have the strong suspicion that this is complexity that
    will never do anyone any good."""
    # using .get() means that things will blow up
    # only if the bps is actually needed (which I'm
    # betting is pretty unlikely)
    bps = ratedict.get(self.__svtermstate.ospeed)
    while 1:
        m = prog.search(fmt)
        if not m:
            os.write(self.output_fd, fmt)
            break
        # Write the text before the $<delay> marker, then honour the
        # delay either with pad characters or by sleeping.
        x, _y = m.span()
        os.write(self.output_fd, fmt[:x])
        fmt = fmt[_y:]
        delay = int(m.group(1))
        if '*' in m.group(2):
            # '*' marks a per-affected-line delay in terminfo.
            delay *= self.height
        if self._pad:
            # NOTE(review): bps is None when ospeed is missing from
            # ratedict, which would raise here -- confirm intended.
            nchars = (bps*delay)/1000
            os.write(self.output_fd, self._pad*nchars)
        else:
            time.sleep(float(delay)/1000.0)
def finish(self):
    """Park the cursor just below the last non-empty line and flush,
    leaving the shell prompt in a sane position."""
    _y = len(self.screen) - 1
    while _y >= 0 and not self.screen[_y]:
        _y -= 1
    self.__move(0, min(_y, self.height + self.__offset - 1))
    self.__write("\n\r")
    self.flushoutput()
def beep(self):
    """Sound the terminal bell (if the terminal has one) immediately."""
    self.__maybe_write_code(self._bel)
    self.flushoutput()
if FIONREAD:
    def getpending(self):
        """Drain all queued events plus any bytes pending on the input fd
        into a single synthetic 'key' Event (FIONREAD variant, which asks
        the kernel exactly how many bytes are waiting)."""
        e = Event('key', '', '')
        while not self.event_queue.empty():
            e2 = self.event_queue.get()
            e.data += e2.data
            # BUG FIX: this previously read ``e.raw += e.raw``, doubling
            # the already-accumulated raw data instead of appending the
            # dequeued event's raw bytes.
            e.raw += e2.raw
        amount = struct.unpack(
            "i", ioctl(self.input_fd, FIONREAD, "\0\0\0\0"))[0]
        raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace')
        e.data += raw
        e.raw += raw
        return e
else:
    def getpending(self):
        """Drain all queued events plus pending input into a single 'key'
        Event; without FIONREAD we just read up to a fixed amount."""
        e = Event('key', '', '')
        while not self.event_queue.empty():
            e2 = self.event_queue.get()
            e.data += e2.data
            e.raw += e2.raw  # BUG FIX: was ``e.raw += e.raw``
        amount = 10000
        raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace')
        e.data += raw
        e.raw += raw
        return e
def clear(self):
    """Clear the terminal and reset position/scroll bookkeeping."""
    self.__write_code(self._clear)
    self.__gone_tall = 1
    self.__move = self.__move_tall
    self.__posxy = 0, 0
    self.screen = []
| 34.033392 | 83 | 0.552905 |
9f452377f3e83ba3f42bea1df3b8eea68f1c57df | 5,063 | py | Python | examples/plot_lcmv_decoding.py | wmvanvliet/workbench | 3f299f820fdc8e4fb9bf3db31a50cb590040cead | [
"BSD-3-Clause"
] | null | null | null | examples/plot_lcmv_decoding.py | wmvanvliet/workbench | 3f299f820fdc8e4fb9bf3db31a50cb590040cead | [
"BSD-3-Clause"
] | null | null | null | examples/plot_lcmv_decoding.py | wmvanvliet/workbench | 3f299f820fdc8e4fb9bf3db31a50cb590040cead | [
"BSD-3-Clause"
] | null | null | null | #encoding: utf-8
"""
Decoding with a spatio-temporal LCMV beamformer
===============================================
This example will demonstrate a simple decoder based on an LCMV beamformer
applied to the MNE-Sample dataset. This dataset contains MEG recordings of a
subject being presented with audio beeps on either the left or right side of
the head.
This approach is further documented in van Vliet et al. 2016 [1]_.
"""
###############################################################################
# First, some required Python modules and loading the data:
import numpy as np
import mne
from posthoc import Beamformer
from posthoc.cov_estimators import ShrinkageKernel
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
from matplotlib import pyplot as plt
mne.set_log_level(False) # Be very very quiet
path = mne.datasets.sample.data_path()
raw = mne.io.read_raw_fif('{path}/MEG/sample/sample_audvis_raw.fif',
preload=True)
events = mne.find_events(raw)
event_id = dict(left=1, right=2)
raw.pick_types(meg='grad')
raw.filter(None, 20)
raw, events = raw.resample(50, events=events)
epochs = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0), preload=True)
###############################################################################
# The ``post-hoc`` package uses a scikit-learn style API. We must translate
# the MNE-Python ``epochs`` object into scikit-learn style ``X`` and ``y``
# matrices.
# X: one row per epoch; channels x time flattened into a feature vector.
X = epochs.get_data().reshape(len(epochs), -1)
y = epochs.events[:, 2]
# Split the data in a train and test set
folds = StratifiedKFold(n_splits=2)
train_index, test_index = next(folds.split(X, y))
X_train, y_train = X[train_index], y[train_index]
X_test, y_test = X[test_index], y[test_index]
###############################################################################
# We will now use some of the epochs to construct a template of the difference
# between the 'left' and 'right' responses. For this, we compute the "evoked
# potential" for the left and right beeps. The contrast between these
# conditions will serve as our template. This template is then further refined
# with a Hanning window to focus on a specific part of the evoked potential.
evoked_left = X_train[y_train == 1].mean(axis=0)
evoked_right = X_train[y_train == 2].mean(axis=0)
template = evoked_left - evoked_right
# This creates a (channels x time) view of the template
template_ch_time = template.reshape(epochs.info['nchan'], -1)
# Plot the template
plt.figure()
plt.plot(epochs.times, template_ch_time.T, color='black', alpha=0.2)
plt.xlabel('Time (s)')
plt.title('Original template')
###############################################################################
# The template is quite noisy. The main distinctive feature between the
# conditions should be the auditory evoked potential around 0.05 seconds.
# Let's create a Hanning window to limit our template to just the evoked
# potential.
center = np.searchsorted(epochs.times, 0.05)
width = 10
window = np.zeros(len(epochs.times))
window[center - width // 2: center + width // 2] = np.hanning(width)
# Multiplying in-place also modifies ``template``, which views this memory.
template_ch_time *= window[np.newaxis, :]
# Plot the refined template
plt.figure()
plt.plot(epochs.times, template_ch_time.T, color='black', alpha=0.2)
plt.xlabel('Time (s)')
plt.title('Refined template')
###############################################################################
# Now, we make an LCMV beamformer based on the template. We apply heavy
# shrinkage regularization to the covariance matrix to deal with the fact that
# we have so few trials and a huge number of features:
# (203 channels x 50 time points = 7308)
beamformer = Beamformer(template, cov=ShrinkageKernel(1.0)).fit(X_train)
# Decode the test data
y_hat = beamformer.predict(X_test).ravel()
# Visualize the output of the LCMV beamformer
y_left = y_hat[y_test == 1]
y_right = y_hat[y_test == 2]
lim = np.max(np.abs(y_hat))
plt.figure()
plt.scatter(np.arange(len(y_left)), y_left)
plt.scatter(np.arange(len(y_left), len(y_left) + len(y_right)), y_right)
plt.legend(['left', 'right'])
plt.axhline(0, color='gray')
plt.ylim(-lim, lim)
plt.xlabel('Epochs')
plt.ylabel('Beamformer output')
# Assign the 'left' class to values above 0 and 'right' to values below 0
y_bin = np.zeros(len(y_hat), dtype=int)
y_bin[y_hat >= 0] = 1
y_bin[y_hat < 0] = 2
###############################################################################
# So, how did we do? What percentage of the epochs did we decode correctly?
print('LCMV accuracy: %.2f%%' % (100 * metrics.accuracy_score(y_test, y_bin)))
###############################################################################
# References
# ----------
#
# .. [1] van Vliet, M., Chumerin, N., De Deyne, S., Wiersema, J. R., Fias, W.,
#    Storms, G., & Van Hulle, M. M. (2016). Single-trial ERP component
#    analysis using a spatiotemporal LCMV beamformer. IEEE Transactions on
#    Biomedical Engineering, 63(1), 55–66.
#    https://doi.org/10.1109/TBME.2015.2468588
| 39.554688 | 79 | 0.639937 |
06f8745efe290149be73d6a4f53485386e6d4a44 | 8,458 | py | Python | regionator/parse_object_db.py | rootlabs/neohabitat | 0600dbee95f6cf95cd24e5615d4f15af6508ac6d | [
"MIT"
] | null | null | null | regionator/parse_object_db.py | rootlabs/neohabitat | 0600dbee95f6cf95cd24e5615d4f15af6508ac6d | [
"MIT"
] | null | null | null | regionator/parse_object_db.py | rootlabs/neohabitat | 0600dbee95f6cf95cd24e5615d4f15af6508ac6d | [
"MIT"
] | null | null | null | '''
Parse the MC_object database from the Habitat Stratus backup.
There are still lots of unknowns:
* Many objects have container 0x20202020. They appear to be unused, but it's
unclear why.
* Some address strings have unprintable characters. It's unclear if this
was intentional or garbage data.
* Matchbook (class 49): there are 3 objects of this type, but they appear
to be overwritten or otherwise unused.
* When combined with MC_regions, we find lots of orphaned objects. This may
be because of broken relationships. Some appear to be pockets of avatars.
'''
import json, struct, sys
from collections import OrderedDict
# Field names of one 128-byte MC_object record, in on-disk order; zipped
# against the values unpacked with FORMAT below.
STRUCT_ITEMS = (
    'id',
    'class',
    'container',
    'contype',
    'x_pos',
    'y_pos',
    'style',
    'gr_state',
    'orientation',
    'gr_width',
    'nitty_bits',
    'prop_length',
    'property_data',
)
# Big-endian record layout: 3 uint32, 7 uint16, 1 uint32, 10 pad bytes,
# 1 uint16, then 86 bytes of class-specific property data.
FORMAT = '> 3I 7H I 10x H 86s'
assert struct.calcsize(FORMAT) == 128
# Per-class property decoders: class id -> (struct format, field names).
# 'W' is a custom "fatword" code (a 16-bit value stored as 00 xx 00 yy),
# translated by decode_properties(). Classes with trailing variable-length
# data (arrays) have their extra data handled in parse_properties().
PARSERS = {
    2: ('>HI', ['magic_type', 'magic_data']),
    129: ('>H', ['state']),
    6: ('>HW', ['open_flags', 'key']),
    130: ('>H', ['open_flags']),
    10: ('>HIH', ['current_page', 'text_id', 'last_page']),
    12: ('>H', ['filled']),
    13: ('>HW', ['open_flags', 'key']),
    131: ('>HH', ['width', 'length']),
    132: ('>xxxxxxi', ['connection']),
    158: ('>H', ['open_flags']),
    134: ('>H', ['open_flags']),
    135: ('>HW', ['open_flags', 'key']),
    136: ('>I', ['take']),
    137: ('>H', ['open_flags']),
    18: ('>HW', ['open_flags', 'key']), # + whoput array
    20: ('>H', ['live']),
    21: ('>H', ['state']),
    22: ('>HWIH', ['open_flags', 'key', 'owner', 'locked']),
    23: ('>HWi', ['open_flags', 'key', 'connection']),
    25: ('>HH', ['count', 'effect']),
    28: ('>HI20s', ['state', 'take', 'address']),
    26: ('>H', ['charge']),
    27: ('>H', ['state']),
    29: ('>H', ['mass']),
    30: ('>H', ['on']),
    93: ('>H', ['flat_type']),
    139: ('>H', ['on']),
    140: ('>I', ['take']),
    141: ('>H', ['live']),
    5: ('>H', ['state']),
    32: ('>HW', ['open_flags', 'key']),
    33: ('>HI', ['magic_type', 'magic_data']),
    98: ('>HWHHHHHHHHHHHH', ['open_flags', 'key', 'x_offset_1', 'y_offset_1',
        'x_offset_2', 'y_offset_2', 'x_offset_3', 'y_offset_3', 'x_offset_4',
        'y_offset_4', 'x_offset_5', 'y_offset_5', 'x_offset_6', 'y_offset_6']),
    35: ('>H', ['pinpulled']),
    38: ('>H', ['state']),
    88: ('>HW', ['open_flags', 'key']),
    40: ('>H', ['instant_what']),
    42: ('>W', ['key_number']),
    43: ('>H', ['is_magic']),
    45: ('>HHxxxxH', ['lamp_state', 'wisher', 'live']),
    46: ('>HI', ['magic_type', 'magic_data']),
    48: ('>HI', ['mail_arrived', 'owner']),
    # XXX can't find valid example to decode varstring properly
    #49: ('>84s', ['mtext']),
    52: ('>H', ['on']),
    54: ('>I', ['text_id']),
    96: ('>HW', ['open_flags', 'key']),
    152: ('>HH', ['mass', 'picture']),
    58: ('>H', ['mass']),
    55: ('>HIH', ['current_page', 'text_id', 'last_page']),
    60: ('>HI', ['magic_type', 'magic_data']),
    61: ('>H', ['mass']),
    149: ('>HH', ['base', 'pattern']),
    150: ('>HW', ['open_flags', 'key']),
    63: ('>H', ['on']),
    64: ('>H', ['scan_type']),
    #56: short sign, handled below
    #57: sign, handled below
    95: ('>H', ['charge']),
    70: ('>HH', ['on', 'tape']),
    153: ('>HH', ['width', 'height']),
    92: ('>HHHHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
        'lower_left_x', 'lower_right_x', 'height',
        'pattern_x_size','pattern_y_size']), # + pattern array
    97: ('>HI', ['magic_type', 'magic_data']),
    155: ('>HW', ['open_flags', 'key']),
    74: ('>HI20s', ['state', 'take', 'address']),
    75: ('>H', ['event']),
    76: ('>W', ['denom']),
    87: ('>HHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
        'lower_left_x', 'lower_right_x', 'height']),
    85: ('>HWHH', ['open_flags', 'key', 'item_price',
        'display_item']), # + prices array
    86: ('>HW', ['open_flags', 'key']),
    # NOTE(review): class 80 lists three field names but only two 'H'
    # codes; zip() in decode_properties silently drops 'pattern'.
    # Possibly should be '>HHH' -- confirm against the original data.
    80: ('>HH', ['length', 'height', 'pattern']),
    82: ('>H', ['wind_level']),
}
def decode_properties(buf, fmt, keys):
    '''
    Parse the properties from the given byte buffer, using the format string
    and names of keys for each item in the format string. Returns a dict
    of name/value pairs for all keys.
    '''
    fat_words = []
    # Handle fatwords, which are 16-bits stored as 00 xx 00 yy.
    # They are declared with the custom 'W' code and unpacked as 32-bit
    # ints first, then folded back down below.
    if 'W' in fmt:
        # Hack: our fatword handling doesn't count repeated format strings
        idx = fmt.index('W')
        if fmt[:idx].isdigit():
            raise ValueError('cant handle format strings with numbers')
        # 'base' compensates for a leading byte-order character ('>'),
        # which occupies a format position but names no key.
        base = 1 if not fmt[0].isalpha() else 0
        fmt_chars = []
        for i, c in enumerate(fmt):
            if c == 'W':
                c = 'I'
                fat_words.append(keys[i - base])
            fmt_chars.append(c)
        fmt = ''.join(fmt_chars)
    data = OrderedDict(zip(
        keys,
        struct.unpack(fmt, buf[:struct.calcsize(fmt)])))
    # Replace each fat word with its actual value: high byte from bits
    # 16-23 of the raw 32-bit value, low byte from bits 0-7.
    for name in fat_words:
        data[name] = ((data[name] >> 8) & 0xff00) | (data[name] & 0xff)
    return data
def parse_array(buf, fmt, count):
    '''
    Unpack *count* consecutive same-sized items (struct format *fmt*)
    from *buf* into one flat list.
    '''
    step = struct.calcsize(fmt)
    values = []
    for idx in range(count):
        values.extend(struct.unpack(fmt, buf[idx * step:(idx + 1) * step]))
    return values
def decode_text(buf):
    '''
    Decode a word-packed string (00 x 00 y ...), which is similar to a
    fatword but is a string instead of int: keep every odd-indexed char.
    '''
    chars = [buf[pos] for pos in range(1, len(buf), 2)]
    return ''.join(chars)
def parse_properties(cls, property_data):
    '''
    Decode basic properties and then class-specific ones.
    Returns an OrderedDict of decoded fields for the given object class;
    empty for classes with no registered parser or special handling.
    '''
    data = OrderedDict()
    args = PARSERS.get(cls)
    if args:
        data.update(decode_properties(property_data, *args))
        # Offset of trailing variable-length data ('W' decodes as 'I').
        remainder_off = struct.calcsize(args[0].replace('W', 'I'))
    # Special class decoders for those not fully handled above
    if cls == 56:
        # short sign: 10 word-packed characters stored as ordinals
        data['text'] = [
            ord(c)
            for c in decode_text(property_data[:10 * 2])
        ]
    elif cls == 57:
        # sign: 40 word-packed characters stored as ordinals
        data['text'] = [
            ord(c)
            for c in decode_text(property_data[:40 * 2])
        ]
    elif cls == 18:
        # countertop: whoput = 5 ints
        n = 5
        data['whoput'] = parse_array(
            property_data[remainder_off:remainder_off + n * 4],
            '>I',
            n)
    elif cls == 92:
        # super trapezoid: pattern = 32 halfwords
        n = 32
        data['pattern'] = parse_array(
            property_data[remainder_off:remainder_off + n * 4],
            '>H',
            n)
    elif cls == 85:
        # vendo front: prices = 10 halfwords
        n = 10
        data['prices'] = parse_array(
            property_data[remainder_off:remainder_off + n * 4],
            '>H',
            n)
    return data
def decode_row(row):
    '''
    Parse a single 128-byte MC_object record and return a dict of the
    decoded fields (fixed header fields plus class-specific properties).
    '''
    data = OrderedDict(zip(STRUCT_ITEMS, struct.unpack(FORMAT, row)))
    data.update(parse_properties(data['class'], data['property_data']))
    # Debug-dump the Matchbook class
    #if data['class'] == 49:
    #    print ' '.join('%02x' % ord(c) for c in row)
    #    print data
    # These fields tend to be all padding (0x20 = ASCII space) for many
    # objects. Maybe these were deleted or superseded?
    data['deleted'] = (data['container'] == 0x20202020 and
                       data['contype'] == 0x2020)
    # Always remove the raw property bytes, which we've decoded
    del data['property_data']
    # Clear text data if it's unprintable (any byte >= 0x80)
    addr_str = data.get('address')
    if addr_str and any(ord(c) >= 0x80 for c in addr_str):
        #print ' '.join('%02x' % ord(c) for c in row)
        #print data
        data['address'] = ''
    return data
def main():
    '''
    Read each 128-byte record from the database file (argv[1]), decode it,
    and dump the decoded objects as JSON to argv[2].
    '''
    # Hoist the record size out of the loop; also needed to detect a
    # truncated trailing record.
    row_size = struct.calcsize(FORMAT)
    items = []
    with open(sys.argv[1]) as fp:
        while True:
            row = fp.read(row_size)
            if not row:
                break
            if len(row) < row_size:
                # Robustness fix: a truncated trailing record previously
                # crashed struct.unpack; warn and stop instead.
                sys.stderr.write('warning: ignoring truncated record\n')
                break
            items.append(decode_row(row))
    with open(sys.argv[2], 'w') as fp:
        json.dump(items, fp, indent=2)
if __name__ == '__main__':
    main()
| 31.095588 | 79 | 0.537952 |
1e9d1c8bf1e53ebc7c858c51bab0fccc892933dd | 29,818 | py | Python | openstackclient/tests/identity/v3/test_project.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/identity/v3/test_project.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | openstackclient/tests/identity/v3/test_project.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.identity.v3 import project
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
class TestProject(identity_fakes.TestIdentityv3):
    """Shared fixture for project commands: exposes fresh domain and
    project manager mocks on every test."""
    def setUp(self):
        super(TestProject, self).setUp()
        # Get a shortcut to the DomainManager Mock
        self.domains_mock = self.app.client_manager.identity.domains
        self.domains_mock.reset_mock()
        # Get a shortcut to the ProjectManager Mock
        self.projects_mock = self.app.client_manager.identity.projects
        self.projects_mock.reset_mock()
class TestProjectCreate(TestProject):
columns = (
'description',
'domain_id',
'enabled',
'id',
'name'
)
datalist = (
identity_fakes.project_description,
identity_fakes.domain_id,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
def setUp(self):
super(TestProjectCreate, self).setUp()
self.domains_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.DOMAIN),
loaded=True,
)
self.projects_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = project.CreateProject(self.app, None)
def test_project_create_no_options(self):
arglist = [
identity_fakes.project_name,
]
verifylist = [
('parent', None),
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': None,
'description': None,
'enabled': True,
'parent': None,
}
# ProjectManager.create(name=, domain=, description=,
# enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
collist = ('description', 'domain_id', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
identity_fakes.domain_id,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_description(self):
arglist = [
'--description', 'new desc',
identity_fakes.project_name,
]
verifylist = [
('description', 'new desc'),
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
('parent', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': None,
'description': 'new desc',
'enabled': True,
'parent': None,
}
# ProjectManager.create(name=, domain=, description=,
# enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
def test_project_create_domain(self):
arglist = [
'--domain', identity_fakes.domain_name,
identity_fakes.project_name,
]
verifylist = [
('domain', identity_fakes.domain_name),
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
('parent', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': identity_fakes.domain_id,
'description': None,
'enabled': True,
'parent': None,
}
# ProjectManager.create(name=, domain=, description=,
# enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
def test_project_create_domain_no_perms(self):
arglist = [
'--domain', identity_fakes.domain_id,
identity_fakes.project_name,
]
verifylist = [
('domain', identity_fakes.domain_id),
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
('parent', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
mocker = mock.Mock()
mocker.return_value = None
with mock.patch("openstackclient.common.utils.find_resource", mocker):
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': identity_fakes.domain_id,
'description': None,
'enabled': True,
'parent': None,
}
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
def test_project_create_enable(self):
arglist = [
'--enable',
identity_fakes.project_name,
]
verifylist = [
('enable', True),
('disable', False),
('name', identity_fakes.project_name),
('parent', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': None,
'description': None,
'enabled': True,
'parent': None,
}
# ProjectManager.create(name=, domain=, description=,
# enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
def test_project_create_disable(self):
arglist = [
'--disable',
identity_fakes.project_name,
]
verifylist = [
('enable', False),
('disable', True),
('name', identity_fakes.project_name),
('parent', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': None,
'description': None,
'enabled': False,
'parent': None,
}
# ProjectManager.create(name=, domain=,
# description=, enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
def test_project_create_property(self):
arglist = [
'--property', 'fee=fi',
'--property', 'fo=fum',
identity_fakes.project_name,
]
verifylist = [
('property', {'fee': 'fi', 'fo': 'fum'}),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'name': identity_fakes.project_name,
'domain': None,
'description': None,
'enabled': True,
'parent': None,
'fee': 'fi',
'fo': 'fum',
}
# ProjectManager.create(name=, domain=, description=,
# enabled=, **kwargs)
self.projects_mock.create.assert_called_with(
**kwargs
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
    def test_project_create_parent(self):
        """--parent given by name is resolved to the parent's id for create()."""
        # get() resolves the --parent name lookup to the parent project...
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        # ...and create() returns the new child project.
        self.projects_mock.create.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT_WITH_PARENT),
            loaded=True,
        )
        arglist = [
            '--domain', identity_fakes.PROJECT_WITH_PARENT['domain_id'],
            '--parent', identity_fakes.PROJECT['name'],
            identity_fakes.PROJECT_WITH_PARENT['name'],
        ]
        verifylist = [
            ('domain', identity_fakes.PROJECT_WITH_PARENT['domain_id']),
            ('parent', identity_fakes.PROJECT['name']),
            ('enable', False),
            ('disable', False),
            ('name', identity_fakes.PROJECT_WITH_PARENT['name']),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # create() must receive the parent's *id*, not the name we passed in.
        kwargs = {
            'name': identity_fakes.PROJECT_WITH_PARENT['name'],
            'domain': identity_fakes.PROJECT_WITH_PARENT['domain_id'],
            'parent': identity_fakes.PROJECT['id'],
            'description': None,
            'enabled': True,
        }
        self.projects_mock.create.assert_called_with(
            **kwargs
        )
        # A parented project gains a 'parent_id' column in the output.
        collist = (
            'description',
            'domain_id',
            'enabled',
            'id',
            'name',
            'parent_id',
        )
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.PROJECT_WITH_PARENT['description'],
            identity_fakes.PROJECT_WITH_PARENT['domain_id'],
            identity_fakes.PROJECT_WITH_PARENT['enabled'],
            identity_fakes.PROJECT_WITH_PARENT['id'],
            identity_fakes.PROJECT_WITH_PARENT['name'],
            identity_fakes.PROJECT['id'],
        )
        self.assertEqual(data, datalist)
def test_project_create_invalid_parent(self):
self.projects_mock.resource_class.__name__ = 'Project'
self.projects_mock.get.side_effect = exceptions.NotFound(
'Invalid parent')
self.projects_mock.find.side_effect = exceptions.NotFound(
'Invalid parent')
arglist = [
'--domain', identity_fakes.domain_name,
'--parent', 'invalid',
identity_fakes.project_name,
]
verifylist = [
('domain', identity_fakes.domain_name),
('parent', 'invalid'),
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
parsed_args,
)
class TestProjectDelete(TestProject):
    """Tests for ``openstack project delete`` (project.DeleteProject)."""

    def setUp(self):
        super(TestProjectDelete, self).setUp()
        # This is the return value for utils.find_resource()
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.projects_mock.delete.return_value = None
        # Command object under test.
        self.cmd = project.DeleteProject(self.app, None)

    def test_project_delete_no_options(self):
        """Deleting by id calls ProjectManager.delete() and returns None."""
        cmd_args = [
            identity_fakes.project_id,
        ]
        expected_opts = [
            ('projects', [identity_fakes.project_id]),
        ]
        parsed_args = self.check_parser(self.cmd, cmd_args, expected_opts)

        result = self.cmd.take_action(parsed_args)
        self.projects_mock.delete.assert_called_with(
            identity_fakes.project_id,
        )
        self.assertIsNone(result)
class TestProjectList(TestProject):
    """Tests for ``openstack project list`` (project.ListProject)."""
    # Default (short) column set and the matching expected row.
    columns = (
        'ID',
        'Name',
    )
    datalist = (
        (
            identity_fakes.project_id,
            identity_fakes.project_name,
        ),
    )
    def setUp(self):
        super(TestProjectList, self).setUp()
        # list() returns a single fake project.
        self.projects_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(identity_fakes.PROJECT),
                loaded=True,
            ),
        ]
        # Get the command object to test
        self.cmd = project.ListProject(self.app, None)
    def test_project_list_no_options(self):
        """Bare list uses the short column set and no list() filters."""
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.list.assert_called_with()
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, tuple(data))
    def test_project_list_long(self):
        """--long adds domain/description/enabled columns."""
        arglist = [
            '--long',
        ]
        verifylist = [
            ('long', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.list.assert_called_with()
        collist = ('ID', 'Name', 'Domain ID', 'Description', 'Enabled')
        self.assertEqual(collist, columns)
        datalist = ((
            identity_fakes.project_id,
            identity_fakes.project_name,
            identity_fakes.domain_id,
            identity_fakes.project_description,
            True,
        ), )
        self.assertEqual(datalist, tuple(data))
    def test_project_list_domain(self):
        """--domain by name is resolved to an id and passed to list()."""
        arglist = [
            '--domain', identity_fakes.domain_name,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_name),
        ]
        # Domain lookup succeeds and yields the fake DOMAIN.
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.DOMAIN),
            loaded=True,
        )
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.list.assert_called_with(
            domain=identity_fakes.domain_id)
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, tuple(data))
    def test_project_list_domain_no_perms(self):
        """When the domain lookup fails, the raw --domain value is used as-is."""
        arglist = [
            '--domain', identity_fakes.domain_id,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # Simulate a user without permission to look up the domain:
        # find_resource() returns None, so the command should fall back
        # to the value supplied on the command line.
        mocker = mock.Mock()
        mocker.return_value = None
        with mock.patch("openstackclient.common.utils.find_resource", mocker):
            columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.list.assert_called_with(
            domain=identity_fakes.domain_id)
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, tuple(data))
class TestProjectSet(TestProject):
    """Tests for ``openstack project set`` (project.SetProject).

    Each test checks that only the options given on the command line end up
    in the ProjectManager.update() kwargs.
    """
    def setUp(self):
        super(TestProjectSet, self).setUp()
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.DOMAIN),
            loaded=True,
        )
        # get() resolves the positional project argument...
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        # ...and update() returns the (unchanged) fake project.
        self.projects_mock.update.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        # Get the command object to test
        self.cmd = project.SetProject(self.app, None)
    def test_project_set_no_options(self):
        """With nothing to change, take_action() still succeeds and returns None."""
        arglist = [
            identity_fakes.project_name,
        ]
        verifylist = [
            ('project', identity_fakes.project_name),
            ('enable', False),
            ('disable', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
    def test_project_set_name(self):
        """--name is forwarded to update() as the only changed field."""
        arglist = [
            '--name', 'qwerty',
            '--domain', identity_fakes.domain_id,
            identity_fakes.project_name,
        ]
        verifylist = [
            ('name', 'qwerty'),
            ('domain', identity_fakes.domain_id),
            ('enable', False),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'name': 'qwerty',
        }
        # ProjectManager.update(project, name=, domain=, description=,
        # enabled=, **kwargs)
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_project_set_description(self):
        """--description is forwarded to update()."""
        arglist = [
            '--domain', identity_fakes.domain_id,
            '--description', 'new desc',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_id),
            ('description', 'new desc'),
            ('enable', False),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'description': 'new desc',
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_project_set_enable(self):
        """--enable maps to enabled=True in the update() call."""
        arglist = [
            '--domain', identity_fakes.domain_id,
            '--enable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_id),
            ('enable', True),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'enabled': True,
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_project_set_disable(self):
        """--disable maps to enabled=False in the update() call."""
        arglist = [
            '--domain', identity_fakes.domain_id,
            '--disable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_id),
            ('enable', False),
            ('disable', True),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'enabled': False,
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
        self.assertIsNone(result)
    def test_project_set_property(self):
        """--property key=value pairs are flattened into update() kwargs."""
        arglist = [
            '--domain', identity_fakes.domain_id,
            '--property', 'fee=fi',
            '--property', 'fo=fum',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('domain', identity_fakes.domain_id),
            ('property', {'fee': 'fi', 'fo': 'fum'}),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Set expected values
        kwargs = {
            'fee': 'fi',
            'fo': 'fum',
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
        self.assertIsNone(result)
class TestProjectShow(TestProject):
    """Tests for ``openstack project show`` (project.ShowProject).

    Fix: the test methods previously used a local variable named ``project``,
    which shadowed the ``project`` module imported at the top of the file
    (used here as ``project.ShowProject``); the locals are renamed to
    ``project_data`` to avoid the shadowing.
    """

    def setUp(self):
        super(TestProjectShow, self).setUp()
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        # Get the command object to test
        self.cmd = project.ShowProject(self.app, None)

    def test_project_show(self):
        """Plain show requests neither parents nor subtree as lists."""
        arglist = [
            identity_fakes.project_id,
        ]
        verifylist = [
            ('project', identity_fakes.project_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # In base command class ShowOne in cliff, abstract method take_action()
        # returns a two-part tuple with a tuple of column names and a tuple of
        # data to be shown.
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.get.assert_called_with(
            identity_fakes.project_id,
            parents_as_list=False,
            subtree_as_list=False,
        )
        collist = ('description', 'domain_id', 'enabled', 'id', 'name')
        self.assertEqual(collist, columns)
        datalist = (
            identity_fakes.project_description,
            identity_fakes.domain_id,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(datalist, data)

    def test_project_show_parents(self):
        """--parents requests the parent chain and adds a 'parents' column."""
        project_data = copy.deepcopy(identity_fakes.PROJECT_WITH_GRANDPARENT)
        project_data['parents'] = identity_fakes.grandparents
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            project_data,
            loaded=True,
        )
        arglist = [
            identity_fakes.PROJECT_WITH_GRANDPARENT['id'],
            '--parents',
        ]
        verifylist = [
            ('project', identity_fakes.PROJECT_WITH_GRANDPARENT['id']),
            ('parents', True),
            ('children', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.get.assert_called_with(
            identity_fakes.PROJECT_WITH_GRANDPARENT['id'],
            parents_as_list=True,
            subtree_as_list=False,
        )
        collist = (
            'description',
            'domain_id',
            'enabled',
            'id',
            'name',
            'parent_id',
            'parents',
        )
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.PROJECT_WITH_GRANDPARENT['description'],
            identity_fakes.PROJECT_WITH_GRANDPARENT['domain_id'],
            identity_fakes.PROJECT_WITH_GRANDPARENT['enabled'],
            identity_fakes.PROJECT_WITH_GRANDPARENT['id'],
            identity_fakes.PROJECT_WITH_GRANDPARENT['name'],
            identity_fakes.PROJECT_WITH_GRANDPARENT['parent_id'],
            identity_fakes.ids_for_parents_and_grandparents,
        )
        self.assertEqual(data, datalist)

    def test_project_show_subtree(self):
        """--children requests the subtree and adds a 'subtree' column."""
        project_data = copy.deepcopy(identity_fakes.PROJECT_WITH_PARENT)
        project_data['subtree'] = identity_fakes.children
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            project_data,
            loaded=True,
        )
        arglist = [
            identity_fakes.PROJECT_WITH_PARENT['id'],
            '--children',
        ]
        verifylist = [
            ('project', identity_fakes.PROJECT_WITH_PARENT['id']),
            ('parents', False),
            ('children', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.get.assert_called_with(
            identity_fakes.PROJECT_WITH_PARENT['id'],
            parents_as_list=False,
            subtree_as_list=True,
        )
        collist = (
            'description',
            'domain_id',
            'enabled',
            'id',
            'name',
            'parent_id',
            'subtree',
        )
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.PROJECT_WITH_PARENT['description'],
            identity_fakes.PROJECT_WITH_PARENT['domain_id'],
            identity_fakes.PROJECT_WITH_PARENT['enabled'],
            identity_fakes.PROJECT_WITH_PARENT['id'],
            identity_fakes.PROJECT_WITH_PARENT['name'],
            identity_fakes.PROJECT_WITH_PARENT['parent_id'],
            identity_fakes.ids_for_children,
        )
        self.assertEqual(data, datalist)

    def test_project_show_parents_and_children(self):
        """--parents --children together request both hierarchy views."""
        project_data = copy.deepcopy(identity_fakes.PROJECT_WITH_PARENT)
        project_data['subtree'] = identity_fakes.children
        project_data['parents'] = identity_fakes.parents
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            project_data,
            loaded=True,
        )
        arglist = [
            identity_fakes.PROJECT_WITH_PARENT['id'],
            '--parents',
            '--children',
        ]
        verifylist = [
            ('project', identity_fakes.PROJECT_WITH_PARENT['id']),
            ('parents', True),
            ('children', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.projects_mock.get.assert_called_with(
            identity_fakes.PROJECT_WITH_PARENT['id'],
            parents_as_list=True,
            subtree_as_list=True,
        )
        collist = (
            'description',
            'domain_id',
            'enabled',
            'id',
            'name',
            'parent_id',
            'parents',
            'subtree',
        )
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.PROJECT_WITH_PARENT['description'],
            identity_fakes.PROJECT_WITH_PARENT['domain_id'],
            identity_fakes.PROJECT_WITH_PARENT['enabled'],
            identity_fakes.PROJECT_WITH_PARENT['id'],
            identity_fakes.PROJECT_WITH_PARENT['name'],
            identity_fakes.PROJECT_WITH_PARENT['parent_id'],
            identity_fakes.ids_for_parents,
            identity_fakes.ids_for_children,
        )
        self.assertEqual(data, datalist)
| 32.166127 | 79 | 0.574049 |
b8bf7b390f3371e0d1e26f3ef7f13b6c3bea39f0 | 749 | py | Python | tests/wait_server.py | Uxio0/psycogreen | 6674b71955f0fb1b526beb0cd4aebd8d1c42f4d0 | [
"BSD-3-Clause"
] | 65 | 2020-02-23T07:04:45.000Z | 2022-03-08T07:23:28.000Z | tests/wait_server.py | Uxio0/psycogreen | 6674b71955f0fb1b526beb0cd4aebd8d1c42f4d0 | [
"BSD-3-Clause"
] | 4 | 2020-07-20T09:17:49.000Z | 2021-10-14T08:39:25.000Z | tests/wait_server.py | Uxio0/psycogreen | 6674b71955f0fb1b526beb0cd4aebd8d1c42f4d0 | [
"BSD-3-Clause"
] | 3 | 2020-03-12T12:02:18.000Z | 2022-01-08T21:26:41.000Z | #!/usr/bin/env python
"""A server to test with blocking I/O."""
# Copyright (C) 2010-2020 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# All rights reserved. See COPYING file for details.
from __future__ import print_function
import time
from wsgiref.simple_server import make_server
def wait_app(environ, start_response):
"""An application serving blocking pages."""
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
try:
secs = int(environ['PATH_INFO'].replace('/', ''))
except Exception:
secs = 0
time.sleep(secs)
return [str(secs).encode('ascii')]
httpd = make_server('', 8000, wait_app)
print("Serving on port 8000...")
httpd.serve_forever()
| 24.16129 | 71 | 0.683578 |
da2125b5c553c284e6859f4cbb50f0832750ee9c | 426 | py | Python | 07_07_scrolling.py | bolivaralejandro/prog_pi_ed2- | 656705c70a0334c915409dd8ec8a3593cb092b72 | [
"MIT"
] | 26 | 2015-04-28T14:34:14.000Z | 2021-12-03T21:29:29.000Z | 07_07_scrolling.py | bolivaralejandro/prog_pi_ed2- | 656705c70a0334c915409dd8ec8a3593cb092b72 | [
"MIT"
] | null | null | null | 07_07_scrolling.py | bolivaralejandro/prog_pi_ed2- | 656705c70a0334c915409dd8ec8a3593cb092b72 | [
"MIT"
] | 27 | 2015-09-06T16:45:33.000Z | 2021-03-26T15:58:51.000Z | #07_07_scrolling.py
from tkinter import *
class App:
def __init__(self, master):
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT, fill=Y)
text = Text(master, yscrollcommand=scrollbar.set)
text.pack(side=LEFT, fill=BOTH)
text.insert(END, 'word ' * 1000)
scrollbar.config(command=text.yview)
# Build the window and hand control to the Tk event loop.
root = Tk()
root.wm_title('Scrolling')
app = App(root)
root.mainloop()
| 22.421053 | 57 | 0.65493 |
af0d6ac994677e3a7505fc88c1ca3bc27aa2c02e | 6,458 | py | Python | ssc/DR_random.py | cdslabamotong/USCO-Solver | 6fd61cf03db44ebc778bcf51b546f37f431e5ae9 | [
"MIT"
] | 1 | 2021-12-09T04:24:25.000Z | 2021-12-09T04:24:25.000Z | ssc/DR_random.py | cdslabamotong/USCO-Solver | 6fd61cf03db44ebc778bcf51b546f37f431e5ae9 | [
"MIT"
] | null | null | null | ssc/DR_random.py | cdslabamotong/USCO-Solver | 6fd61cf03db44ebc778bcf51b546f37f431e5ae9 | [
"MIT"
] | null | null | null | """
==============================
ssc random method
# License: BSD 3-clause
==============================
"""
import numpy as np
from DR_one_slack_ssvm import OneSlackSSVM
from DR import (DR_Utils, DR_InputInstance)
from basic_Utils import (Utils)
from basic_USCO import (Model)
import multiprocessing
import argparse
import os
import sys
from datetime import datetime
class Object(object):
    """Empty placeholder; instances can serve as ad-hoc attribute containers."""
    pass
def main():
    """Run the random-prediction baseline for the DR (ssc) problem.

    Parses CLI options, loads train/test pairs from ``./data``, builds a
    ``DR_InputInstance`` with randomly chosen features, produces a random
    prediction for the test set, and logs results under ``./log``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataname', default='yahoo',
        choices=['cora', 'yahoo', 'ny'])
    #parser.add_argument(
    #    '--vNum', type=int, default=768, choices=[1024,768,512],
    #    help='kro 1024, power768 768, ER512 512')
    parser.add_argument(
        '--featureNum', type=int, default=16,
        help='number of features (random subgraphs) used in StratLearn ')
    parser.add_argument(
        '--featureGenMethod', default='uniform', \
        choices=['uniform','true', ], \
        help='the distribution used for generating features, the choices correspond phi_1^1, phi_0.01^1, phi_0.005^1, phi_+^+')
    parser.add_argument(
        '--trainNum', type=int, default=8, help='number of training data')
    parser.add_argument(
        '--testNum', type=int, default=640, help='number of testing data')
    #parser.add_argument(
    #    '--testBatch', type=int, default=1, help='number of testing data')
    parser.add_argument(
        '--thread', type=int, default=1, help='number of threads')
    parser.add_argument(
        '--output', default=False, action="store_true", help='if output prediction')
    # NOTE(review): default=True combined with action="store_true" means this
    # flag is always True and can never be disabled from the CLI — confirm intent.
    parser.add_argument(
        '--pre_train', default=True ,action="store_true", help='if store a pre_train model')
    parser.add_argument(
        '--log_path', default=None, help='if store a pre_train model')
    args = parser.parse_args()
    #utils= Utils()
    problem ="dr"
    dataname=args.dataname
    #vNum = args.vNum
    trainNum =args.trainNum
    testNum =args.testNum
    #testBatch =args.testBatch
    #pairMax=2500
    thread = args.thread
    verbose=6
    #parameter used in SVM
    # NOTE(review): C/tol/max_iter/verbose feed the (commented-out) OneSlackSSVM
    # path below; in this random baseline only C is used, and only for logging.
    C = 0.0001
    tol=0.001
    max_iter = 2
    featureNum = args.featureNum
    featureGenMethod = args.featureGenMethod
    # Per-dataset constants for locating the prepared data files.
    if dataname == "cora":
        maxFeatureNum = 10000
        pairMax = 10000
        fraction = 0.1
        scale = 200
        #vNum=1024
    if dataname == "yahoo":
        maxFeatureNum = 10000
        pairMax = 10000
        fraction = 0.1
        scale = 200
    # NOTE(review): the "ny" branch never sets maxFeatureNum/fraction/scale,
    # so running with --dataname ny would raise NameError below — confirm.
    if dataname == "ny":
        pairMax = 395265
        vNum = 768
    pre_train = args.pre_train
    preTrainPathResult = None
    #get data
    path = os.getcwd()
    data_path=path+"/data"
    pair_path = "{}/{}/{}/{}_{}_train_{}_{}_{}".format(data_path,problem,dataname,problem,dataname,pairMax, fraction, scale)
    #unitpair_path = "{}/{}/{}/{}_{}_unitAll".format(data_path,problem,dataname,problem,dataname)
    stoCoverGraphPath = "{}/{}/{}/{}_{}".format(data_path,problem,dataname,problem,dataname)
    featurePath = "{}/{}/{}/features/{}_{}".format(data_path,problem,dataname,featureGenMethod,maxFeatureNum)
    #if args.log_paht is not None:
    logpath=path+"/log/dr_random_"+dataname
    #print(data_path)
    #print(pair_path)
    #print(stoGraphPath)
    #print(featurePath)
    #sys.exit("stop")
    # Load a random train/test split of the prepared pairs.
    X_train, Y_train, X_test, Y_test = DR_Utils.getDataTrainTestRandom(pair_path,trainNum,testNum, pairMax)
    #print(X_train)
    print("data fetched")
    #sys.exit()
    Utils.writeToFile(logpath, "data fetched")
    '''
    for x, y in zip (X_train, Y_train):
        print(x)
        print(y)
        print()
    '''
    # Problem instance with randomly sampled features.
    instance = DR_InputInstance(stoCoverGraphPath, featurePath, featureNum, fraction,
                                featureRandom = True, maxFeatureNum = maxFeatureNum,
                                thread = thread)
    #sys.exit("stop")
    #**************************OneSlackSSVM
    #model = Model()
    #model.initialize(X_train, Y_train, instance)
    #one_slack_svm = OneSlackSSVM(model, verbose=verbose, C=C, tol=tol, n_jobs=thread,
    #max_iter = max_iter, log = logpath)
    #one_slack_svm.fit(X_train, Y_train, initialize = False)
    Utils.writeToFile(logpath, "===============================================================", toconsole = True,preTrainPathResult = preTrainPathResult)
    Utils.writeToFile(logpath, "Testing Random Started", toconsole = True,preTrainPathResult = preTrainPathResult)
    # The random baseline: predictions drawn without training.
    Y_pred = DR_Utils.random_prediction(instance, X_test, featureNum)
    #sys.exit("stop")
    instance.test(X_test, Y_test, Y_pred, logpath)
    #print("Prediction Started")
    #Utils.writeToFile(logpath, "Prediction Started", toconsole = True)
    #Y_pred = one_slack_svm.predict(X_test, featureNum)
    #sys.exit("stop")
    #print("Testing Started")
    #Utils.writeToFile(logpath, "Prediction Started", toconsole = True)
    #instance.test(X_test, Y_test_length, Y_pred,logpath)
    #print("All One Started")
    #instance.testUnitAllPair(pair_path,unitpair_path)
    '''
    Y_allOne_pred = [];
    for x in X_test:
        Y_allOne_pred.append(instance.inferenceBasic(x))
    print("AllOne Testing Started")
    instance.test(X_test, Y_test_length, Y_allOne_pred)
    '''
    # Log the run configuration alongside the results.
    Utils.writeToFile(logpath, dataname, toconsole = True,preTrainPathResult = preTrainPathResult)
    #print(dataname)
    Utils.writeToFile(logpath, "featureNum:{}, featureGenMethod: {}, c:{} ".format(featureNum, featureGenMethod, C), toconsole = True,preTrainPathResult = preTrainPathResult)
    #print("featureNum:{}, featureGenMethod: {}, c:{} ".format(featureNum, featureGenMethod, C))
    Utils.writeToFile(logpath, "trainNum:{}, testNum:{} ".format(trainNum, testNum), toconsole = True,preTrainPathResult = preTrainPathResult)
    #print("trainNum:{}, testNum:{} ".format(trainNum, testNum))
    #print("loss_type:{}, LAI_method:{}, ".format(loss_type.name, LAI_method))
    Utils.writeToFile(logpath, "===============================================================", toconsole = True,preTrainPathResult = preTrainPathResult)
# Script entry point.
if __name__ == "__main__":
    main()
a66d7c5203999d338a5d8fb6b97722d7017913db | 435 | py | Python | python/venv/Scripts/pip3.8-script.py | namseokyoo/homework | 458aef509ba7b6483b72f319e23d6fde2f732f76 | [
"MIT"
] | 1 | 2020-03-30T01:17:14.000Z | 2020-03-30T01:17:14.000Z | python/venv/Scripts/pip3.8-script.py | namseokyoo/homework | 458aef509ba7b6483b72f319e23d6fde2f732f76 | [
"MIT"
] | 1 | 2020-04-05T08:10:17.000Z | 2020-04-06T10:36:41.000Z | python/venv/Scripts/pip3.8-script.py | namseokyoo/homework | 458aef509ba7b6483b72f319e23d6fde2f732f76 | [
"MIT"
] | null | null | null | #!C:\Users\Namseok\Desktop\Sparta\homework\homework\python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| 33.461538 | 82 | 0.675862 |
c3b74ae0a96ccf4ef1bece678abe2d8f019399cf | 2,344 | py | Python | studentPerformance.py | ManasRaturi/C109 | af957b188b80ac2adcea7ce14e2cf4e8296acaf4 | [
"MIT"
] | null | null | null | studentPerformance.py | ManasRaturi/C109 | af957b188b80ac2adcea7ce14e2cf4e8296acaf4 | [
"MIT"
] | null | null | null | studentPerformance.py | ManasRaturi/C109 | af957b188b80ac2adcea7ce14e2cf4e8296acaf4 | [
"MIT"
] | null | null | null | import plotly.figure_factory as ff
import pandas as pd
import csv
import plotly.graph_objects as go
import random
import statistics
# Load the reading scores and compute summary statistics.
df = pd.read_csv("StudentPerformance.csv")
data = df["reading score"].tolist()

mean = sum(data) / len(data)
std_deviation = statistics.stdev(data)
median = statistics.median(data)
mode = statistics.mode(data)

# Boundaries of the 1/2/3 standard-deviation bands around the mean.
first_std_deviation_start, first_std_deviation_end = mean-std_deviation, mean+std_deviation
second_std_deviation_start, second_std_deviation_end = mean-(2*std_deviation), mean+(2*std_deviation)
third_std_deviation_start, third_std_deviation_end = mean-(3*std_deviation), mean+(3*std_deviation)

# Density plot with vertical markers for the mean and the band edges.
fig = ff.create_distplot([data], ["reading scores"], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[first_std_deviation_start, first_std_deviation_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1"))
fig.add_trace(go.Scatter(x=[first_std_deviation_end, first_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1"))
fig.add_trace(go.Scatter(x=[second_std_deviation_start, second_std_deviation_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2"))
fig.add_trace(go.Scatter(x=[second_std_deviation_end, second_std_deviation_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2"))
# BUG FIX: was `fig.show` (attribute access, never called) — the figure
# was never displayed.
fig.show()

# BUG FIX: all three comprehensions previously assigned to the *_1_* name,
# leaving the *_2_* and *_3_* names referenced below undefined (NameError).
list_of_data_within_1_std_deviation = [result for result in data if result > first_std_deviation_start and result < first_std_deviation_end]
list_of_data_within_2_std_deviation = [result for result in data if result > second_std_deviation_start and result < second_std_deviation_end]
list_of_data_within_3_std_deviation = [result for result in data if result > third_std_deviation_start and result < third_std_deviation_end]

print("Mean of this Data is {}".format(mean))
print("Median of this Data is {}".format(median))
print("Mode of this Data is {}".format(mode))
print("Standard Deviation of this Data is {}".format(std_deviation))
print("{}% of data lies within 1 Standard Deviation".format(len(list_of_data_within_1_std_deviation)*100.0/len(data)))
print("{}% of data lies within 2 Standard Deviation".format(len(list_of_data_within_2_std_deviation)*100.0/len(data)))
print("{}% of data lies within 3 Standard Deviation".format(len(list_of_data_within_3_std_deviation)*100.0/len(data)))
bec56a7becd23889ad4d3b632fef55dad21dd01b | 4,302 | py | Python | examples/filters-general/voxelize-points.py | tkoyama010/PVGeo | d2852b07be5411ca4b3a96f886ae864bbf6d09d8 | [
"BSD-3-Clause"
] | 145 | 2018-07-20T21:46:27.000Z | 2022-02-21T02:23:06.000Z | examples/filters-general/voxelize-points.py | tkoyama010/PVGeo | d2852b07be5411ca4b3a96f886ae864bbf6d09d8 | [
"BSD-3-Clause"
] | 50 | 2018-06-14T22:38:27.000Z | 2021-11-29T03:38:08.000Z | examples/filters-general/voxelize-points.py | tkoyama010/PVGeo | d2852b07be5411ca4b3a96f886ae864bbf6d09d8 | [
"BSD-3-Clause"
] | 34 | 2018-07-27T07:48:20.000Z | 2022-03-23T06:39:57.000Z | """
Voxelize Points
~~~~~~~~~~~~~~~
This example will demonstrate how to connect a set of points defined on a
regular grid to create a `vtkUnstructuredGrid` which can be used to perform
volumetric operations.
This example demos :class:`PVGeo.filters.VoxelizePoints`
"""
# sphinx_gallery_thumbnail_number = 2
import pyvista
from pyvista import examples
import numpy as np
import pandas as pd
import PVGeo
from PVGeo.filters import VoxelizePoints
###############################################################################
# Download the sample data file and keep track of its local path:
url = 'https://github.com/OpenGeoVis/PVGeo/raw/master/tests/data/fault_points.csv'
fault_file, _ = examples.downloads._retrieve_file(url, 'fault_points.csv')
###############################################################################
# Let's go ahead and load a simple file that has XYZ coordinates and a boolean
# array for fault presence. This point cloud makes some sort of regular grid,
# but we have forgotten the details of the cell spacings and local coordinate
# rotations.
#
# We will read in this data with ``pandas`` and send it to the
# :func:`PVGeo.points_to_poly_data` helper to create a :class:`pyvista.PolyData`
# object (essentially a point cloud).
points = pd.read_csv(fault_file)
print(points[0:2])
###############################################################################
vtkpoints = PVGeo.points_to_poly_data(points)
print(vtkpoints)
###############################################################################
# Note that we have a :class:`pyvista.PolyData` object now which allows us to do
# all types of immediate plotting of our data. First, let's threshold our points
# as the point cloud has a bunch of zeros and ones throughout the dataspace to
# describe the presence of a fault.
#
# To threshold the points, we call the threshold filter directly on our data
# object and pass the thresholding value. We can then plot the result by
# calling the plot function. (Note: change the notebook parameter to
# ``False`` for an interactive window)
vtkpoints.plot(clim=[0, 1], point_size=1)
###############################################################################
# Points to Voxelized Volume
# ++++++++++++++++++++++++++
#
# The above figure is pretty cool! But it's a point cloud, which means our
# filtering options are pretty limited. Fortunately, we know that the point
# cloud represents some sort of regularized gridded volume of data and PVGeo
# has a filter to recover that volume. This will allow further volumetric
# operations to be performed with other PVGeo or VTK filters.
#
# Remember that these points are rotated and we do not know the cell sizes...
# this is okay! The VoxelizePoints filter from PVGeo will handle the recovery of
# the coordinate rotation and grid our data without running an interpolation
# scheme. The VoxelizePoints filter assumes that the points are structured on some
# rotated XY-plane with regular cell spacings and does the rest on its own!
# Check out VoxelizePoints code docs for more details.
# The full pipeline method
print('Voxelizing... ', end='')
voxelizer = PVGeo.filters.VoxelizePoints()
grid = voxelizer.apply(vtkpoints)
print('done.')
# Output the results: the recovered rotation angle and cell spacings.
print('Recovered Angle (deg.): %.3f' % voxelizer.get_angle())
print('Recovered Cell Sizes: (%.2f, %.2f, %.2f)' % voxelizer.get_spacing())
print(grid)
###############################################################################
# And now we can plot the voxelized volume
grid.plot()
###############################################################################
# Filter Volumetric Data
# ++++++++++++++++++++++
#
# Now let's use one of `PyVista`'s filters to create slices of the thresholded
# dataset. Specifically, we are using the ``slice_orthogonal`` filter that will
# create 3 orthogonal slices through a data volume.
slices = grid.slice_orthogonal()
print(slices)
###############################################################################
# And let's use a ``clip`` filter:
clip = grid.clip(normal='x').clip(normal='-y').threshold(0.5)
###############################################################################
# Now display the slices and clipped volume together in one scene.
p = pyvista.Plotter()
p.add_mesh(slices)
p.add_mesh(clip)
p.show_grid()
p.show()
aaea3c9e09589bc1e2b05764e4255aa29f53b560 | 2,264 | py | Python | samples/gae/main.py | goodtune/pycloudinary | 741c13ba94ecbd7833fc3a1c770e9f6de44d8d2c | [
"MIT"
] | 199 | 2015-01-11T14:46:06.000Z | 2022-03-22T15:37:23.000Z | samples/gae/main.py | goodtune/pycloudinary | 741c13ba94ecbd7833fc3a1c770e9f6de44d8d2c | [
"MIT"
] | 132 | 2015-01-21T08:13:27.000Z | 2022-03-04T14:36:44.000Z | samples/gae/main.py | goodtune/pycloudinary | 741c13ba94ecbd7833fc3a1c770e9f6de44d8d2c | [
"MIT"
] | 117 | 2015-02-04T04:23:59.000Z | 2022-03-19T11:30:34.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import webapp2
from cloudinary.compat import StringIO
from cloudinary.uploader import upload
from cloudinary.utils import cloudinary_url
from google.appengine.ext.webapp import template
class MainHandler(webapp2.RequestHandler):
    """Serve the Cloudinary upload demo page and process image uploads."""

    def _render(self, image_url, thumbnail_url1, thumbnail_url2):
        # Render index.html with the given (possibly None) image URLs.
        template_path = os.path.join(os.path.dirname(__file__), 'index.html')
        context = {
            'image_url': image_url,
            'thumbnail_url1': thumbnail_url1,
            'thumbnail_url2': thumbnail_url2
        }
        self.response.write(template.render(template_path, context))

    def get(self):
        # Initial page load: nothing has been uploaded yet.
        self._render(None, None, None)

    def post(self):
        image_url = None
        thumbnail_url1 = None
        thumbnail_url2 = None
        uploaded = self.request.get('file')
        if uploaded:
            # Wrap the raw upload bytes in a named file-like object so the
            # Cloudinary uploader treats it as a file.
            str_file = StringIO(uploaded)
            str_file.name = 'file'
            result = upload(str_file)
            image_url = result['url']
            thumbnail_url1, options = cloudinary_url(result['public_id'], format="jpg", crop="fill", width=100,
                                                     height=100)
            thumbnail_url2, options = cloudinary_url(result['public_id'], format="jpg", crop="fill", width=200,
                                                     height=100, radius=20, effect="sepia")
        self._render(image_url, thumbnail_url1, thumbnail_url2)
# WSGI application entry point: route all requests for '/' to MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
| 36.516129 | 118 | 0.646201 |
823f8ae60aa199d870c42169ad6861e9688ec04f | 200 | py | Python | agdss/settings/dev-suryadhir.py | nemaderinku/deepgis | 807567af739adfb7f2c7bc7e523f6b987cb6a0a9 | [
"Apache-2.0"
] | 4 | 2019-08-12T20:57:52.000Z | 2022-01-06T19:31:27.000Z | agdss/settings/dev-suryadhir.py | nemaderinku/deepgis | 807567af739adfb7f2c7bc7e523f6b987cb6a0a9 | [
"Apache-2.0"
] | 32 | 2019-10-11T09:30:18.000Z | 2021-02-02T17:32:12.000Z | agdss/settings/dev-suryadhir.py | nemaderinku/deepgis | 807567af739adfb7f2c7bc7e523f6b987cb6a0a9 | [
"Apache-2.0"
] | 2 | 2019-08-14T06:25:55.000Z | 2020-02-17T05:20:31.000Z | from agdss.settings.common import *
# Developer-specific settings overrides for a local environment: static files
# are served from the desktop, and label output goes to the temp/ folders.
#Note: Include a '/' at end of folder names
STATIC_ROOT = "/Users/suryadhir/Desktop/"
LABEL_FOLDER_NAME='temp/'
LABEL_AVERAGE_FOLDER_NAME='temp_averages/'
| 25 | 44 | 0.75 |
1b03a3aef2a1a61033cf65c2c2a28e5f7634b141 | 12,705 | py | Python | reviewboard/webapi/tests/test_webhook.py | davidt/reviewboard | 66b08d06aec9be921e737b99c5ab78acc8b7a6fe | [
"MIT"
] | 1 | 2017-01-16T09:39:15.000Z | 2017-01-16T09:39:15.000Z | reviewboard/webapi/tests/test_webhook.py | davidt/reviewboard | 66b08d06aec9be921e737b99c5ab78acc8b7a6fe | [
"MIT"
] | null | null | null | reviewboard/webapi/tests/test_webhook.py | davidt/reviewboard | 66b08d06aec9be921e737b99c5ab78acc8b7a6fe | [
"MIT"
] | 1 | 2018-01-15T19:13:49.000Z | 2018-01-15T19:13:49.000Z | from __future__ import unicode_literals
from django.utils import six
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import INVALID_FORM_DATA
from reviewboard.notifications.models import WebHookTarget
from reviewboard.site.models import LocalSite
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (webhook_item_mimetype,
webhook_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
from reviewboard.webapi.tests.urls import (get_webhook_item_url,
get_webhook_list_url)
def compare_item(self, item_rsp, webhook):
    """Assert that a serialized webhook item payload matches its model.

    Shared by the list and item test cases below, which bind it as a class
    attribute (``compare_item = compare_item``) so it runs as a method.
    """
    self.assertEqual(item_rsp['id'], webhook.pk)
    self.assertEqual(item_rsp['enabled'], webhook.enabled)
    self.assertEqual(item_rsp['url'], webhook.url)
    self.assertEqual(item_rsp['custom_content'], webhook.custom_content)
    self.assertEqual(item_rsp['secret'], webhook.secret)
    parsed_apply_to = resources.webhook.parse_apply_to_field(
        item_rsp['apply_to'], None)
    self.assertEqual(parsed_apply_to, webhook.apply_to)
    rsp_repo_titles = {item['title'] for item in item_rsp['repositories']}
    model_repo_names = {repo.name for repo in webhook.repositories.all()}
    self.assertEqual(rsp_repo_titles, model_repo_names)
    self.assertEqual(item_rsp['events'], webhook.events)
    self.assertEqual(item_rsp['extra_data'], webhook.extra_data)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ExtraDataListMixin, BaseWebAPITestCase):
    """Tests for the WebHookResource list resource."""
    resource = resources.webhook
    sample_api_url = 'api/webhooks/'
    basic_get_use_admin = True
    basic_post_use_admin = True
    fixtures = ['test_users']
    compare_item = compare_item
    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        """Return (url, mimetype, items) for the metaclass's basic GET tests."""
        webhook = self.create_webhook(with_local_site=with_local_site)
        if populate_items:
            items = [webhook]
        else:
            items = []
        return (get_webhook_list_url(local_site_name),
                webhook_list_mimetype,
                items)
    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        """Return (url, mimetype, post data, objects) for basic POST tests."""
        if post_valid_data:
            post_data = {
                'enabled': 0,
                'events': '*',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'all',
            }
        else:
            post_data = {}
        return (get_webhook_list_url(local_site_name),
                webhook_item_mimetype,
                post_data,
                [])
    def check_post_result(self, user, rsp):
        """Verify a POST response payload against the stored WebHookTarget."""
        self.assertIn('webhook', rsp)
        item_rsp = rsp['webhook']
        self.compare_item(item_rsp,
                          WebHookTarget.objects.get(pk=item_rsp['id']))
    @add_fixtures(['test_scmtools'])
    def test_post_with_repositories(self):
        """Testing adding a webhook for custom repositories"""
        repositories = [
            self.create_repository(name='Repo 1'),
            self.create_repository(name='Repo 2'),
        ]
        self.user.is_superuser = True
        self.user.save()
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': '*',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'apply_to': 'custom',
                'repositories': ','.join(
                    six.text_type(repo.pk)
                    for repo in repositories
                )
            },
            expected_mimetype=webhook_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.check_post_result(self.user, rsp)
    @add_fixtures(['test_scmtools'])
    def test_post_all_repositories_not_same_local_site(self):
        """Testing adding a webhook with a local site and custom repositories
        that are not all in the same local site
        """
        local_site_1 = LocalSite.objects.create(name='local-site-1')
        local_site_2 = LocalSite.objects.create(name='local-site-2')
        for local_site in (local_site_1, local_site_2):
            local_site.admins = [self.user]
            local_site.users = [self.user]
            local_site.save()
        # Only the first repository belongs to the webhook's local site; the
        # other two should be rejected as invalid.
        repositories = [
            self.create_repository(name='Repo 1', local_site=local_site_1),
            self.create_repository(name='Repo 2', local_site=local_site_2),
            self.create_repository(name='Repo 3')
        ]
        rsp = self.api_post(
            get_webhook_list_url(local_site_1),
            {
                'enabled': 0,
                'events': '*',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'custom',
                'repositories': ','.join(
                    six.text_type(repo.pk)
                    for repo in repositories
                )
            },
            expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertTrue('err' in rsp)
        self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
        self.assertEqual(rsp['err']['msg'], INVALID_FORM_DATA.msg)
        self.assertTrue('fields' in rsp)
        self.assertTrue('repositories' in rsp['fields'])
        self.assertEqual(set(rsp['fields']['repositories']),
                         set([
                             'Repository with ID %s is invalid.' % repo.pk
                             for repo in repositories[1:]
                         ]))
    @add_fixtures(['test_scmtools'])
    def test_post_repositories_local_site_but_webhook_not(self):
        """Testing adding a webhook without a local site for repositories that
        are in a local site
        """
        local_site = LocalSite.objects.create(name='local-site-1')
        self.user.is_superuser = True
        self.user.save()
        repositories = [
            self.create_repository(name='Repo 1', local_site=local_site),
            self.create_repository(name='Repo 2', local_site=local_site),
        ]
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': '*',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'custom',
                'repositories': ','.join(
                    six.text_type(repo.pk)
                    for repo in repositories
                )
            },
            expected_status=400)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertTrue('err' in rsp)
        self.assertEqual(rsp['err']['code'], INVALID_FORM_DATA.code)
        self.assertEqual(rsp['err']['msg'], INVALID_FORM_DATA.msg)
        self.assertTrue('fields' in rsp)
        self.assertTrue('repositories' in rsp['fields'])
        self.assertEqual(set(rsp['fields']['repositories']),
                         set([
                             'Repository with ID %s is invalid.' % repo.pk
                             for repo in repositories
                         ]))
    @add_fixtures(['test_scmtools'])
    def test_post_multiple_events(self):
        """Testing adding a webhook that listens for multiple events"""
        self.user.is_superuser = True
        self.user.save()
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': 'review_request_closed,review_request_published',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'all'
            },
            expected_mimetype=webhook_item_mimetype)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('webhook', rsp)
        self.compare_item(rsp['webhook'], WebHookTarget.objects.get())
    @add_fixtures(['test_scmtools'])
    def test_post_no_events(self):
        """Testing adding a webhook that listens on no events"""
        self.user.is_superuser = True
        self.user.save()
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': '',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'all'
            },
            expected_mimetype=webhook_item_mimetype)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('webhook', rsp)
        self.compare_item(rsp['webhook'], WebHookTarget.objects.get())
    @add_fixtures(['test_scmtools'])
    def test_post_all_events_and_more(self):
        """Testing adding a webhook that listens on all events (*) and
        additional events
        """
        self.user.is_superuser = True
        self.user.save()
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': 'review_request_closed,*,review_request_published',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'all'
            },
            expected_mimetype=webhook_item_mimetype)
        webhook = WebHookTarget.objects.get()
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('webhook', rsp)
        self.compare_item(rsp['webhook'], webhook)
        # '*' should subsume any explicitly-listed events.
        self.assertListEqual(webhook.events, ['*'])
    @add_fixtures(['test_scmtools'])
    def test_post_empty_repositories(self):
        """Testing adding a webhook that has an empty repositories field"""
        self.user.is_superuser = True
        self.user.save()
        rsp = self.api_post(
            get_webhook_list_url(),
            {
                'enabled': 0,
                'events': 'review_request_closed,*,review_request_published',
                'url': 'http://example.com',
                'encoding': 'application/json',
                'custom_content': '',
                'apply_to': 'custom',
                'repositories': '',
            },
            expected_mimetype=webhook_item_mimetype)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('webhook', rsp)
        self.compare_item(rsp['webhook'], WebHookTarget.objects.get())
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ExtraDataItemMixin, BaseWebAPITestCase):
    """Tests for the WebHookResource item resource."""
    resource = resources.webhook
    sample_api_url = 'api/webhooks/<id>/'
    basic_get_use_admin = True
    basic_delete_use_admin = True
    basic_put_use_admin = True
    fixtures = ['test_users']
    compare_item = compare_item

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        """Create a webhook and return (url, mimetype, object) for GET tests."""
        target = self.create_webhook(with_local_site=with_local_site)
        url = get_webhook_item_url(target.pk, local_site_name)
        return url, webhook_item_mimetype, target

    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        """Create a webhook and return the data needed for PUT tests."""
        target = self.create_webhook(with_local_site=with_local_site)
        url = get_webhook_item_url(target.pk, local_site_name)
        return url, webhook_item_mimetype, {}, target, []

    def check_put_result(self, user, item_rsp, item):
        """Verify a PUT response payload against the stored WebHookTarget."""
        stored = WebHookTarget.objects.get(pk=item_rsp['id'])
        self.compare_item(item_rsp, stored)

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        """Create a webhook and return the data needed for DELETE tests."""
        target = self.create_webhook(with_local_site=with_local_site)
        return get_webhook_item_url(target.pk, local_site_name), [target]

    def check_delete_result(self, user, webhook):
        """Verify the webhook no longer exists after a DELETE."""
        with self.assertRaises(WebHookTarget.DoesNotExist):
            WebHookTarget.objects.get(pk=webhook.pk)
| 35.193906 | 78 | 0.574105 |
a4cf2f2c60fb793d37bfc54c78d55510a3e552a7 | 2,826 | py | Python | hyperbox/networks/repnas/utils.py | marsggbo/hyperbox | 91dcd04ad30164bcb12209d818df18961fa3f347 | [
"MIT"
] | 1 | 2022-01-17T00:34:14.000Z | 2022-01-17T00:34:14.000Z | hyperbox/networks/repnas/utils.py | marsggbo/hyperbox | 91dcd04ad30164bcb12209d818df18961fa3f347 | [
"MIT"
] | null | null | null | hyperbox/networks/repnas/utils.py | marsggbo/hyperbox | 91dcd04ad30164bcb12209d818df18961fa3f347 | [
"MIT"
] | null | null | null | from hyperbox.mutables.spaces import OperationSpace
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
# from .rep_ops import *
from hyperbox.networks.repnas.rep_ops import *
def fuse(candidates, weights, kernel_size=3):
    """Fuse a set of weighted DBB branch ops into one (kernel, bias) pair.

    Each candidate op is converted to an equivalent plain convolution kernel
    and bias via the ``transI``..``transVI`` reparameterization transforms,
    scaled by its architecture weight, and the per-branch results are summed.

    Args:
        candidates: ops to fuse; each must be one of DBBORIGIN, DBBAVG,
            DBB1x1 or DBB1x1kxk (dispatched by class name).
        weights: per-candidate architecture weights (``.float()``-able).
        kernel_size: target kernel size used to zero-pad 1x1 kernels.

    Returns:
        Tuple of (fused kernel tensor, fused bias tensor).

    Raises:
        TypeError: if a candidate is not one of the supported DBB op types.
    """
    k_list = []
    b_list = []
    for i in range(len(candidates)):
        op = candidates[i]
        weight = weights[i].float()
        if op.__class__.__name__ == "DBB1x1kxk":
            if hasattr(op.dbb_1x1_kxk, 'idconv1'):
                k1 = op.dbb_1x1_kxk.idconv1.get_actual_kernel()
            else:
                k1 = op.dbb_1x1_kxk.conv1.weight
            k1, b1 = transI_fusebn(k1, op.dbb_1x1_kxk.bn1)
            k2, b2 = transI_fusebn(op.dbb_1x1_kxk.conv2.weight, op.dbb_1x1_kxk.bn2)
            k, b = transIII_1x1_kxk(k1, b1, k2, b2, groups=op.groups)
        elif op.__class__.__name__ == "DBB1x1":
            k, b = transI_fusebn(op.conv.weight, op.bn)
            # Pad the 1x1 kernel up to the target kernel size.
            k = transVI_multiscale(k, kernel_size)
        elif op.__class__.__name__ == "DBBORIGIN":
            k, b = transI_fusebn(op.conv.weight, op.bn)
        elif op.__class__.__name__ == "DBBAVG":
            ka = transV_avg(op.out_channels, op.kernel_size, op.groups)
            k2, b2 = transI_fusebn(ka.to(op.dbb_avg.avgbn.weight.device), op.dbb_avg.avgbn)
            if hasattr(op.dbb_avg, 'conv'):
                k1, b1 = transI_fusebn(op.dbb_avg.conv.weight, op.dbb_avg.bn)
                k, b = transIII_1x1_kxk(k1, b1, k2, b2, groups=op.groups)
            else:
                k, b = k2, b2
        else:
            # Bug fix: this used to ``raise`` a plain string, which in
            # Python 3 fails with "exceptions must derive from BaseException"
            # instead of reporting the unsupported op type.
            raise TypeError("Not In DBBAVG DBB1x1kxk DBB1x1 DBBORIGIN.")
        # Scale each branch by its architecture weight before summing.
        k_list.append(k.detach() * weight)
        b_list.append(b.detach() * weight)
    return transII_addbranch(k_list, b_list)
def replace(net):
    """Fold every chosen OperationSpace in ``net`` into a single Conv2d.

    For each OperationSpace module, the candidates selected by its mask are
    fused (see :func:`fuse`) into one reparameterized convolution, which then
    replaces the candidate list in place.
    """
    for _, module in net.named_modules():
        if not isinstance(module, OperationSpace):
            continue
        chosen = []
        chosen_weights = []
        for idx, mask_val in enumerate(module.mask):
            if mask_val:
                chosen.append(module.candidates_original[idx])
                chosen_weights.append(mask_val)
        # Fuse into the largest kernel size among the selected candidates.
        kernel = max(c.kernel_size for c in chosen)
        k, b = fuse(chosen, chosen_weights, kernel)
        template = module.candidates_original[0]
        reparam = nn.Conv2d(
            in_channels=template.in_channels,
            out_channels=template.out_channels,
            kernel_size=kernel,
            stride=template.stride,
            padding=kernel // 2,
            dilation=1,
            groups=template.groups,
        )
        reparam.weight.data = k
        reparam.bias.data = b
        # Swap the candidate set for the single fused convolution.
        module.candidates_original = [reparam]
        module.candidates = torch.nn.ModuleList([reparam])
        module.mask = torch.tensor([True])
bef560a89b5a53789b765d1a67074dcaabfb5af9 | 12,779 | py | Python | lib/nest_client/entities/base.py | dskrypa/nest-client | 361ef2fb91d18151bbf8f0aa8510652dcec95d9d | [
"Apache-2.0"
] | null | null | null | lib/nest_client/entities/base.py | dskrypa/nest-client | 361ef2fb91d18151bbf8f0aa8510652dcec95d9d | [
"Apache-2.0"
] | null | null | null | lib/nest_client/entities/base.py | dskrypa/nest-client | 361ef2fb91d18151bbf8f0aa8510652dcec95d9d | [
"Apache-2.0"
] | null | null | null | """
Classes that represent Nest Structures, Users, Devices/Thermostats, etc.
:author: Doug Skrypa
"""
import logging
from datetime import datetime
from threading import RLock
from typing import TYPE_CHECKING, Any, Union, Optional, TypeVar, Type, Callable
from ..constants import BUCKET_CHILD_TYPES
from ..exceptions import NestObjectNotFound, DictAttrFieldNotFoundError
from ..utils import ClearableCachedProperty, ClearableCachedPropertyMixin, cached_classproperty, celsius_to_fahrenheit
if TYPE_CHECKING:
from httpx import Response
from ..client import NestWebClient
__all__ = ['NestObject', 'NestObj', 'NestProperty', 'TemperatureProperty', 'NestObjectDict']
log = logging.getLogger(__name__)
NestObjectDict = dict[str, Union[str, int, None, dict[str, Any]]]
NestObj = TypeVar('NestObj', bound='NestObject')
_NotSet = object()
class NestObject(ClearableCachedPropertyMixin):
    """Base class for objects in the Nest object model (structures, devices, users, etc.).

    Instances are cached singletons keyed by their object key ('<type>.<serial>'),
    so constructing a NestObject for an already-seen key returns the existing
    instance (which is then re-initialized with the latest data by __init__).
    Subclasses register themselves by bucket type via __init_subclass__, and
    __new__ dispatches to the registered subclass.
    """
    __lock = RLock()  # Guards the shared singleton cache below
    __instances = {}  # object key -> cached singleton instance
    type: Optional[str] = None  # Bucket type (e.g. 'device', 'structure')
    parent_type: Optional[str] = None
    child_types: Optional[dict[str, bool]] = None  # child type -> fetch flag
    sub_type_key: Optional[str] = None  # value-dict key identifying a sub-type
    _type_cls_map: dict[str, Type[NestObj]] = {}  # bucket type -> subclass
    _sub_type_cls_map: dict[str, dict[str, Type[NestObj]]] = {}  # type -> {key: subclass}
    # noinspection PyMethodOverriding
    def __init_subclass__(cls, type: str, parent_type: str = None, key: str = None):  # noqa
        """Register the subclass for its bucket type (or sub-type key)."""
        cls.type = type
        cls.parent_type = parent_type
        cls.sub_type_key = key
        if key:
            cls._sub_type_cls_map.setdefault(type, {})[key] = cls
        else:
            cls._type_cls_map[type] = cls
        if child_types := BUCKET_CHILD_TYPES.get(type):
            cls.child_types = child_types
    def __new__(
        cls, key: str, timestamp: Optional[int], revision: Optional[int], value: dict[str, Any], *args, **kwargs
    ):
        if cls is NestObject:
            # Dispatch to the subclass registered for this bucket type, then
            # to a sub-type subclass if the value contains its marker key.
            bucket_type = key.split('.', 1)[0]
            cls = cls._type_cls_map.get(bucket_type, cls)
            if key_sub_cls_map := cls._sub_type_cls_map.get(cls.type):
                # Bug fix: the loop variable used to be named ``key``, which
                # shadowed the object key parameter - instances were then
                # cached below under a sub-type marker key instead of the
                # object key whenever sub-type dispatch ran.
                for sub_type_key, sub_cls in key_sub_cls_map.items():
                    if sub_type_key in value:
                        cls = sub_cls
                        break
        with NestObject.__lock:
            try:
                return NestObject.__instances[key]
            except KeyError:
                NestObject.__instances[key] = obj = super().__new__(cls)
                return obj
    def __init__(
        self,
        key: str,
        timestamp: Optional[int],
        revision: Optional[int],
        value: dict[str, Any],
        client: 'NestWebClient',
    ):
        """Initialize (or re-initialize, for a cached singleton) this object.

        :param key: Object key in the form '<type>.<serial>'
        :param timestamp: Server-provided object timestamp
        :param revision: Server-provided object revision
        :param value: The object's data payload
        :param client: The NestWebClient through which this object was obtained
        """
        if hasattr(self, 'key'):
            # Re-initializing a cached singleton - drop stale cached values.
            self.clear_cached_properties()
        self.key = key
        self.type, self.serial = key.split('.', 1)
        if self.parent_type is None and self.type != self.__class__.type:
            # Infer the parent type from the serial's format: structures use
            # dashed UUIDs, users use numeric IDs, devices use other strings.
            if '-' in self.serial:
                self.parent_type = 'structure'
            else:
                try:
                    int(self.serial)
                except ValueError:
                    self.parent_type = 'device'
                else:
                    self.parent_type = 'user'
        self.timestamp = timestamp
        self.revision = revision
        self.value = value
        self.client = client
        self.config = client.config
        self._refreshed = datetime.now()
        self._needs_update = False
    def __repr__(self) -> str:
        if self.__class__.type:
            return f'<{self.__class__.__name__}[{self.serial}]>'
        else:
            return f'<{self.__class__.__name__}[{self.serial}, type={self.type}]>'
    def to_dict(self) -> NestObjectDict:
        """Return this object in the dict form used by the Nest API."""
        return {
            'object_key': self.key,
            'object_timestamp': self.timestamp,
            'object_revision': self.revision,
            'value': self.value,
        }
    @classmethod
    def from_dict(cls: Type[NestObj], obj: NestObjectDict, client: 'NestWebClient') -> NestObj:
        """Construct an instance from the dict form used by the Nest API."""
        return cls(obj['object_key'], obj['object_timestamp'], obj['object_revision'], obj['value'], client)
    @classmethod
    async def find(
        cls: Type[NestObj], client: 'NestWebClient', serial: str = None, type: str = None  # noqa
    ) -> NestObj:
        """Find a single object of this class's type (or the given type).

        :raises ValueError: if the requested type conflicts with this class
        """
        if type and cls.type is not None and type != cls.type:
            expected = cls._type_cls_map.get(type, NestObject).__name__
            raise ValueError(f'Use {expected} - {cls.__name__} is incompatible with {type=}')
        return await client.get_object(type or cls.type, serial, _sub_type_key=cls.sub_type_key)
    @classmethod
    async def find_all(cls: Type[NestObj], client: 'NestWebClient', type: str = None) -> dict[str, NestObj]:  # noqa
        """Find all objects of this class's type, as a mapping of key -> object.

        :raises ValueError: if the requested type conflicts with this class
        """
        if type and cls.type is not None and type != cls.type:
            expected = cls._type_cls_map.get(type, NestObject).__name__
            raise ValueError(f'Use {expected} - {cls.__name__} is incompatible with {type=}')
        obj_dict = await client.get_objects([type or cls.type])
        if sub_type_key := cls.sub_type_key:
            # Filter to the sub-type that this class represents.
            return {key: obj for key, obj in obj_dict.items() if obj.sub_type_key == sub_type_key}
        else:
            return obj_dict
    # region Refresh Status Methods
    def needs_refresh(self, interval: float) -> bool:
        """Whether a refresh is pending or the data is older than ``interval`` seconds."""
        return self._needs_update or (datetime.now() - self._refreshed).total_seconds() >= interval
    def subscribe_dict(self, meta: bool = True) -> dict[str, Union[str, int, None]]:
        """Return the subscription payload for this object (optionally with revision metadata)."""
        if meta:
            return {'object_key': self.key, 'object_timestamp': self.timestamp, 'object_revision': self.revision}
        else:
            return {'object_key': self.key}
    async def refresh(
        self, all: bool = True, subscribe: bool = True, send_meta: bool = True, timeout: float = None  # noqa
    ):
        """Refresh this object (or all known objects) from the service."""
        last = self._refreshed
        if all:
            await self.client.refresh_known_objects(subscribe, send_meta, timeout)
        else:
            await self.client.refresh_objects([self], subscribe, send_meta, timeout=timeout)
        if last == self._refreshed:
            target = 'all objects' if all else self
            log.debug(f'Attempted to refresh {target}, but no fresh data was received for {self}')
    def _maybe_refresh(self, objects: list[NestObjectDict], source: str):
        # Apply the entry matching this object's key, if present in the response.
        for obj in objects:
            if obj['object_key'] == self.key:
                self._refresh(obj)
                break
        else:
            keys = [obj['object_key'] for obj in objects]
            log.warning(f'Could not refresh {self} via {source} - received unexpected {keys=}')
    def _refresh(self, obj_dict: NestObjectDict):
        """Replace this object's data with a freshly received payload."""
        log.debug(f'Received update for {self}')
        self.clear_cached_properties()
        self.revision = obj_dict['object_revision']
        self.timestamp = obj_dict['object_timestamp']
        self.value = obj_dict['value']
        self._refreshed = datetime.now()
        self._needs_update = False
    async def _subscribe(self, send_meta: bool = False):
        self._maybe_refresh(await self.client.subscribe([self], send_meta), 'subscribe')
    async def _app_launch(self):
        self._maybe_refresh(await self.client.get_buckets([self.type]), 'app_launch')
    # endregion
    async def _set_key(self, key: str, value: Any, op: str = 'MERGE') -> 'Response':
        """Set a single key in this object's value via the Nest API."""
        return await self._set_full({key: value}, op)
    async def _set_full(self, data: dict[str, Any], op: str = 'MERGE') -> 'Response':
        """Submit a value update for this object via the Nest API."""
        payload = {'objects': [{'object_key': self.key, 'op': op, 'value': data}]}
        # Flag the local copy as stale until the next refresh confirms the change.
        self._needs_update = True
        async with self.client.transport_url() as client:
            log.debug(f'Submitting {payload=}')
            self.clear_cached_properties()
            return await client.post('v5/put', json=payload)
    # region Parent/Child Object Methods
    def is_child_of(self, nest_obj: NestObj) -> bool:
        return nest_obj.is_parent_of(self)
    def is_parent_of(self, nest_obj: NestObj) -> bool:
        # Parent/child objects share the same serial, with a registered child type.
        return self.child_types and nest_obj.type in self.child_types and nest_obj.serial == self.serial
    @cached_classproperty
    def fetch_child_types(cls) -> tuple[str, ...]:  # noqa
        """Child types for which fetching is enabled."""
        if child_types := cls.child_types:
            return tuple(t for t, fetch in child_types.items() if fetch)
        return ()
    async def get_children(self) -> dict[str, NestObj]:
        """Mapping of {type: NestObject} for this object's children"""
        if fetch_child_types := self.fetch_child_types:
            key_obj_map = await self.client.get_objects(fetch_child_types)
            return {obj.type: obj for obj in key_obj_map.values() if obj.serial == self.serial}
        return {}
    async def get_parent(self) -> Optional[NestObj]:
        """This object's parent object, or None if it has none / cannot be found."""
        if self.parent_type:
            try:
                return await self.client.get_object(self.parent_type, self.serial)
            except NestObjectNotFound:
                return None
        return None
    # endregion
class NestProperty(ClearableCachedProperty):
    """Cached-property descriptor for values nested in an instance's dict attribute."""
    def __init__(
        self,
        path: str,
        type: Callable = _NotSet,  # noqa
        default: Any = _NotSet,
        default_factory: Callable = _NotSet,
        delim: str = '.',
        attr: str = 'value',
    ):
        # noinspection PyUnresolvedReferences
        """
        Descriptor that acts as a cached property for retrieving values nested in a dict stored in an attribute of the
        object that this :class:`NestProperty` is a member of.  The value is not accessed or stored until the first
        time that it is accessed.
        To un-cache a value (causes the descriptor to take over again)::\n
            >>> del instance.__dict__[attr_name]
        The :class:`ClearableCachedPropertyMixin` mixin class can be used to facilitate clearing all
        :class:`NestProperty` and any similar cached properties that exist in a given object.
        :param path: The nexted key location in the dict attribute of the value that this NestProperty
          represents; dict keys should be separated by ``.``, otherwise the delimiter should be provided via ``delim``
        :param type: Callable that accepts 1 argument; the value of this NestProperty will be passed to it,
          and the result will be returned as this NestProperty's value (default: no conversion)
        :param default: Default value to return if a KeyError is encountered while accessing the given path
        :param default_factory: Callable that accepts no arguments to be used to generate default values
          instead of an explicit default value
        :param delim: Separator that was used between keys in the provided path (default: ``.``)
        :param attr: Name of the attribute in the class that this NestProperty is in that contains the dict that this
          NestProperty should reference
        """
        self.path = [p for p in path.split(delim) if p]
        self.path_repr = delim.join(self.path)
        self.attr = attr
        self.type = type
        # Placeholder name containing '#'; replaced by __set_name__ when the
        # descriptor is assigned in a class body.  The '#' also disables
        # caching in __get__ until a real attribute name is known.
        self.name = f'_{self.__class__.__name__}#{self.path_repr}'
        self.default = default
        self.default_factory = default_factory
    def __set_name__(self, owner, name):
        # Record the attribute name used for caching, and generate a docstring
        # describing the referenced location.
        self.name = name
        attr_path = ''.join('[{!r}]'.format(p) for p in self.path)
        self.__doc__ = (
            f'A :class:`NestProperty<nest.entities.base.NestProperty>` that references this {owner.__name__}'
            f' instance\'s {self.attr}{attr_path}'
        )
    def __get__(self, obj: 'NestObject', cls):
        if obj is None:
            # Accessed on the class itself - return the descriptor.
            return self
        # TODO: Fix update/refresh handling
        # if obj._needs_update:
        #     await obj.refresh()
        # Walk the configured key path through the dict attribute.
        value = getattr(obj, self.attr)
        for key in self.path:
            try:
                value = value[key]
            except KeyError:
                if self.default is not _NotSet:
                    value = self.default
                    break
                elif self.default_factory is not _NotSet:
                    value = self.default_factory()
                    break
                raise DictAttrFieldNotFoundError(obj, self.name, self.attr, self.path_repr)
        if self.type is not _NotSet:
            # noinspection PyArgumentList
            value = self.type(value)
        if '#' not in self.name:
            # Cache in the instance dict so the descriptor is bypassed until
            # the cached value is cleared.
            obj.__dict__[self.name] = value
        return value
    # def __get__(self, obj: 'NestObject', cls):
    #     if obj is None:
    #         return self
    #     return self._get(obj).__await__()
class TemperatureProperty(NestProperty):
    """NestProperty whose raw value is Celsius, converted per the client's configured unit."""

    def __get__(self, obj: 'NestObject', cls):
        if obj is None:
            return self
        celsius = super().__get__(obj, cls)
        # The service reports Celsius; convert only when configured for Fahrenheit.
        if obj.client.config.temp_unit != 'f':
            return celsius
        return celsius_to_fahrenheit(celsius)
| 39.934375 | 118 | 0.622271 |
b7f950d5819720cac0e3b02a614e5bfafe0f239d | 3,603 | py | Python | test/functional/mining_getblocktemplate_longpoll.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The TNGC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
import random
import threading
from test_framework.test_framework import TNGCTestFramework
from test_framework.util import get_rpc_proxy
from test_framework.wallet import MiniWallet
class LongpollThread(threading.Thread):
    """Thread that issues a blocking getblocktemplate long poll request."""

    def __init__(self, node):
        super().__init__()
        # Capture the current longpollid so our request blocks until it changes.
        current = node.getblocktemplate({'rules': ['segwit']})
        self.longpollid = current['longpollid']
        # A fresh RPC connection is needed; the same connection cannot be
        # shared between two threads.
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
class GetBlockTemplateLPTest(TNGCTestFramework):
    """Functional test for getblocktemplate long-polling behavior."""
    def set_test_params(self):
        self.num_nodes = 2
        self.supports_cli = False
    def run_test(self):
        """Exercise the conditions under which a longpoll request returns."""
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens")
        self.nodes[0].generate(10)
        template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
        longpollid = template['longpollid']
        template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
        assert template2['longpollid'] == longpollid
        self.log.info("Test that longpoll waits if we do nothing")
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert thr.is_alive()
        miniwallets = [ MiniWallet(node) for node in self.nodes ]
        self.log.info("Test that longpoll will terminate if another node generates a block")
        miniwallets[1].generate(1) # generate a block on another node
        # check that the thread exits now that a new block has arrived
        thr.join(5)  # wait 5 seconds or until thread exits
        assert not thr.is_alive()
        self.log.info("Test that longpoll will terminate if we generate a block ourselves")
        thr = LongpollThread(self.nodes[0])
        thr.start()
        miniwallets[0].generate(1) # generate a block on own node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert not thr.is_alive()
        # Add enough mature utxos to the wallets, so that all txs spend confirmed coins
        self.nodes[0].generate(100)
        self.sync_blocks()
        self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll")
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20)
        miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes),
                                          fee_rate=fee_rate)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert not thr.is_alive()
# Entry point when run directly as a standalone functional test script.
if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
| 43.939024 | 134 | 0.679711 |
eabbf15760ccc9555acc8b8c43094483bfd2ffa9 | 889 | py | Python | assets/guessing-game-2.py | Akaito/ZeroToBoto | 5806fa004ae4ce54d594303133d043d1e606f0e8 | [
"Zlib"
] | null | null | null | assets/guessing-game-2.py | Akaito/ZeroToBoto | 5806fa004ae4ce54d594303133d043d1e606f0e8 | [
"Zlib"
] | null | null | null | assets/guessing-game-2.py | Akaito/ZeroToBoto | 5806fa004ae4ce54d594303133d043d1e606f0e8 | [
"Zlib"
] | null | null | null | # Guessing game (step 2)
# Store off the computer's target number.
# We're using a string here, instead of an integer, so we can
# directly compare it to the return of raw_input() (the user's typed-in guess).
my_number = 4
# We need to declare the user's guess variable to use it in the while loop
# below, but don't want it to already be correct. So just store some
# value that the computer won't come up with for its number.
user_guess = -1
while user_guess != my_number:
# Take user input with a prompt, and convert it to an int.
user_guess = int(raw_input("Guess my number (1-5): "))
# Help guide the human to the right answer.
if user_guess < my_number:
print "Too low."
if user_guess > my_number:
print "Too high."
# This won't be hit until the while loop ends, when the guess matches my_number.
print "Correct! My number was", my_number
| 34.192308 | 80 | 0.701912 |
83084c41fa388009717a7266fe3dd7de5a4bf0b9 | 598 | py | Python | mongoengine_plus/models/event_handlers.py | cuenca-mx/mongoengine-plus | 01e55db2af58477bac2621ff51708daae3416268 | [
"MIT"
] | 3 | 2020-12-11T16:48:44.000Z | 2021-03-29T00:05:57.000Z | agave/lib/mongoengine/event_handlers.py | cuenca-mx/agave | d4719bdbab8e200c98d206475df6adb275e9fdcc | [
"MIT"
] | 115 | 2020-08-26T13:26:07.000Z | 2022-03-31T23:58:22.000Z | agave/lib/mongoengine/event_handlers.py | cuenca-mx/agave | d4719bdbab8e200c98d206475df6adb275e9fdcc | [
"MIT"
] | null | null | null | import datetime as dt
from typing import Any
from blinker import NamedSignal
from mongoengine import signals
def handler(event: NamedSignal):
    """Build a decorator that marks a function as a handler for ``event``.

    The decorated function gains an ``apply`` attribute; calling
    ``fn.apply(SomeDocumentClass)`` connects the function to the signal with
    that class as the sender and returns the class, which allows the handler
    itself to be used as a class decorator.

    See: http://docs.mongoengine.org/guide/signals.html?highlight=update
    """
    def wrap(fn: Any):
        def connect_to(cls):
            event.connect(fn, sender=cls)
            return cls
        # Expose the connector under the 'apply' attribute callers rely on.
        fn.apply = connect_to
        return fn
    return wrap
@handler(signals.pre_save)
def updated_at(_, document):
    """Pre-save handler: stamp the document with the current UTC time."""
    document.updated_at = dt.datetime.utcnow()
| 20.62069 | 67 | 0.67893 |
d4a812bc698ec232379ecda87e57e373fb24c1be | 7,298 | py | Python | sdks/python/apache_beam/io/gcp/gcsio_integration_test.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2019-07-27T11:54:33.000Z | 2021-06-06T11:53:36.000Z | sdks/python/apache_beam/io/gcp/gcsio_integration_test.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2019-04-15T15:27:23.000Z | 2019-07-01T18:13:10.000Z | sdks/python/apache_beam/io/gcp/gcsio_integration_test.py | bobingm/beam | 7dce40187f939424b8249c2c21eaeb4c70c26d31 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-03T19:54:48.000Z | 2021-06-03T19:54:48.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for gcsio module.
Runs tests against Google Cloud Storage service.
Instantiates a TestPipeline to get options such as GCP project name, but
doesn't actually start a Beam pipeline or test any specific runner.
Options:
--kms_key_name=projects/<project-name>/locations/<region>/keyRings/\
<key-ring-name>/cryptoKeys/<key-name>/cryptoKeyVersions/<version>
Pass a Cloud KMS key name to test GCS operations using customer managed
encryption keys (CMEK).
Cloud KMS permissions:
The project's Cloud Storage service account requires Encrypter/Decrypter
permissions for the key specified in --kms_key_name.
To run these tests manually:
./gradlew :sdks:python:test-suites:dataflow:integrationTest \
-Dtests=apache_beam.io.gcp.gcsio_integration_test:GcsIOIntegrationTest \
-DkmsKeyName=KMS_KEY_NAME
"""
# pytype: skip-file
import logging
import unittest
import uuid
from nose.plugins.attrib import attr
from apache_beam.io.filesystems import FileSystems
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp import gcsio
except ImportError:
gcsio = None # type: ignore
@unittest.skipIf(gcsio is None, 'GCP dependencies are not installed')
class GcsIOIntegrationTest(unittest.TestCase):
  """Exercises gcsio copy()/copy_batch() against the real GCS service.

  No Beam pipeline is started; TestPipeline is instantiated only so that
  pipeline options (project, temp_location, kms_key_name) can be read.
  """

  INPUT_FILE = 'gs://dataflow-samples/shakespeare/kinglear.txt'
  # Larger than 1MB to test maxBytesRewrittenPerCall.
  INPUT_FILE_LARGE = (
      'gs://dataflow-samples/wikipedia_edits/wiki_data-000000000000.json')

  def setUp(self):
    """Reads pipeline options and prepares a unique GCS temp directory."""
    self.test_pipeline = TestPipeline(is_integration_test=True)
    self.runner_name = type(self.test_pipeline.runner).__name__
    if self.runner_name != 'TestDataflowRunner':
      # This test doesn't run a pipeline, so it doesn't make sense to try it on
      # different runners. Running with TestDataflowRunner makes sense since
      # it uses GoogleCloudOptions such as 'project'.
      raise unittest.SkipTest('This test only runs with TestDataflowRunner.')

    self.project = self.test_pipeline.get_option('project')
    # Unique per-run directory so that concurrent runs cannot collide;
    # tearDown deletes everything under it.
    self.gcs_tempdir = (
        self.test_pipeline.get_option('temp_location') + '/gcs_it-' +
        str(uuid.uuid4()))
    self.kms_key_name = self.test_pipeline.get_option('kms_key_name')
    self.gcsio = gcsio.GcsIO()

  def tearDown(self):
    # Remove the whole per-run temp directory created in setUp.
    FileSystems.delete([self.gcs_tempdir + '/'])

  def _verify_copy(self, src, dst, dst_kms_key_name=None):
    """Asserts dst exists, matches src's checksum and uses the KMS key."""
    self.assertTrue(FileSystems.exists(src), 'src does not exist: %s' % src)
    self.assertTrue(FileSystems.exists(dst), 'dst does not exist: %s' % dst)
    src_checksum = self.gcsio.checksum(src)
    dst_checksum = self.gcsio.checksum(dst)
    self.assertEqual(src_checksum, dst_checksum)
    actual_dst_kms_key = self.gcsio.kms_key(dst)
    if actual_dst_kms_key is None:
      self.assertEqual(actual_dst_kms_key, dst_kms_key_name)
    else:
      # The service reports the key with a cryptoKeyVersion suffix, so only
      # the requested prefix is compared.
      self.assertTrue(
          actual_dst_kms_key.startswith(dst_kms_key_name),
          "got: %s, wanted startswith: %s" %
          (actual_dst_kms_key, dst_kms_key_name))

  def _test_copy(
      self,
      name,
      kms_key_name=None,
      max_bytes_rewritten_per_call=None,
      src=None):
    """Copies one object into the temp dir and verifies the result."""
    src = src or self.INPUT_FILE
    dst = self.gcs_tempdir + '/%s' % name
    extra_kwargs = {}
    if max_bytes_rewritten_per_call is not None:
      extra_kwargs['max_bytes_rewritten_per_call'] = (
          max_bytes_rewritten_per_call)

    self.gcsio.copy(src, dst, kms_key_name, **extra_kwargs)
    self._verify_copy(src, dst, kms_key_name)

  @attr('IT')
  def test_copy(self):
    self._test_copy("test_copy")

  @attr('IT')
  def test_copy_kms(self):
    if self.kms_key_name is None:
      raise unittest.SkipTest('--kms_key_name not specified')
    self._test_copy("test_copy_kms", self.kms_key_name)

  @attr('IT')
  def test_copy_rewrite_token(self):
    # Tests a multi-part copy (rewrite) operation. This is triggered by a
    # combination of 3 conditions:
    #  - a large enough src
    #  - setting max_bytes_rewritten_per_call
    #  - setting kms_key_name
    if self.kms_key_name is None:
      raise unittest.SkipTest('--kms_key_name not specified')

    rewrite_responses = []
    self.gcsio._set_rewrite_response_callback(
        lambda response: rewrite_responses.append(response))
    self._test_copy(
        "test_copy_rewrite_token",
        kms_key_name=self.kms_key_name,
        max_bytes_rewritten_per_call=50 * 1024 * 1024,
        src=self.INPUT_FILE_LARGE)
    # Verify that there was a multi-part rewrite (at least one intermediate,
    # not-yet-done response).
    self.assertTrue(any([not r.done for r in rewrite_responses]))

  def _test_copy_batch(
      self,
      name,
      kms_key_name=None,
      max_bytes_rewritten_per_call=None,
      src=None):
    """Copies the same source to several destinations and verifies each."""
    num_copies = 10
    srcs = [src or self.INPUT_FILE] * num_copies
    dsts = [self.gcs_tempdir + '/%s_%d' % (name, i) for i in range(num_copies)]
    src_dst_pairs = list(zip(srcs, dsts))
    extra_kwargs = {}
    if max_bytes_rewritten_per_call is not None:
      extra_kwargs['max_bytes_rewritten_per_call'] = (
          max_bytes_rewritten_per_call)

    result_statuses = self.gcsio.copy_batch(
        src_dst_pairs, kms_key_name, **extra_kwargs)
    for status in result_statuses:
      # The third element of each status tuple is the error (None on success).
      self.assertIsNone(status[2], status)
    for _src, _dst in src_dst_pairs:
      self._verify_copy(_src, _dst, kms_key_name)

  @attr('IT')
  def test_copy_batch(self):
    self._test_copy_batch("test_copy_batch")

  @attr('IT')
  def test_copy_batch_kms(self):
    if self.kms_key_name is None:
      raise unittest.SkipTest('--kms_key_name not specified')
    self._test_copy_batch("test_copy_batch_kms", self.kms_key_name)

  @attr('IT')
  def test_copy_batch_rewrite_token(self):
    # Tests a multi-part copy (rewrite) operation. This is triggered by a
    # combination of 3 conditions:
    #  - a large enough src
    #  - setting max_bytes_rewritten_per_call
    #  - setting kms_key_name
    if self.kms_key_name is None:
      raise unittest.SkipTest('--kms_key_name not specified')

    rewrite_responses = []
    self.gcsio._set_rewrite_response_callback(
        lambda response: rewrite_responses.append(response))
    self._test_copy_batch(
        "test_copy_batch_rewrite_token",
        kms_key_name=self.kms_key_name,
        max_bytes_rewritten_per_call=50 * 1024 * 1024,
        src=self.INPUT_FILE_LARGE)
    # Verify that there was a multi-part rewrite.
    self.assertTrue(any([not r.done for r in rewrite_responses]))
if __name__ == '__main__':
  # Emit INFO logs so integration-test progress is visible on the console.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| 36.128713 | 79 | 0.725404 |
7c8da78924dd44a43ffd3624bc13d80fb1513ea3 | 3,977 | py | Python | bot/ext/pictionary/src/anti_abuse.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | bot/ext/pictionary/src/anti_abuse.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | bot/ext/pictionary/src/anti_abuse.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | import time
class AntiAbuse:
    """Watches pictionary game sessions and builds plain-text reports for
    activity that looks like score/round tampering or an improper shutdown.

    Sessions are keyed by ``str(guild_id) + str(channel_id)`` and store a
    ``(rounds, members, start_time)`` tuple.
    """

    def __init__(self):
        # session_id -> (rounds, members, epoch seconds when watching began)
        self.watching_sessions = {}

    def start_watching_session(self, guild_id, channel_id, rounds, members):
        """Begin watching a game session in the given guild/channel.

        :param rounds: number of rounds the game is configured to run
        :param members: participating members (Discord member-like objects
            with ``id``, ``display_name`` and ``discriminator``)
        """
        session_id = str(guild_id)+str(channel_id)
        self.watching_sessions[session_id] = (rounds, members, time.time())

    def flag_suspicious(self, session_id, guild_id, channel_id, flag):
        """Build a human-readable report for a suspicious event.

        :param flag: list whose first element selects the report type
            ("scores", "rounds" or "exit"); remaining elements carry the
            type-specific payload.
        :return: the formatted report string
        """
        rounds, members, time_started = self.watching_sessions[session_id]
        details = f"[DETAILS]\n\nSessionID: {session_id}\nGuild: {guild_id}\nChannel: {channel_id}\n\nRounds: {rounds}\nMax, Min: {rounds*len(members)}, {rounds*-10}\nMembers: {', '.join([str(member.display_name)+'#'+str(member.discriminator) for member in members])}"
        # Convert elapsed seconds into whole minutes + remaining seconds.
        durationS, durationM = round(time.time() - time_started), 0
        while durationS >= 60:
            durationS -= 60
            durationM += 1
        if flag[0] == "scores":
            report = '\n'.join(
                [f'{key} = {flag[1][key]}' for key in flag[1].keys()])
            suspicion = f"[SCORE UNINTEGRITY]\n\nCulprit(s): {', '.join([user for user in flag[2]])}\nReports: {report}\nTime Took: {durationM} minutes and {durationS} seconds"
        elif flag[0] == "rounds":
            suspicion = f"[ROUNDS UNINTEGRITY]\n\nRounds: {rounds}"
        elif flag[0] == "exit":
            suspicion = f"[IMPROPER EXIT]\n\nError: {flag[1]}\nTime Took: {durationM} minutes and {durationS} seconds"
        return f"[ACTIVITY FLAGGED SUSPICIOUS]|{details}|{suspicion}"

    def do_reality_check(self, scores, guild_id, channel_id):
        """Flag members whose final score lies outside the possible range.

        :param scores: mapping of member id -> final score
        :return: report string if any score is impossible, otherwise None
        """
        session_id = str(guild_id)+str(channel_id)
        rounds, members, time_started = self.watching_sessions[session_id]
        # NOTE(review): the cap here uses 7 * len(members) while the
        # [DETAILS] report prints rounds * len(members) -- confirm which
        # bound is the intended one.
        highest_possible = 7 * len(members)
        lowest_possible = -10 * rounds
        flag = False
        statistics = {}
        violators = []
        for member in members:
            # Bind once with the walrus and reuse it (the original re-read
            # scores[member.id] a second time for the lower-bound check).
            if (score := int(scores[member.id])) >= highest_possible or score <= lowest_possible:
                flag = True
                violators.append(str(member.display_name) +
                                 '#'+str(member.discriminator))
            statistics[str(member.display_name)+'#' +
                       str(member.discriminator)] = score
        if flag:
            return self.flag_suspicious(str(guild_id)+str(channel_id), guild_id, channel_id, ["scores", statistics, violators])
        return None

    def do_entrance_check(self, guild_id, channel_id):
        """Flag sessions configured with an abnormally high round count."""
        session_id = str(guild_id)+str(channel_id)
        rounds, members, time_started = self.watching_sessions[session_id]
        if rounds >= 5:
            return self.flag_suspicious(str(guild_id)+str(channel_id), guild_id, channel_id, ["rounds", rounds, members])
        return None

    def do_exit_check(self, guild_id, channel_id, lobbyinit_id, data):
        """Flag sessions whose per-game state was not cleaned up on exit.

        :param data: [channels_by_guild, ready_up_by_lobby_initiator,
            answers_by_channel, scores_by_channel] -- the four bookkeeping
            mappings the game should have emptied.
        :return: report string if leftover state was found, otherwise None
        """
        flag = False
        target = []
        # (container, key) pairs -- replaces four copy-pasted try/except
        # blocks; KeyError is still the only error treated as "clean".
        leftovers = (
            (data[0], guild_id),
            (data[1], lobbyinit_id),
            (data[2], channel_id),
            (data[3], channel_id),
        )
        for container, key in leftovers:
            try:
                value = container[key]
            except KeyError:
                continue
            flag = True
            target.append(value)
        if flag:
            return self.flag_suspicious(str(guild_id)+str(channel_id), guild_id, channel_id, ["exit", target])

    def end_watching_session(self, guild_id, channel_id):
        """Stop watching a session and drop its bookkeeping entry."""
        session_id = str(guild_id)+str(channel_id)
        self.watching_sessions.pop(session_id)
| 40.581633 | 268 | 0.599447 |
cb37e3dbc9d09ffd27e893676207ddcded385519 | 3,816 | py | Python | work_unit/watchfile_db.py | rnov/Fingerpay | c66667f8dd3a1bb2f66c0746c20de8677251af4d | [
"MIT"
] | 1 | 2016-11-17T21:50:47.000Z | 2016-11-17T21:50:47.000Z | work_unit/watchfile_db.py | rnov/Fingerpay | c66667f8dd3a1bb2f66c0746c20de8677251af4d | [
"MIT"
] | 2 | 2016-11-17T19:43:13.000Z | 2016-11-17T21:34:04.000Z | work_unit/watchfile_db.py | rnov/Fingerpay | c66667f8dd3a1bb2f66c0746c20de8677251af4d | [
"MIT"
] | null | null | null | import sys
import os
import os.path
import logging
import linecache
import time
#sys.path.remove('/usr/lib/python2.7/site-packages') # para test, probar dependencias
custom_path = '{0}/{1}'
sys.path.insert(1, custom_path.format(os.getcwd(), 'depen_packages'))
sys.path.insert(1, custom_path.format(os.getcwd(), 'payfi'))
#print sys.path
import yaml
from payfi import download_chunks as dw
from payfi.watchdog.observers import Observer
from payfi.watchdog.observers import Observer
from payfi.watchdog.events import LoggingEventHandler
# YAML configuration file loaded at startup by __load_config_file().
# (The '.yalm' extension is unusual -- presumably it matches the actual
# file on disk; confirm before "fixing".)
filename = 'db_listener_config.yalm'
# Destination file for this watcher's error log.
log_path = 'db_log.log'
def __load_config_file(file_name):
    """Load settings from the YAML config file into module globals.

    :param file_name: str, path of the YAML configuration file
    :raises IOError: if the file cannot be read (the caller handles it)
    :return: None
    """
    with open(file_name) as f:
        # The `with` block closes the file; the original also called
        # f.close() redundantly inside it.
        data_map = yaml.safe_load(f)
    globals().update(data_map)
    # These keys are used with membership tests elsewhere, so normalise
    # them from YAML lists to tuples.
    for key in ('file_ext', 'register', 'charge', 'success', 'fail'):
        globals()[key] = tuple(globals()[key])
class MySLoggingEventHandler1(LoggingEventHandler):
    """Watchdog handler processing result files dropped by the DB layer.

    Each file is expected to hold three lines: operation type, operation
    result (success/fail) and the originating terminal id.
    """

    def on_created(self, event):
        print 'file has been created {0}'.format(event.src_path)
        # Extension from the last dot onwards; only extensions listed in the
        # YAML-loaded global `file_ext` are processed.
        file_format = event.src_path[event.src_path.rfind('.'):] # finds the last point.
        if file_format in file_ext:
            t_op = linecache.getline(event.src_path, 1).rstrip() # line 1: operation type ('\n' stripped)
            res = linecache.getline(event.src_path, 2).rstrip() # line 2: operation result success/fail
            id_ter = linecache.getline(event.src_path, 3).rstrip() # line 3: terminal id
            print res
            # IMPORTANT: clear the cache, otherwise will read always the same values that come from an individual terminal
            linecache.clearcache()
            if t_op in register and res in fail: #'FAILURE':
                # TODO: roll back the inserted fingerprint (pin + image filename).
                print 'delete inserted fingerprint needed pin and img_filename.. to do'
            # sends to the terminal's folder
            try:
                destiny_path = id_ter + file_format
                # in case terminal_id folder does not exist, create it
                dir_name = os.path.dirname(forward_path.format(id_ter, destiny_path)) # get destiny folder id
                if not os.path.exists(dir_name): # in case does not exist, create it
                    os.mkdir(dir_name) # 0o777
                # Integer epoch-seconds timestamp travels with the result.
                epoch = str(time.time())
                dw.prepare_client_file(forward_path.format(id_ter, destiny_path), (epoch[:epoch.index('.')], res))
            except OSError:
                logging.error('destiny path is a directory, not a file')
            os.remove(event.src_path) # remove always everything
if __name__ == "__main__":
    # init the logger (ERROR level; format includes module and function name)
    logging.basicConfig(filename=log_path, format='%(asctime)s - %(levelname)s: (%(module)s) %(funcName)s - '
                                                  '%(message)s ', level=logging.ERROR) # logging.WARNING
    # load config file, in case of error close the program and register the error
    try:
        __load_config_file(filename)
        print 'watchdog_db has been initialized'
        #print globals() # just for tests
    except IOError:
        logging.error('Could not read config file')
        raise SystemExit
    # Directory to watch: first CLI argument, falling back to
    # db_listening_path loaded from the YAML config.
    path = sys.argv[1] if len(sys.argv) > 1 else db_listening_path # watchdog
    event_handler = MySLoggingEventHandler1() # LoggingEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    # Idle until Ctrl-C; the observer thread performs the actual work.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| 38.545455 | 118 | 0.649895 |
fdebae8d49449bf0224d1a676d5b5121d3b27b8e | 4,505 | py | Python | pymc/deprecator.py | mjhajharia/pymc3 | f1dbc5f07a49744a0d52da77b082dea94987d27b | [
"Apache-2.0"
] | null | null | null | pymc/deprecator.py | mjhajharia/pymc3 | f1dbc5f07a49744a0d52da77b082dea94987d27b | [
"Apache-2.0"
] | null | null | null | pymc/deprecator.py | mjhajharia/pymc3 | f1dbc5f07a49744a0d52da77b082dea94987d27b | [
"Apache-2.0"
] | null | null | null | import functools
import inspect
import warnings
import regex as re
import textwrap
def deprecator(reason=None, version=None, action='deprecate', deprecated_args=None, docs=True):
    """PyMC deprecation helper.

    Returns a decorator that wraps a function (or class) so calling it
    emits a ``DeprecationWarning`` and, optionally, appends a Sphinx
    ``.. deprecated::`` directive to its docstring.

    Parameters
    ----------
    reason : str, optional
        Human-readable explanation included in the warning message.
    version : str, optional
        Version in which the deprecation was introduced.
    action : str
        ``'ignore'`` suppresses the runtime warning; any other value emits it.
    deprecated_args : str, optional
        Whitespace-separated parameter names. When given, only calls passing
        one of these keyword arguments warn (instead of every call).
    docs : bool
        When True, the Sphinx deprecation note is added to the docstring.
    """
    if deprecated_args is not None:
        deprecated_args = set(deprecated_args.split())
    cause = reason
    # Build the suffix of the warning message from the available details.
    if cause is not None and version is not None:
        reason = f', since version {version} ({cause})'
    elif cause is not None:
        # BUG FIX: this literal was missing its f-prefix and printed the
        # raw text "({cause})".
        reason = f' ({cause})'
    elif version is not None:
        reason = f', since version {version}'
    else:
        # BUG FIX: avoid "... is deprecatedNone." when no detail is given.
        reason = ''

    # This function is an edited version of source code taken from the
    # Deprecated library: https://github.com/tantale/deprecated (MIT License)
    def sphinxformatter(version, reason):
        """Render the ``.. deprecated::`` directive as a list of lines."""
        fmtsphinx = ".. deprecated:: {version}" if version else ".. deprecated::"
        sphinxtext = [fmtsphinx.format(version=version)]
        # BUG FIX: tolerate a missing reason (previously crashed on
        # None.lstrip()).
        for paragraph in (reason or "").lstrip().splitlines():
            if paragraph:
                sphinxtext.extend(
                    textwrap.fill(
                        paragraph,
                        initial_indent="   ",
                        subsequent_indent="   ",
                    ).splitlines()
                )
            else:
                sphinxtext.append("")
        return sphinxtext

    def regex_for_deprecated_args(docstring, deprecated_args, sphinx_lines):
        """Insert *sphinx_lines* after each deprecated parameter's docs.

        "\\n\\w+:" locates the next documented parameter (line break, word,
        colon) and "\\n\\w+\\n-+\\n" the next section header ("Returns",
        "Examples", ... followed by a dashed line); the note is inserted
        before whichever comes first, or at the end of the block.
        """
        for deprecated_arg in deprecated_args:
            marker = f'\n{deprecated_arg}:'
            before, sep, after = docstring.partition(marker)
            if not sep:
                # Parameter not documented; nothing to annotate.
                continue
            next_item = re.search(r"\n\w+:", after)
            next_section = re.search(r"\n\w+\n-+\n", after)
            last = len(after)
            # BUG FIX: the original called .start() on possibly-None matches
            # and spliced in str(list) instead of the rendered lines.
            n = min(
                next_item.start() if next_item else last,
                next_section.start() if next_section else last,
                last,
            )
            insert_at = len(before) + len(marker) + n
            note = '\n' + '\n'.join(sphinx_lines) + '\n'
            docstring = docstring[:insert_at] + note + docstring[insert_at:]
        return docstring

    def format_message(func):
        """Attach docs to *func* and return the warning-emitting wrapper."""
        if deprecated_args is None:
            if inspect.isclass(func):
                fmt = "Class {name} is deprecated{reason}."
            else:
                fmt = "Function or method {name} is deprecated{reason}."
        else:
            fmt = "Parameter(s) {name} deprecated{reason}"
        if docs is True:
            docstring = textwrap.dedent(func.__doc__ or "")
            if docstring:
                # Strip trailing blank lines, then leave one blank line
                # before the deprecation note.
                docstring = re.sub(r"\n+$", "", docstring, flags=re.DOTALL) + "\n\n"
            else:
                docstring = "\n"
            sphinxtext = sphinxformatter(version, cause)
            if deprecated_args is None:
                for line in sphinxtext:
                    docstring += f'{line}\n'
            else:
                # BUG FIX: the helper's name was misspelled here
                # (regex_for_deprecated_arg) and it relied on an
                # out-of-scope variable; pass the lines explicitly.
                docstring = regex_for_deprecated_args(docstring, deprecated_args, sphinxtext)
            func.__doc__ = docstring

        @functools.wraps(func)
        def new_func(*args, **kwargs):
            if deprecated_args is None:
                name = func.__name__
            else:
                # Warn only when a deprecated keyword is actually passed.
                argstodeprecate = deprecated_args.intersection(kwargs)
                name = ', '.join(repr(arg) for arg in argstodeprecate)
            if name != "" and action != "ignore":
                warnings.simplefilter('always', DeprecationWarning)
                warnings.warn(
                    fmt.format(name=name, reason=reason),
                    category=DeprecationWarning,
                    stacklevel=2,
                )
            # BUG FIX: the original returned the undecorated function
            # object instead of calling it.
            return func(*args, **kwargs)
        return new_func

    # BUG FIX: the original returned the undefined name ``decorator``.
    return format_message
| 39.517544 | 141 | 0.564928 |
cbe3ce7aee7f59834a1c767d68bd8f5b4a11affc | 451 | py | Python | devices/urls.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | devices/urls.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | devices/urls.py | j-windsor/iRiot-WebApp | 2af059f88bd010b98e50b9ee593c89027dcc53e0 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
# URL routes for the devices app. The route names are resolved elsewhere
# (reverse() / {% url %}), so they must stay stable.
urlpatterns = [
    url(r'^update/$', views.update, name='update'),
    url(r'^new/$', views.new, name='new'),
    # Room-scoped device management.
    url(r'^(?P<room_id>[0-9]+)/manage/$', views.manage, name='manage'),
    url(r'^(?P<room_id>[0-9]+)/remove_device/(?P<device_id>[0-9]+)$', views.remove_device, name='remove_device'),
    # Forward a stored function invocation to its device.
    url(r'^(?P<function_id>[0-9]+)/send_function/$', views.send_function, name='send_function'),
]
| 37.583333 | 113 | 0.636364 |
107d2e648d67ee5fc83ce06e05d412efb2e98bc1 | 1,063 | py | Python | Owner_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | Owner_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | Owner_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | import sys
import os
import time
import solcx
from solcx import compile_files
from logzero import logger
from web3 import Web3
# Connect to a local Ganache development chain and send transactions from
# its first pre-funded account by default.
w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:7545')) # GANACHE
w3.eth.defaultAccount = w3.eth.accounts[0]
# Make sure the solc 0.8.0 compiler binary is available for compile_files().
print('install solcx 0.8.0..')
solcx.install_solc('0.8.0')
def deploy(contract_file, contract_name):
    """Compile a Solidity source file and deploy one contract from it.

    :param contract_file: path of the Solidity source file
    :param contract_name: name of the contract defined inside that file
    :return: a web3 contract instance bound to the deployed address
    """
    compiled_sol = compile_files([contract_file])
    # solcx keys compiled contracts as "<file>:<ContractName>".
    interface = compiled_sol['{}:{}'.format(contract_file, contract_name)]
    contract = w3.eth.contract(abi=interface['abi'],
                               bytecode='0x'+interface['bin'],
                               bytecode_runtime=interface['bin-runtime'])
    # NOTE(review): the dict passed to constructor() looks like transaction
    # params ({'from': ...}); web3 conventionally takes those in
    # transact(...) -- confirm against the web3.py version in use.
    tx_hash = contract.constructor({'from':w3.eth.accounts[0]}).transact()
    logger.info(f'Owner_tx_hash: {tx_hash}')
    tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
    logger.info(f'Owner_tx_receipt: {tx_receipt}')
    contract_address = tx_receipt['contractAddress']
    logger.info(f'Owner_contract_address: {contract_address}')
    contract_instance = contract(contract_address)
    logger.info(f'Owner_contract_instance: {contract_instance}')
    return contract_instance
| 29.527778 | 71 | 0.761994 |
5449e9a22bb55ff31210acb04bee6393e78a52b4 | 10,775 | py | Python | etsin_finder/rems_service.py | tahme/etsin-finder | ee9207d1af0e6bf40adde9b28e92105e6bbe19df | [
"MIT"
] | null | null | null | etsin_finder/rems_service.py | tahme/etsin-finder | ee9207d1af0e6bf40adde9b28e92105e6bbe19df | [
"MIT"
] | null | null | null | etsin_finder/rems_service.py | tahme/etsin-finder | ee9207d1af0e6bf40adde9b28e92105e6bbe19df | [
"MIT"
] | null | null | null | # This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <servicedesk@csc.fi>
# :license: MIT
"""Used for performing operations related to Fairdata Rems"""
from requests import request, HTTPError
from flask import session
from datetime import datetime
from etsin_finder.cr_service import get_catalog_record_preferred_identifier, get_catalog_record, is_rems_catalog_record
from etsin_finder.app_config import get_fairdata_rems_api_config
from etsin_finder.utils import json_or_empty, FlaskService
from etsin_finder.finder import app
log = app.logger
class RemsAPIService(FlaskService):
    """Client for the Fairdata REMS (Resource Entitlement Management System)
    API. When the service is not enabled in the app configuration, every
    method is a no-op returning False.
    """

    def __init__(self, app, user):
        """Setup Rems API Service from the Flask app configuration.

        Arguments:
            app -- Flask application; its config supplies host/api key.
            user -- user id used for requests made on behalf of the user.
        """
        super().__init__(app)
        rems_api_config = get_fairdata_rems_api_config(app.testing)
        if rems_api_config:
            self.ENABLED = rems_api_config.get('ENABLED', False)
            self.USER_ID = user
            self.API_KEY = str(rems_api_config.get('API_KEY'))
            self.HOST = rems_api_config.get('HOST')
            # 'x-rems-user-id' is overwritten per request in rems_request().
            self.HEADERS = {
                'Accept': 'application/json',
                'x-rems-api-key': self.API_KEY,
                'x-rems-user-id': 'RDowner@funet.fi'
            }
            # Endpoint templates; the remaining '{0}' placeholders are
            # filled in with the resource identifier per call.
            self.REMS_URL = 'https://{0}'.format(self.HOST) + '/api/entitlements?resource={0}'
            self.REMS_ENTITLEMENTS = 'https://{0}'.format(self.HOST) + '/api/entitlements'
            self.REMS_CREATE_USER = 'https://{0}'.format(self.HOST) + '/api/users/create'
            self.REMS_GET_MY_APPLICATIONS = 'https://{0}'.format(self.HOST) + '/api/my-applications/'
            self.REMS_CATALOGUE_ITEMS = 'https://{0}'.format(self.HOST) + '/api/catalogue-items?resource={0}'
            self.REMS_CREATE_APPLICATION = 'https://{0}'.format(self.HOST) + '/api/applications/create'
        elif self.is_testing:
            self.ENABLED = False
        else:
            self.ENABLED = False

    def rems_request(self, method, url, err_message, json=None, user_id='RDowner@funet.fi'):
        """General method for sending requests to REMS.

        Arguments:
            method [string] -- The http verb, GET or POST
            url [string] -- The url for the request
            err_message [string] -- An error message to log if something goes wrong

        Keyword Arguments:
            json dict -- Data to be sent in a POST requests body (default: {None})
            user_id str -- The user id if needed (default: {'RDowner@funet.fi'})

        Returns:
            On success the decoded JSON payload; on failure a
            (message, status code) tuple.
        """
        if not self.ENABLED:
            return False
        self.HEADERS['x-rems-user-id'] = user_id
        # NOTE(review): assert is stripped under "python -O"; an explicit
        # ValueError would be safer for input validation.
        assert method in ['GET', 'POST'], 'Method attribute must be one of [GET, POST].'
        log.info('Sending {0} request to {1}'.format(method, url))
        try:
            # verify=False disables TLS certificate verification --
            # presumably intentional for this deployment; confirm.
            if json:
                rems_api_response = request(method=method, headers=self.HEADERS, url=url, json=json, verify=False, timeout=3)
            else:
                rems_api_response = request(method=method, headers=self.HEADERS, url=url, verify=False, timeout=3)
            rems_api_response.raise_for_status()
        except Exception as e:
            log.warning(err_message)
            # HTTPError is only raised by raise_for_status(), so the
            # response object is guaranteed to exist on this branch.
            if isinstance(e, HTTPError):
                log.warning('Response status code: {0}\nResponse text: {1}'.format(rems_api_response.status_code, json_or_empty(rems_api_response)))
                return 'HTTPError', rems_api_response.status_code
            else:
                log.error('Error in request\n{0}'.format(e))
                return 'Error in request', 500
        # NOTE(review): .json() is decoded twice (log + return); caching it
        # in a local would avoid the double parse.
        log.info('rems_api_response: {0}'.format(rems_api_response.json()))
        return rems_api_response.json()

    def get_user_applications(self):
        """Get all applications which the current user can see.

        Returns:
            [list] -- List of application dicts
        """
        if not self.ENABLED:
            return False
        log.info('Get all applications for current user')
        method = 'GET'
        url = self.REMS_GET_MY_APPLICATIONS
        err_message = 'Failed to get applications from Fairdata REMS'
        return self.rems_request(method, url, err_message, user_id=self.USER_ID)

    def create_application(self, id):
        """Creates application in REMS.

        Arguments:
            id [int] -- Catalogue item id

        Returns:
            [dict] -- Dict with info if the operation was successful
        """
        if not self.ENABLED:
            return False
        assert isinstance(id, int), 'id should be integer, id: {0}'.format(id)
        log.info('Create REMS application for catalogue item with id: {0}'.format(id))
        method = 'POST'
        url = self.REMS_CREATE_APPLICATION
        err_message = 'Failed to create application'
        json = {'catalogue-item-ids': [id]}
        return self.rems_request(method, url, err_message, json=json, user_id=self.USER_ID)

    def get_catalogue_item_for_resource(self, resource):
        """Get catalogue item for resource from REMS.

        Arguments:
            resource [string] -- The preferred identifier of the resource

        Returns:
            [list] -- List containing dict of catalogue item
        """
        if not self.ENABLED:
            return False
        assert isinstance(resource, str), 'resource should be string, resource: {0}'.format(resource)
        log.info('Get catalog item for resource: {0}'.format(resource))
        method = 'GET'
        url = self.REMS_CATALOGUE_ITEMS.format(resource)
        err_message = 'Failed to get catalogue item data from Fairdata REMS for resource: {0}'.format(resource)
        return self.rems_request(method, url, err_message)

    def create_user(self, userdata):
        """Create user in REMS.

        Arguments:
            userdata [dict] -- Dict with name, user_id and email.

        Returns:
            [dict] -- Information if the creation succeeded.
        """
        if not self.ENABLED:
            return False
        assert isinstance(userdata, dict) and userdata.keys() >= {'userid', 'name', 'email'}, \
            'usedata should be a dict containing userid, name and email.'
        log.info('Create user in REMS')
        method = 'POST'
        url = self.REMS_CREATE_USER
        err_message = 'Failed to create user to REMS'
        json = userdata
        return self.rems_request(method, url, err_message, json=json)

    def entitlements(self):
        """Get all approved catalog records.

        Returns:
            [list] -- List of dicts with entitlements.
        """
        if not self.ENABLED:
            return False
        log.info('Get all approved catalog records')
        method = 'GET'
        url = self.REMS_ENTITLEMENTS
        err_message = 'Failed to get entitlement data from Fairdata REMS for user_id: {0}'.format(self.USER_ID)
        return self.rems_request(method, url, err_message)

    def get_rems_permission(self, rems_resource):
        """Check if user is entitled for a REMS resource.

        Arguments:
            rems_resource [string] -- The resource

        Returns:
            [boolean] -- True/False if user is entitled.
        """
        if not self.ENABLED:
            return False
        assert rems_resource, 'rems_resource should be string, rems_resource: {0}'.format(rems_resource)
        log.info('Get entitlements for resource: {0}'.format(rems_resource))
        method = 'GET'
        url = self.REMS_URL.format(rems_resource)
        err_message = 'Failed to get entitlement data from Fairdata REMS for user_id: {0}, resource: {1}'.format(self.USER_ID, rems_resource)
        # NOTE(review): on failure rems_request returns a 2-tuple whose
        # len() is also > 0, so errors read as "entitled" -- confirm this
        # is not the intended behaviour.
        return len(self.rems_request(method, url, err_message)) > 0
def get_application_state_for_resource(cr, user_id):
    """Get the state of the user's applications for a catalog record.

    Arguments:
        cr [dict] -- Catalog record
        user_id [string] -- The user id

    Returns:
        [string] -- The application state ('disabled' when REMS is off,
        'apply' when no application exists, otherwise the newest
        application's state), or False on error.
    """
    _rems_api = RemsAPIService(app, user_id)
    if _rems_api.ENABLED:
        state = 'apply'
    else:
        return 'disabled'
    if not user_id or not cr:
        log.error('Failed to get user application state')
        return False
    pref_id = get_catalog_record_preferred_identifier(cr)
    if not pref_id:
        log.error('Could not get preferred identifier.')
        return False
    user_applications = _rems_api.get_user_applications()
    if not isinstance(user_applications, list) or not user_applications:
        log.warning('Could not get any applications belonging to user.')
        return False
    log.info('Got {0} applications for the user.'.format(len(user_applications)))
    # Sort applications to get the newest first (in place).
    user_applications.sort(reverse=True, key=lambda x: datetime.strptime(x['application/last-activity'], '%Y-%m-%dT%H:%M:%S.%fZ'))
    for application in user_applications:
        resources = application.get('application/resources')
        for resource in resources:
            if resource.get('resource/ext-id') == pref_id:
                # States arrive namespaced (e.g. "application.state/draft");
                # keep only the part after the slash.
                state = application.get('application/state').split('/')[1]
                # Set the application id to the session so it can be used directly
                # by REMSApplyForPermission if the user has already created applications.
                session['REMS_application_id'] = application.get('application/id')
                return state
    # Set the value to None if no application for the resource is found.
    session['REMS_application_id'] = None
    return state
def get_user_rems_permission_for_catalog_record(cr_id, user_id):
    """Get info about whether user is entitled for a catalog record.

    Arguments:
        cr_id [string] -- The catalog record identifier.
        user_id [string] -- The user id.

    Returns:
        [boolean] -- Returns True/False if user is entitled.
    """
    if not user_id or not cr_id:
        log.error('Failed to get rems permission for catalog record. user_id: {0} or cr_id: {1} is invalid'.format(user_id, cr_id))
        return False
    cr = get_catalog_record(cr_id, False, False)
    # Only REMS-managed catalog records can carry entitlements.
    if cr and is_rems_catalog_record(cr):
        pref_id = get_catalog_record_preferred_identifier(cr)
        if not pref_id:
            log.error('Could not get cr_id: {0} preferred identifier.'.format(cr_id))
            return False
        _rems_api = RemsAPIService(app, user_id)
        return _rems_api.get_rems_permission(pref_id)
    log.warning('Invalid catalog record or not a REMS catalog record. cr_id: {0}'.format(cr_id))
    return False
| 38.345196 | 148 | 0.638051 |
e3bb9b1cce00a22f0495fbdf5a953d3c7d1075dd | 185 | py | Python | Hackerrank/Contests/Project Euler/euler001.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | 1 | 2021-01-10T13:29:21.000Z | 2021-01-10T13:29:21.000Z | Hackerrank/Contests/Project Euler/euler001.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | Hackerrank/Contests/Project Euler/euler001.py | PROxZIMA/Competitive-Coding | ba6b365ea130b6fcaa15c5537b530ed363bab793 | [
"MIT"
] | null | null | null | for _ in range(int(input())):
n = int(input())
occ3, occ5, occ15 = (n-1)//3, (n-1)//5, (n-1)//15
print(3*(occ3*(occ3+1))//2 + 5*(occ5*(occ5+1))//2 - 15*(occ15*(occ15+1))//2) | 46.25 | 80 | 0.497297 |
55fa187f5a1dcda6c909d5000a56a6ba09caf913 | 364 | py | Python | virtualcrypto/__init__.py | sevenc-nanashi/virtualcrypto-python | 28d48f63ecdb191b4ba14fdadf2a06dc3697f0a7 | [
"MIT"
] | 1 | 2022-02-20T15:12:23.000Z | 2022-02-20T15:12:23.000Z | virtualcrypto/__init__.py | sevenc-nanashi/virtualcrypto-python | 28d48f63ecdb191b4ba14fdadf2a06dc3697f0a7 | [
"MIT"
] | 2 | 2021-05-05T03:21:35.000Z | 2021-05-05T13:57:15.000Z | virtualcrypto/__init__.py | sevenc-nanashi/virtualcrypto-python | 28d48f63ecdb191b4ba14fdadf2a06dc3697f0a7 | [
"MIT"
] | 3 | 2021-03-07T12:01:54.000Z | 2021-05-29T22:22:00.000Z | """Top-level package for VirtualCrypto.py."""
from .structs import User, Currency, Claim, ClaimStatus, Scope, Balance
from .errors import VirtualCryptoException, MissingScope, BadRequest
from .client import VirtualCryptoClient
from .async_client import AsyncVirtualCryptoClient
__author__ = """sizumita"""
__email__ = 'contact@sumidora.com'
__version__ = '0.1.3'
| 36.4 | 71 | 0.796703 |
ea76eb8683a0dee5e72fac1a17567abbe5cfcf17 | 3,608 | py | Python | server/dockyard/var.py | tor4z/Galileo-dockyard | c7f65c1466a1450315935ad03a5a504e7bdb1660 | [
"MIT"
] | null | null | null | server/dockyard/var.py | tor4z/Galileo-dockyard | c7f65c1466a1450315935ad03a5a504e7bdb1660 | [
"MIT"
] | null | null | null | server/dockyard/var.py | tor4z/Galileo-dockyard | c7f65c1466a1450315935ad03a5a504e7bdb1660 | [
"MIT"
] | null | null | null | import threading
from tornado.ioloop import IOLoop
from dockyard.const import ExpStatus
import logging
class __GlobalVar:
    """Process-wide singleton holding lazily created shared services.

    Use the module-level ``GLOBAL`` instance. Shared objects (mongo client,
    task queue, IO loop, URL routes, log driver) are created on first use
    and cached in the class-level ``__DATA`` dictionary.
    """

    __INSTANCE = None           # the singleton instance
    __DATA = {}                 # name -> lazily created shared object
    __INITIATED = False         # one-shot guard for initialize()
    __LOCK = threading.Lock()

    # common mongo document keys
    MID = "_id"
    MDELETE = "__deleted__"
    MCREATE = "__create__"
    MUPDATE = "__update__"

    # log level names
    LOG_WARN = "warn"
    LOG_FATAL = "fatal"
    LOG_SUCCESS = "success"
    LOG_ERROR = "error"
    LOG_INFO = "info"
    SYS_ORIGIN = "system"

    # NOTE(review): the trailing commas make the first four values
    # one-element TUPLES, not strings. That looks unintentional, but
    # callers may depend on it, so it is left unchanged -- confirm.
    LOG_ERROR_LEVEL = "_error",
    LOG_FATAL_LEVEL = "_fatal",
    LOG_SUCCESS_LEVEL = "_success",
    LOG_WARN_LEVEL = "_warn",
    LOG_PUTS_LEVEL = "_put"

    # channel names
    CHAN_GLOBAL = "global_chan"
    CHAN_BUILD = "build_chan"
    CHAN_LOG = "log_chan"

    def __new_instance(self, name, cls, *args, **kwargs):
        """Create and cache ``cls(*args, **kwargs)`` under ``name`` if absent."""
        if not self.__DATA.get(name):
            self.__DATA[name] = cls(*args, **kwargs)
        return self.__DATA[name]

    def mongo(self, host=None, port=None, database=None):
        """Return ``database`` from a cached MongoClient.

        host/port/database must all be supplied on the first call;
        later calls reuse the cached client.
        """
        name = "mongo"
        if not self.__DATA.get(name):
            from pymongo.mongo_client import MongoClient
            if not host or not port or not database:
                raise Exception(ExpStatus["STAT_EXP_INIT_MONGO"])
            self.__new_instance(name, MongoClient, host, port)
        return self.__DATA[name][database]

    @property
    def tq(self):
        """Singleton task queue."""
        name = "tq"
        if not self.__DATA.get(name):
            from dockyard.service.task import TaskQueue
            self.__new_instance(name, TaskQueue)
        return self.__DATA[name]

    @classmethod
    def go(cls, func, *args, **kwargs):
        """Schedule ``func(*args, **kwargs)`` on the shared tornado IOLoop."""
        name = "go"
        # BUGFIX: was ``if not cls.__DATA[name]`` which raised KeyError on
        # the very first call; use .get() so the loop is created lazily.
        if not cls.__DATA.get(name):
            cls.__DATA[name] = IOLoop.instance()
        cls.__DATA[name].add_callback(func, *args, **kwargs)

    @property
    def routes(self):
        """Mutable list of registered URL routes (shared, created on demand)."""
        name = "routes"
        if not self.__DATA.get(name):
            self.__DATA[name] = []
        return self.__DATA[name]

    @property
    def logging(self):
        """Singleton Log driver.

        NOTE(review): imports ``server.dockyard.driver.log`` while the other
        helpers import from ``dockyard.*`` -- possibly a stale path; verify.
        """
        name = "logging"
        if not self.__DATA.get(name):
            from server.dockyard.driver.log import Log
            self.__new_instance(name, Log)
        return self.__DATA[name]

    @staticmethod
    def puts(*args, **kwargs):
        """Shortcut for logging.info."""
        logging.info(*args, **kwargs)

    def initialize(self):
        """Run one-time service start-up (thread-safe, idempotent)."""
        if not self.__INITIATED:
            with self.__LOCK:
                if not self.__INITIATED:
                    self.__INITIATED = True
                    from dockyard.service.task import init_queue
                    from dockyard.service.task import init_routine
                    from dockyard.service.interface import init_interface
                    self.puts("Init task queue ...")
                    init_queue()
                    self.puts("Init routine ...")
                    init_routine()
                    self.puts("Init interface ...")
                    init_interface()
                    self.puts("Resume task queue ...")
                    self.tq.resume()

    @classmethod
    def instance(cls, *args, **kwargs):
        """Return the process-wide singleton (double-checked locking)."""
        if cls.__INSTANCE is None:
            with cls.__LOCK:
                if cls.__INSTANCE is None:
                    cls.__INSTANCE = cls(*args, **kwargs)
        return cls.__INSTANCE


GLOBAL = __GlobalVar.instance()
| 31.649123 | 73 | 0.528548 |
00803dfe72d3421b2809997701cbaa8e17b57c2d | 50,992 | py | Python | python/tracer.py | vorfol/vms-ide | bbd584c3ae7074bae2024668220794d10d13f174 | [
"MIT"
] | 2 | 2021-01-23T20:30:59.000Z | 2021-03-18T09:11:41.000Z | python/tracer.py | vorfol/vms-ide | bbd584c3ae7074bae2024668220794d10d13f174 | [
"MIT"
] | 7 | 2021-02-27T01:52:35.000Z | 2021-04-07T12:34:39.000Z | python/tracer.py | vorfol/vms-ide | bbd584c3ae7074bae2024668220794d10d13f174 | [
"MIT"
] | 1 | 2021-04-06T13:24:03.000Z | 2021-04-06T13:24:03.000Z | import ctypes
import signal
import socket
import sys
import threading
import time
import os.path
import collections
import base64
import re
# settings
class SETTINGS:
    """Control-channel defaults: where the IDE-side listener is expected."""
    HOST = '127.0.0.1'  # the controller is always on the local machine
    PORT = 54326        # default control port (Tracer is given a port explicitly)
# messages to send
# messages to send
class MESSAGE:
    """Outgoing wire tokens (tracer -> IDE), one per event/reply kind."""
    AMEND = 'AMEND'              # reply to a variable-amend request
    BP_CONFIRM = 'BP_CONFIRM'
    BP_CONFIRM64 = 'BP_CONFIRM64'
    BP_RESET = 'BP_RESET'
    BP_RESET64 = 'BP_RESET64'
    BP_WAIT = 'BP_WAIT'
    BP_WAIT64 = 'BP_WAIT64'
    BREAK = 'BREAK'              # stopped on a confirmed breakpoint
    CONTINUED = 'CONTINUED'
    DEBUG = 'DEBUG'              # greeting sent right after connecting
    DEVELOPER = 'DEVELOPER'      # reply to mode command
    DISPLAY = 'DISPLAY'
    DISPLAY64 = 'DISPLAY64'      # base64-wrapped variant of DISPLAY
    ENTRY = 'ENTRY'              # auto-pause on entering the traced file
    EVAL = 'EVALUATE'
    EXCEPTION = 'EXCEPTION'      # stopped on an unhandled exception
    EXECUTE = 'EXECUTE'
    EXITED = 'EXITED'
    FRAME = 'FRAME'
    FRAME64 = 'FRAME64'          # base64-wrapped variant of FRAME
    GOTO = 'GOTO'
    GOTO_TARGETS = 'GOTO_TARGETS'
    INFO = 'INFO'
    PAUSED = 'PAUSED'
    SIGNAL = 'SIGNAL'            # OS signal received (followed by ':<num>')
    STEPPED = 'STEPPED'          # stopped after a step request
    SYNTAX_ERROR = 'SYNTAX_ERROR'
    THREADS = 'THREADS'
    RADIX = 'RADIX'
    PATHFILTER = 'PATHFILTER'
# command to receive
# class COMMAND:
# AMEND = 'a' # a ident frame name value
# BP_RESET = 'bpr' # bpr [file [line]]
# BP_SET = 'bps' # bps file line
# CONTINUE = 'c'
# DISPLAY = 'd' # d[h] [ident [frame [fullName [start [count]]]]] // frame is zero-based
# DISPLAY64 = 'd64' # d64[h] [ident [frame [fullName [start [count]]]]] // base64 coded
# EVAL = 'v' # v expression // evaluate expression in the current frame
# EXEC = 'e' # e expression // execute expression in the current frame
# FRAME = 'f' # f [ident [frameStart [frameNum]]] // frame is zero-based
# FRAME64 = 'f64' # f64 [ident [frameStart [frameNum]]] // base64 coded
# GOTO = 'g' # g ident line
# GOTO_TARGETS = 'gt' # gt file line // test if we can go to target from current place
# INFO = 'i'
# MODE = 'm' # m [0|1] // user | developer
# NEXT = 'n' # n [ident] // step over
# PAUSE = 'p'
# RETURN = 'r' # r [ident] // step out
# STEP = 's' # s [ident] // step in
# THREADS = 't'
# RADIX = 'x' # x [10|16] // default 10
# PATHFILTER = 'y' # y [path] // always trace this path
class COMMAND_REGEXP:
    """Compiled validators for incoming IDE commands (see table above).

    The handlers re-parse the command with ``cmd.split()``/``partition``;
    these patterns mostly gate which handler runs.
    """
    AMEND = re.compile('^a (\\d+) (\\d+) (\\S+) (.+)$')
    # BUGFIX: was '^bpr (?:(\\S)+(?: (\\S+))?)?$' -- bare 'bpr' (reset all,
    # per the command table) could never match because of the mandatory
    # space, and '(\\S)+' captured only the last character.
    BP_RESET = re.compile('^bpr(?: (\\S+)(?: (\\S+))?)?$')
    BP_SET = re.compile('^bps (\\S+) (\\d+)$')
    CONTINUE = re.compile('^c$')
    # BUGFIX: the innermost '(?: (\\d+))' lacked its '?', so a display
    # request giving 'start' but no 'count' (e.g. 'd 1 0 name 5') was
    # rejected even though _doDisplay handles the 5-argument form.
    DISPLAY = re.compile('^d(h|o)?(?: (\\d+)(?: (\\d+)(?: (\\S+)(?: (\\d+)(?: (\\d+))?)?)?)?)?$')
    DISPLAY64 = re.compile('^d64(h|o)?(?: (\\d+)(?: (\\d+)(?: (\\S+)(?: (\\d+)(?: (\\d+))?)?)?)?)?$')
    EVAL = re.compile('^v (.+)$')
    EXEC = re.compile('^e (.+)$')
    FRAME = re.compile('^f(?: (\\d+)(?: (\\d+)(?: (\\d+))?)?)?$')
    FRAME64 = re.compile('^f64(?: (\\d+)(?: (\\d+)(?: (\\d+))?)?)?$')
    GOTO = re.compile('^g (\\d+) (\\d+)$')
    GOTO_TARGETS = re.compile('^gt (\\S+) (\\d+)$')
    INFO = re.compile('^i$')
    MODE = re.compile('^m(?: ([0|1]))?$')
    NEXT = re.compile('^n(?: (\\d+))?$')
    PAUSE = re.compile('^p$')
    RETURN = re.compile('^r(?: (\\d+))?$')
    STEP = re.compile('^s(?: (\\d+))?$')
    THREADS = re.compile('^t$')
    RADIX = re.compile('^x (8|10|16)$')
    PATHFILTER = re.compile('^y(?: \\S+)?$')
class Tracer:
    def __init__(self, port, insensitive=False, developerMode=False):
        """Debug tracer core.

        port -- TCP port of the controlling IDE listener on 127.0.0.1.
        insensitive -- lower-case all file paths (case-insensitive FS, e.g. VMS).
        developerMode -- trace every file, not only project/filtered ones.
        """
        self._insensitive = insensitive
        # self._postDebugMessage = None
        self._radix = 10                 # display radix for ints: 8, 10 or 16
        self._maxSendStrLen = 128        # longest string chunk sent to the IDE
        self._maxKeyStrLen = 32          # longest dict-key repr sent to the IDE
        self._developerMode = developerMode
        self._pathFilter = ''            # extra path prefix that is always traced
        self._re_compile = re.compile
        # co_lnotab line increments became signed in CPython 3.6
        self._co_lnotab_signed = sys.version_info.major >= 3 and sys.version_info.minor >= 6
        # scalar types whose value is sent directly (everything else is expanded)
        self._knownValueTypes = [int, str, float, bool, complex, type(None)]
        self._port = port
        self._fileName = __file__        # this file; its own frames are skipped
        self._socket = None              # control connection to the IDE
        self._sendBuffer = b''           # unsent outgoing bytes
        self._recvBuffer = b''           # received, not-yet-split incoming bytes
        self._oldSysTrace = None         # trace hook to restore on cleanup
        self._paused = False
        self._fileWaitingFor = None      # file whose first traced line auto-pauses
        self._startTracing = False
        self._originalSigint = None
        self._originalSigbreak = None
        self._threads = {} # thread enrties by [thread id]
        self._mainThread = None
        self._steppingThread = None      # ident a step command was armed for
        self._steppingLevel = None       # max frame depth to stop at (None = step in)
        self._breakpointsConfirmed = collections.defaultdict(set) # confirmed break line list by [file name]
        self._breakpointsWait = collections.defaultdict(set) # wait break line list by [file name]
        self._lines = collections.defaultdict(dict) # all usable line list by [file name [ function name ]]
        self._files = set()              # every file ever seen by the trace hook
        # incapsulate functions from other classes (kept as attributes so tests
        # and shutdown paths can stub/replace them)
        self._lockTrace = threading.Lock()
        self._currentThread = threading.current_thread
        self._sleep = time.sleep
        self._setTrace = sys.settrace
        self._setThreadTrace = threading.settrace
        self._versionInfo = sys.version_info
        self._changeLocals = ctypes.pythonapi.PyFrame_LocalsToFast
        self._Obj = ctypes.py_object
        self._Int = ctypes.c_int
        self._os_path_basename = os.path.basename
        self._os_path_abspath = os.path.abspath
        self._os_path_splitext = os.path.splitext
        self._setSignal = signal.signal
        self._messages = MESSAGE
        # self._commands = COMMAND
        self._commands_regexp = COMMAND_REGEXP
        self._sig_int = signal.SIGINT
        # self._sig_break = signal.SIGBREAK
        self._sig_def = signal.SIG_DFL
        self._b64decode = base64.b64decode
        self._b64encode = base64.b64encode
        # DEBUG
        # self._enter_counter = 0
def _setupTrace(self):
self._setSignal(self._sig_int, self._signalHandler)
# self._setSignal(self._sig_break, self._signalHandler)
self._connect()
self._oldSysTrace = sys.gettrace()
self._setTrace(self._traceFunc)
self._setThreadTrace(self._traceFunc)
def _connect(self):
self._open()
self._sendDbgMessage(self._messages.DEBUG)
def _disconnect(self):
self._sendDbgMessage(self._messages.EXITED)
self._close()
def _open(self):
self._socket = None
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((SETTINGS.HOST, self._port))
self._socket.setblocking(False)
except:
# print('Connect failed')
self._socket = None
def _close(self):
try:
self._socket.close()
except:
pass
finally:
self._socket = None
def _cleanupTrace(self):
self._setTrace(self._oldSysTrace)
self._setThreadTrace(self._oldSysTrace)
# self._setSignal(self._sig_break, self._sig_def)
self._setSignal(self._sig_int, self._sig_def)
self._disconnect()
def _signalHandler(self, signum, frame):
self._sendDbgMessage(self._messages.SIGNAL + ':' + str(signum))
self._paused = True
def _isConnected(self):
return bool(self._socket)
    def _sendDbgMessage(self, message):
        """ Also add EOL at the end of message """
        # Buffer first so nothing is lost while disconnected; then try to
        # flush whatever the non-blocking socket will take.
        self._sendBuffer += message.encode()
        self._sendBuffer += b'\n'
        if self._isConnected():
            try:
                sent = self._socket.send(self._sendBuffer)
                # keep the unsent tail for the next call
                self._sendBuffer = self._sendBuffer[sent:]
            except Exception as ex:
                if isinstance(ex, IOError):
                    # NOTE(review): errno % 1000 looks like an OpenVMS errno
                    # encoding; 35 == EWOULDBLOCK (socket simply not ready,
                    # keep the connection) -- confirm on target platform.
                    errno = ex.errno % 1000
                    if errno != 35:
                        self._close()
                else:
                    self._close()
            except:
                # non-Exception (BaseException) failures also drop the link
                self._close()
    def _readDbgMessage(self):
        """Return one complete command line from the IDE, or None.

        Non-blocking: pulls available bytes into _recvBuffer and returns the
        first '\n'-terminated line (without the EOL). An empty recv means the
        peer closed, so the socket is dropped.
        """
        if self._isConnected():
            try:
                data = self._socket.recv(4096)
                if data:
                    self._recvBuffer += data
                else:
                    # orderly shutdown by the peer
                    self._close()
            except Exception as ex:
                if isinstance(ex, IOError):
                    # NOTE(review): errno % 1000 appears to be an OpenVMS
                    # errno encoding; 35 == EWOULDBLOCK (no data yet) -- confirm.
                    errno = ex.errno % 1000
                    if errno != 35:
                        self._close()
                else:
                    self._close()
            except:
                self._close()
        if self._recvBuffer:
            idx = self._recvBuffer.find(b'\n')
            if idx >= 0:
                line = self._recvBuffer[:idx].decode()
                self._recvBuffer = self._recvBuffer[idx+1:]
                return line
        return None
def _processFile(self, fileName):
if self._developerMode:
# tracer in developer mode
return True
if fileName.startswith(self._cwd):
# file is local file
return True
if self._pathFilter and fileName.startswith(self._pathFilter):
# file in filtered folder
return True
if fileName in self._breakpointsConfirmed.keys() and len(self._breakpointsConfirmed[fileName]) > 0:
# file has confirmed breakpoint
return True
if fileName in self._breakpointsWait.keys() and len(self._breakpointsWait[fileName]) > 0:
# file has waiting breakpoint
return True
return False
    def _traceFunc(self, frame, event, arg):
        """ Do not forget not sending any data without locking (but ENTRY) """
        # This is the sys.settrace/threading.settrace hook: called for every
        # 'call'/'line'/'return'/'exception' event in every traced thread.
        # self._enter_counter = self._enter_counter + 1
        # wait until tracing enabled
        if not self._startTracing:
            if not self._fileWaitingFor:
                return None
            return self._traceFunc
        currentFile = self._canonizeFile(frame.f_code.co_filename)
        if not currentFile in self._files:
            self._files.add(currentFile)
            # self._sendDbgMessage('NEW FILE: %s' % currentFile)
        # skip this file tracer.py
        if currentFile == self._fileName:
            if not self._fileWaitingFor:
                return None
            return self._traceFunc
        ident = self._currentThread().ident
        if not (ident in self._threads and self._threads[ident]['file'] == currentFile):
            # we are not in the same file as we did on the previous step
            if not (self._steppingThread == ident and self._steppingLevel == None):
                # we are not in step into mode
                # so test if we may skip this file
                if not self._processFile(currentFile):
                    if not self._fileWaitingFor:
                        return None
                    return self._traceFunc
        # wait until tracing file entered
        if self._fileWaitingFor:
            if self._fileWaitingFor != currentFile:
                return self._traceFunc
            # now we are ready to trace
            self._fileWaitingFor = None
            # autopause (sent before the lock is taken -- see docstring)
            self._sendDbgMessage(self._messages.ENTRY)
            self._paused = True
        # take an ident
        if self._mainThread == None:
            self._mainThread = ident
        # create current entry
        entry = {
            'ident': ident,
            'frame': frame,
            'file': currentFile,
            'event': event,
            'arg': arg,
            'paused': True,
            'level': 0,
            'exception': None,
            'traceback': None
        }
        if ident in self._threads:
            # get previous entry information from dictionary
            entry['level'] = self._threads[ident]['level']
            entry['exception'] = self._threads[ident]['exception']
            entry['traceback'] = self._threads[ident]['traceback']
        # save entry to dictionary
        self._threads[ident] = entry
        # frame level tracking
        if event == 'call':
            entry['level'] = entry['level'] + 1
        if event == 'return':
            entry['level'] = entry['level'] - 1
        # clear exception info if it is already handled
        if event not in ['exception', 'return']:
            entry['exception'] = None
            entry['traceback'] = None
        with self._lockTrace:
            # point of tracing
            if event == 'call':
                # test if that function not in list
                code_name = frame.f_code.co_name + ":" + str(frame.f_lineno)
                code_lines = self._linesByFile(currentFile)
                if code_name not in code_lines:
                    # collect usable code lines by decoding co_lnotab
                    # (byte pairs of <code delta, line delta>)
                    lines = []
                    lineno = frame.f_lineno
                    lines.append(lineno)
                    tail = frame.f_code.co_lnotab
                    while len(tail) > 1:
                        line_incr = tail[1]
                        tail = tail[2:]
                        if line_incr:
                            if isinstance(line_incr, str):
                                # Python 2: co_lnotab is a str of chars
                                line_incr = ord(line_incr)
                            if self._co_lnotab_signed:
                                # since 3.6 the line delta is signed
                                if line_incr > 127:
                                    line_incr = line_incr - 256
                            lineno += line_incr
                            lines.append(lineno)
                    code_lines[code_name] = sorted(lines)
                    # waiting breakpoints may now be confirmable
                    self._checkFileBreakpoints(currentFile, lines)
                    # self._sendDbgMessage('NEW FRAME: %s %s %s' % (currentFile, frame.f_code.co_name, repr(lines)))
            # examine exception and save it
            if event == 'exception':
                entry['exception'] = arg[1]
                entry['traceback'] = arg[2]
            # pause on unhandled exception
            if entry['exception'] != None and entry['level'] <= 0:
                self._sendDbgMessage(self._messages.EXCEPTION + ' ' + repr(entry['exception']))
                self._paused = True
            # examine breakpoint
            if not self._paused and frame.f_lineno in self._breakpointsConfirmed[currentFile]:
                self._sendDbgMessage(self._messages.BREAK)
                self._paused = True
            # tests runtime commands (commands valid while running)
            cmd = self._readDbgMessage()
            while cmd:
                if self._commands_regexp.PAUSE.match(cmd):
                    if not self._paused:
                        self._sendDbgMessage(self._messages.PAUSED)
                        self._paused = True
                elif self._commands_regexp.INFO.match(cmd):
                    self._showInfo(ident)
                # breakpoints
                elif self._commands_regexp.BP_SET.match(cmd):
                    self._doSetBreakPoint(cmd)
                elif self._commands_regexp.BP_RESET.match(cmd):
                    self._doResetBreakPoint(cmd)
                cmd = self._readDbgMessage()
            # test stepping
            if not self._paused and \
               self._steppingThread == ident and \
               (self._steppingLevel == None or self._steppingLevel >= entry['level'] and event != 'return'):
                self._steppingThread = None
                self._steppingLevel = None
                self._paused = True
                self._sendDbgMessage(self._messages.STEPPED)
            # pause loop: poll the IDE for commands until resumed
            while self._paused and self._isConnected():
                if cmd:
                    # continue
                    if self._commands_regexp.CONTINUE.match(cmd):
                        self._doContinue()
                        break # break pause loop
                    # step
                    elif self._commands_regexp.STEP.match(cmd) or \
                         self._commands_regexp.NEXT.match(cmd) or \
                         self._commands_regexp.RETURN.match(cmd):
                        self._doStepping(cmd, ident, entry)
                        break # break pause loop
                    # show threads
                    elif self._commands_regexp.THREADS.match(cmd):
                        self._showThreads(ident)
                    # change variable
                    elif self._commands_regexp.AMEND.match(cmd):
                        self._doAmend(cmd)
                    # show frames (FRAME64 first: 'f64...' also matches 'f...')
                    elif self._commands_regexp.FRAME64.match(cmd):
                        self._doFrames(cmd, ident, True)
                    elif self._commands_regexp.FRAME.match(cmd):
                        self._doFrames(cmd, ident, False)
                    # display variable
                    elif self._commands_regexp.DISPLAY64.match(cmd):
                        self._doDisplay(cmd, ident, True)
                    elif self._commands_regexp.DISPLAY.match(cmd):
                        self._doDisplay(cmd, ident, False)
                    # information (unused)
                    elif self._commands_regexp.INFO.match(cmd):
                        self._showInfo(ident)
                    # breakpoints
                    elif self._commands_regexp.BP_SET.match(cmd):
                        self._doSetBreakPoint(cmd)
                    elif self._commands_regexp.BP_RESET.match(cmd):
                        self._doResetBreakPoint(cmd)
                    elif self._commands_regexp.GOTO_TARGETS.match(cmd):
                        self._doGotoTargets(cmd, ident)
                    elif self._commands_regexp.GOTO.match(cmd):
                        self._doGoto(cmd)
                    elif self._commands_regexp.MODE.match(cmd):
                        self._doMode(cmd)
                    # show threads
                    elif self._commands_regexp.RADIX.match(cmd):
                        self._setRadix(cmd)
                    elif self._commands_regexp.PATHFILTER.match(cmd):
                        self._setFilter(cmd)
                    elif self._commands_regexp.EXEC.match(cmd):
                        self._execExpression(cmd, ident)
                    elif self._commands_regexp.EVAL.match(cmd):
                        self._evalExpression(cmd, ident)
                    else:
                        self._sendDbgMessage('%s: %s' % (self._messages.SYNTAX_ERROR, cmd))
                # wait and read command again
                self._sleep(0.3)
                cmd = self._readDbgMessage()
        # ---------------------------------------------
        entry['paused'] = False
        if entry['level'] <= 0:
            # last line in this thread
            del self._threads[ident]
            if entry['exception'] != None:
                # unhandled exception
                if ident == self._mainThread:
                    self._sendDbgMessage(self._messages.EXITED)
                raise SystemExit()
        return self._traceFunc
def _canonizeFile(self, fileName):
if fileName.startswith('./'):
fileName = self._cwd + '/' + fileName[2:]
elif not fileName.startswith(('/', '<')):
fileName = self._cwd + '/' + fileName
if self._insensitive:
return fileName.lower()
return fileName
def _doGoto(self, cmd):
locals_args = cmd.split()
try:
ident = int(locals_args[1])
nextLine = int(locals_args[2])
currFrame, isPostMortem = self._getFrame(ident, 0)
isPostMortem = isPostMortem
if currFrame != None:
currFrame.f_lineno = nextLine
self._sendDbgMessage('%s %s' % (self._messages.GOTO, 'ok'))
# self._doStepping('s %i' % ident, ident, self._threads[ident])
except Exception as ex:
self._sendDbgMessage('%s %s %s' % (self._messages.GOTO, 'failed', repr(ex)))
def _setRadix(self, cmd):
locals_args = cmd.split()
try:
self._radix = int(locals_args[1])
if self._radix not in (8, 10, 16):
self._radix = 10
self._sendDbgMessage('%s %i' % (self._messages.RADIX, self._radix))
except Exception as ex:
self._sendDbgMessage('%s %s %s' % (self._messages.RADIX, 'failed', repr(ex)))
def _setFilter(self, cmd):
try:
self._pathFilter = cmd[2:].strip()
if self._insensitive:
self._pathFilter = self._pathFilter.lower()
self._sendDbgMessage('%s %s %s' % (self._messages.PATHFILTER, 'set', repr(self._pathFilter)))
except Exception as ex:
self._sendDbgMessage('%s %s %s' % (self._messages.PATHFILTER, 'failed', repr(ex)))
def _execExpression(self, cmd, ident):
try:
expression = cmd[2:].strip()
frame, isPostMortem = self._getFrame(ident, 0)
isPostMortem = isPostMortem
if frame != None:
exec(expression, globals(), frame.f_locals)
except Exception as ex:
self._sendDbgMessage('%s %s %s' % (self._messages.EXECUTE, 'failed', repr(ex)))
def _evalExpression(self, cmd, ident):
try:
expression = cmd[2:].strip()
frame, isPostMortem = self._getFrame(ident, 0)
isPostMortem = isPostMortem
if frame != None:
result = eval(expression, globals(), frame.f_locals)
self._sendDbgMessage(repr(result))
except Exception as ex:
self._sendDbgMessage('%s %s %s' % (self._messages.EXECUTE, 'failed', repr(ex)))
    def _doGotoTargets(self, cmd, ident):
        """Handle 'gt file line': report whether a jump from the current
        position to (file, line) is possible.

        A jump is allowed only inside the same file AND when both the current
        line and the target line belong to the same collected code object
        (so the interpreter can relocate f_lineno safely).
        """
        locals_args = cmd.split()
        try:
            frame = self._threads[ident]['frame']
            code_file = self._canonizeFile(frame.f_code.co_filename)
            gotoFile = locals_args[1]
            if self._insensitive:
                # code_file is already lower-cased by _canonizeFile; doing it
                # again is harmless
                gotoFile = gotoFile.lower()
                code_file = code_file.lower()
            if code_file == gotoFile:
                gotoLine = int(locals_args[2])
                currentLine = frame.f_lineno
                # _linesByFile: per-file map of 'func:line' -> sorted code lines
                for _, code_lines in self._linesByFile(gotoFile).items():
                    if currentLine in code_lines and gotoLine in code_lines:
                        self._sendDbgMessage('%s ok' % self._messages.GOTO_TARGETS)
                        return
        except Exception as ex:
            self._sendDbgMessage('%s failed %s' % (self._messages.GOTO_TARGETS, repr(ex)))
            return
        self._sendDbgMessage('%s failed' % self._messages.GOTO_TARGETS)
def _doDisplay(self, cmd, ident, do_encode):
locals_args = cmd.split()
radix = self._radix
if locals_args[0].endswith('h'):
radix = 16
elif locals_args[0].endswith('o'):
radix = 8
if len(locals_args) == 1:
self._display(ident, 0, '.', None, None, radix, do_encode)
elif len(locals_args) == 2:
self._display(int(locals_args[1]), 0, '.', None, None, radix, do_encode)
elif len(locals_args) == 3:
self._display(int(locals_args[1]), int(locals_args[2]), '.', None, None, radix, do_encode)
elif len(locals_args) == 4:
self._display(int(locals_args[1]), int(locals_args[2]), locals_args[3], None, None, radix, do_encode)
elif len(locals_args) == 5:
self._display(int(locals_args[1]), int(locals_args[2]), locals_args[3], int(locals_args[4]), None, radix, do_encode)
elif len(locals_args) == 6:
self._display(int(locals_args[1]), int(locals_args[2]), locals_args[3], int(locals_args[4]), int(locals_args[5]), radix, do_encode)
def _doFrames(self, cmd, ident, do_encode):
locals_args = cmd.split()
if len(locals_args) == 1:
self._showFrames(ident, None, None, do_encode) # all frames in current ident
elif len(locals_args) == 2:
self._showFrames(int(locals_args[1]), None, None, do_encode) # all frames in given ident
elif len(locals_args) == 3:
self._showFrames(int(locals_args[1]), int(locals_args[2]), 1, do_encode) # one given frame in given ident
elif len(locals_args) == 4:
self._showFrames(int(locals_args[1]), int(locals_args[2]), int(locals_args[3]), do_encode) # given amount of frames starting given frame in given ident
def _doAmend(self, cmd):
sep = ' '
cmd, sep, tail = cmd.partition(sep)
aIdent, sep, tail = tail.partition(sep)
aFrame, sep, tail = tail.partition(sep)
aName, sep, aValue = tail.partition(sep)
self._amend(int(aIdent), int(aFrame), aName, aValue)
def _doContinue(self):
if self._paused:
self._sendDbgMessage(self._messages.CONTINUED)
self._paused = False
self._steppingThread = None
self._steppingLevel = None
def _doStepping(self, cmd, ident, entry):
locals_args = cmd.split()
if len(locals_args) == 1:
self._steppingThread = ident
elif len(locals_args) == 2:
self._steppingThread = int(locals_args[1])
self._steppingLevel = None
if self._commands_regexp.NEXT.match(cmd):
self._steppingLevel = entry['level']
elif self._commands_regexp.RETURN.match(cmd):
self._steppingLevel = entry['level'] - 1
self._paused = False
self._sendDbgMessage(self._messages.CONTINUED)
def _doMode(self, cmd):
locals_args = cmd.split()
if len(locals_args) == 1:
self._developerMode = False
elif len(locals_args) == 2:
self._developerMode = str(locals_args[1]).lower() in ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']
self._sendDbgMessage(self._messages.DEVELOPER + ' ' + str(self._developerMode))
def _numFrames(self, entry):
numFrames = 0
if entry['traceback'] == None or entry['level'] > 0:
frame = entry['frame']
while frame:
if self._isDebuggerFrame(frame):
frame = None
break
numFrames = numFrames + 1
frame = frame.f_back
else:
trace = entry['traceback']
while trace:
numFrames = numFrames + 1
trace = trace.tb_next
return numFrames
    def _showInfo(self, ident):
        """Send a human-readable INFO dump: main thread id, the thread that
        asked, and per-thread status with current file/line/function."""
        self._sendDbgMessage(self._messages.INFO)
        self._sendDbgMessage('Main: %i' % self._mainThread)
        self._sendDbgMessage('Where: %i' % ident)
        self._sendDbgMessage('Threads: %i' % len(self._threads))
        for threadEntry in self._threads.values():
            if threadEntry['exception'] != None and threadEntry['level'] <= 0:
                # post-mortem info
                self._sendDbgMessage(' thread %i unhandled exception:' % threadEntry['ident'])
            else:
                # running info
                self._sendDbgMessage(' thread %i frames %i %s:' % (
                    threadEntry['ident'],
                    self._numFrames(threadEntry),
                    'paused' if threadEntry['paused'] else 'running' ))
            self._sendDbgMessage('  file: "%s"' % self._canonizeFile(threadEntry['frame'].f_code.co_filename))
            self._sendDbgMessage('  line: %i' % threadEntry['frame'].f_lineno)
            self._sendDbgMessage('  function: "%s"' % threadEntry['frame'].f_code.co_name)
    def _getFrame(self, ident, frameNum):
        """Resolve (frame, isPostMortem) for a thread ident and frame index.

        frameNum is zero-based, counted from the innermost frame. For a live
        thread the f_back chain is walked (never past the tracer's own
        boundary frame); for a dead thread the saved traceback is indexed
        from its tail. Returns (None, False) and reports a SYNTAX_ERROR when
        the ident or the frame number cannot be resolved.
        """
        for entry in self._threads.values():
            if entry['ident'] != ident:
                continue
            if entry['traceback'] == None or entry['level'] > 0:
                # live thread: walk up frameNum ancestors
                currentFrame = entry['frame']
                currentFrameNum = 0
                while frameNum != currentFrameNum and currentFrame:
                    if self._isDebuggerFrame(currentFrame):
                        currentFrame = None
                        break
                    currentFrameNum = currentFrameNum + 1
                    currentFrame = currentFrame.f_back
                # check if given frame isn't debugger frame
                if self._isDebuggerFrame(currentFrame):
                    currentFrame = None
                if currentFrame == None:
                    self._sendDbgMessage('%s: %s has no frame %s' % (self._messages.SYNTAX_ERROR, ident, frameNum))
                    return (None, False)
                else:
                    return (currentFrame, False)
                break
            else:
                # post-mortem: materialize the traceback frames, innermost last
                frames = []
                trace = entry['traceback']
                while trace:
                    frames.append(trace.tb_frame)
                    trace = trace.tb_next
                if len(frames) > frameNum:
                    return (frames[len(frames) - frameNum - 1], True)
        else:
            # for/else: no matching entry returned a frame
            self._sendDbgMessage('%s: invalid ident %s' % (self._messages.SYNTAX_ERROR, ident))
            return (None, False)
    def _eval_variable(self, name, root):
        """Resolve a dotted/indexed variable path against a namespace dict.

        ``name`` may contain '[idx]' segments; for dicts the index is either
        a base64-encoded key repr (as produced by _display) or a positional
        integer; for sets it is positional; for sequences it is an int
        subscript. Recurses into '.attr' and '[...]' tails. Returns the
        resolved value (None when an index does not match anything).
        """
        idxStart = name.find('[')
        if idxStart < 0:
            # plain dotted name -- let eval do the walking
            return eval(name, {}, root)
        #get head
        if idxStart:
            head = name[:idxStart]
            head = eval(head, {}, root)
        else:
            # name begins with '[': index the root itself
            head = root
        # get idx name
        idxEnd = name.find("]", idxStart)
        idx = name[idxStart+1:idxEnd]
        # get tail
        tail = name[idxEnd+1:]
        # find a value by idx
        result = None
        if type(head) == dict:
            # idx is an base64 encoded string or an integer value
            try:
                # if decoding is success get value by key
                idx = self._b64decode(idx, validate=True).decode('utf-8')
                for k, v in head.items():
                    if repr(k) == idx:
                        result = v
                        break
            except:
                # if decoding fails get value by index
                try:
                    idx = int(idx)
                    for k, v in head.items():
                        if idx == 0:
                            result = v
                            break
                        idx = idx - 1
                except:
                    pass
        elif type(head) == set:
            # sets have no subscript -- count through iteration order
            idx = int(idx)
            for v in iter(head):
                if idx == 0:
                    result = v
                    break
                idx = idx - 1
        else:
            result = head[int(idx)]
        if tail and result != None:
            if tail.startswith('['):
                return self._eval_variable(tail, result)
            elif tail.startswith('.'):
                # continue resolution inside the attribute namespace
                return self._eval_variable(tail[1:], result.__dict__)
        return result
    def _amend_impl(self, name, value, frame):
        """Write ``value`` into the variable addressed by ``name`` in ``frame``.

        Three cases: a bare local (delegated to _changeLocalVar), a trailing
        '[idx]' subscript (dict by base64-key/position, else int subscript),
        or a trailing '.attr' (written through the holder's __dict__).
        """
        if not any((c in ".[]") for c in name):
            # simple local variable
            self._changeLocalVar(frame, name, value)
        else:
            if name.endswith(']'):
                brkPos = name.rfind('[')
                head = self._eval_variable(name[:brkPos], frame.f_locals)
                idx = name[brkPos+1:-1]
                if type(head) == dict:
                    try:
                        # base64-encoded key repr (see _display)
                        idx = self._b64decode(idx).decode('utf-8')
                        for k, _ in head.items():
                            if repr(k) == idx:
                                head[k] = value
                                break
                    except:
                        try:
                            # positional index into iteration order
                            idx = int(idx)
                            for k, _ in head.items():
                                if idx == 0:
                                    head[k] = value
                                    break
                                idx = idx - 1
                        except:
                            pass
                else:
                    head[int(idx)] = value
            else:
                dotPos = name.rfind('.')
                if dotPos > 0:
                    head = self._eval_variable(name[:dotPos], frame.f_locals)
                    try:
                        head.__dict__.update({
                            name[dotPos+1:]: value,
                        })
                        # head.__dict__[name[dotPos+1:]] = value
                        # read back to verify the write actually took effect
                        testValue = head.__dict__[name[dotPos+1:]]
                        if value != testValue:
                            self._sendDbgMessage('_amend_impl value != testValue')
                    except Exception as ex:
                        self._sendDbgMessage('_amend_impl head.__dict__ exception %s' % str(ex))
                else:
                    self._sendDbgMessage('_amend_impl has no dot')
        return
    def _amend(self, ident, frameNum, name, value):
        """Evaluate ``value`` and assign it to ``name`` in the given frame,
        then report the resulting value/shape back to the IDE.

        Post-mortem frames are refused (nothing to write back into).
        """
        frame, isPostMortem = self._getFrame(ident, frameNum)
        if isPostMortem:
            self._sendDbgMessage('%s failed Cannot amend post-mortem frames' % self._messages.AMEND)
            return
        if frame != None:
            try:
                # value arrives as source text; evaluate it in a bare namespace
                value = eval(value, {}, {})
                self._amend_impl(name, value, frame)
                # re-read to confirm and to format the reply
                result = self._eval_variable(name, frame.f_locals)
                resultType = type(result)
                if resultType in self._knownValueTypes:
                    # if we know that is valueType, return it
                    fn = repr
                    if resultType == int:
                        if self._radix == 16:
                            fn = hex
                        elif self._radix == 8:
                            fn = oct
                    self._sendDbgMessage('%s ok %s = %s' % (self._messages.AMEND, resultType, fn(result)))
                    return
                else:
                    try:
                        # in first try to get length of value (test if it is enumerable)
                        length = len(result)
                        self._sendDbgMessage('%s ok %s [%s]' % (self._messages.AMEND, resultType, length))
                    except:
                        self._sendDbgMessage('%s ok %s' % (self._messages.AMEND, resultType))
                    return
            except Exception as ex:
                self._sendDbgMessage('%s failed %s' % (self._messages.AMEND, str(ex)))
                return
        self._sendDbgMessage('%s failed Invalid frame' % self._messages.AMEND)
def _sendDisplayResult(self, result, do_encode):
if do_encode:
result = self._b64encode(result.encode()).decode()
self._sendDbgMessage('%s %s %s' % (self._messages.DISPLAY64, len(result), result))
else:
self._sendDbgMessage('%s %s' % (self._messages.DISPLAY, result))
    def _sendKnownType(self, displayName, valueType, value, radix, do_encode, start):
        """Send a scalar value to the IDE.

        ints are formatted in the requested radix; strings are truncated to
        _maxSendStrLen (flagged with '*') and, when ``start`` is given,
        re-chunked so the IDE can page through long strings.
        """
        if valueType == int:
            fn = repr
            if radix == 16:
                fn = hex
            elif radix == 8:
                fn = oct
            self._sendDisplayResult('"%s" %s value: %s' % (displayName, valueType, fn(value)), do_encode)
        elif valueType == str:
            long_str_flag = ''
            if start != None:
                # NOTE(review): (start + 1) * chunk skips one chunk more than
                # start * chunk would -- looks deliberate (IDE already has
                # chunk 'start'), but confirm against the IDE-side pager.
                start = (start + 1) * self._maxSendStrLen
                value = value[start:]
                self._sendDisplayResult('"%s" %s length: 1' % (displayName, valueType), do_encode)
            if len(value) > self._maxSendStrLen:
                value = value[:self._maxSendStrLen]
                long_str_flag = '*'
            if start != None:
                # paged chunk: report the absolute character range instead of the name
                displayName = '[' + str(start) + '-' + str(start + len(value)) + ']'
                self._sendDisplayResult('"%s" %s value: %s%s' % (displayName, valueType, repr(value), long_str_flag), do_encode)
            else:
                self._sendDisplayResult('"%s" %s value: %s' % (displayName, valueType, repr(value)), do_encode)
    def _display(self, ident, frameNum, fullName, start, count, radix, do_encode):
        """Send the value/children of a variable path to the IDE.

        fullName '' (empty) means the frame's locals; a trailing '.' requests
        recursive expansion of children. For enumerable values ``start`` and
        ``count`` page through indexed children; dict keys are transmitted as
        '=<base64(repr(key))>' so _eval_variable can find them again.
        """
        frame, isPostMortem = self._getFrame(ident, frameNum)
        isPostMortem = isPostMortem
        if frame != None:
            try:
                if fullName.endswith('.'):
                    displayChildren = True
                    fullName = fullName[:-1]
                    displayName = fullName
                else:
                    displayChildren = False
                    displayName = fullName.rpartition('.')[2]
                if fullName:
                    # we have a name - get its value
                    value = self._eval_variable(fullName, frame.f_locals)
                    valueType = type(value)
                    if valueType in self._knownValueTypes:
                        # if we know that is valueType, display it
                        self._sendKnownType(displayName, valueType, value, radix, do_encode, start)
                        return
                    else:
                        try:
                            # in first try to get length of value (test if it is enumerable)
                            length = len(value)
                            # we have a length, so test given start and count
                            if start != None:
                                # go through indexed children
                                if start < length:
                                    if count == None or start + count > length:
                                        count = length - start
                                    self._sendDisplayResult('"%s" %s length: %s' % (displayName, valueType, count), do_encode)
                                    # enumerate through, cutting displayName
                                    displayName = fullName.rpartition('.')[2]
                                    enumerated = enumerate(iter(value))
                                    for x in enumerated:
                                        if start > 0:
                                            # wait a start
                                            start = start - 1
                                            continue
                                        if count > 0:
                                            # until count
                                            idx, subValue = x
                                            if valueType == dict:
                                                # for dicts the "index" is the base64 of the key's repr
                                                idx_s = repr(subValue)
                                                if len(idx_s) > self._maxKeyStrLen:
                                                    idx_s = idx_s[:self._maxKeyStrLen-3] + '...'
                                                idx = '=' + self._b64encode(idx_s.encode()).decode()
                                                subValue = value[subValue]
                                            subValueType = type(subValue)
                                            if subValueType in self._knownValueTypes:
                                                # if we know that is valueType, display it
                                                self._sendKnownType(displayName + ('[%s]' % idx), subValueType, subValue, radix, do_encode, None)
                                            else:
                                                try:
                                                    length = len(subValue)
                                                    self._sendDisplayResult('"%s" %s length: %s' % (displayName + ('[%s]' % idx), subValueType, length), do_encode)
                                                except:
                                                    children = dir(subValue)
                                                    self._sendDisplayResult('"%s" %s children: %s' % (displayName + ('[%s]' % idx), subValueType, len(children)), do_encode)
                                            count = count - 1
                                        else:
                                            break
                                    # enumerated all
                                    if count:
                                        self._sendDisplayResult('"%s" aborted There are %s elements missed' % (displayName, repr(count)), do_encode)
                                    return
                                else:
                                    # have no corresponding children
                                    self._sendDisplayResult('"%s" %s length: 0' % (displayName, valueType), do_encode)
                                    return
                            else:
                                # no start, just return length of children
                                self._sendDisplayResult('"%s" %s length: %s' % (displayName, valueType, length), do_encode)
                                return
                        except:
                            # not enumerable -- fall through to attribute listing
                            children = dir(value)
                else:
                    # locals
                    valueType = "<type '-locals-'>"
                    children = frame.f_locals
                    displayChildren = True
                # test if variable has at least children
                self._sendDisplayResult('"%s" %s children: %s' % (displayName, valueType, len(children)), do_encode)
                if displayChildren:
                    for childName in children:
                        self._display(ident, frameNum, (fullName + '.' if fullName else '') + childName, None, None, radix, do_encode)
            except Exception as ex:
                self._sendDisplayResult('"%s" failed: %s' % (displayName, repr(ex)), do_encode)
def _isDebuggerFrame(self, frame):
return frame and self._canonizeFile(frame.f_code.co_filename) == self._fileName and frame.f_code.co_name == "_runscript"
def _showThreads(self, ident):
self._sendDbgMessage(self._messages.THREADS + (' %i current %i' % (len(self._threads), ident)))
for threadEntry in self._threads.values():
self._sendDbgMessage('thread %i frames %i is %s' % (
threadEntry['ident'],
self._numFrames(threadEntry),
'paused' if threadEntry['paused'] else 'running' ))
def _sendFrame(self, file, line, function, dead_or_alive, do_encode):
message = \
'file: "%s" line: %d function: "%s" %s' % \
( file,
line,
function,
dead_or_alive )
if do_encode:
message = self._b64encode(message.encode()).decode()
self._sendDbgMessage('%s %s %s' % (self._messages.FRAME64, len(message), message))
else:
self._sendDbgMessage('%s %s' % (self._messages.FRAME, message))
def _showFrames(self, ident, frameStart, numFrames, do_encode):
if frameStart == None:
frameStart = 0
frame, isPostMortem = self._getFrame(ident, frameStart)
frameNum = 0
dead_or_alive = 'dead' if isPostMortem else 'alive'
while frame != None and frameNum != numFrames:
if self._isDebuggerFrame(frame):
self._sendFrame('<debugger>', 0, 'none', dead_or_alive, do_encode)
else:
self._sendFrame(self._canonizeFile(frame.f_code.co_filename), frame.f_lineno, frame.f_code.co_name, dead_or_alive, do_encode)
frameNum = frameNum + 1
frame = frame.f_back
def _checkFileBreakpoints(self, bp_file, lines):
""" test all waiting breakpoints for file """
unconfirmed = set()
bp_file = bp_file.lower() if self._insensitive else bp_file
for bp_line in self._breakpointsWait[bp_file]:
if bp_line in lines:
self._confirmBreakpoint(bp_file, bp_line, None)
else:
confirmed = False
# if bp at the non-code line between adjacent real-code lines
if bp_line > lines[0] and bp_line < lines[-1]:
for i in range(len(lines) - 1):
if bp_line < lines[i+1]:
if lines[i+1] - lines[i] < 3:
self._confirmBreakpoint(bp_file, bp_line, lines[i])
confirmed = True
break
if not confirmed:
unconfirmed.add(bp_line)
self._breakpointsWait[bp_file] = unconfirmed
def _linesByFile(self, file):
if self._insensitive and self._lines:
file = file.lower()
for key, value in self._lines.items():
if key.lower() == file:
return value
return self._lines[file]
def _testBreakpoint(self, bp_file, bp_line):
""" test breakpoint """
for funcLines in self._linesByFile(bp_file).values():
if bp_line in funcLines:
return True
return False
def _confirmBreakpoint(self, bp_file, bp_line, bp_line_real):
""" add to confirmed """
if bp_line_real != None:
result = '"%s" %i %i' % (bp_file, bp_line, bp_line_real)
# self._sendDbgMessage(self._messages.BP_CONFIRM + (' "%s" %i %i' % (bp_file, bp_line, bp_line_real)))
self._breakpointsConfirmed[bp_file].add(bp_line_real)
else:
result = '"%s" %i' % (bp_file, bp_line)
# self._sendDbgMessage(self._messages.BP_CONFIRM + (' "%s" %i' % (bp_file, bp_line)))
self._breakpointsConfirmed[bp_file].add(bp_line)
result = self._b64encode(result.encode()).decode()
self._sendDbgMessage('%s %s %s' % (self._messages.BP_CONFIRM64, len(result), result))
def _waitBreakpoint(self, bp_file, bp_line):
""" add for waiting """
result = '"%s" %i' % (bp_file, bp_line)
result = self._b64encode(result.encode()).decode()
self._sendDbgMessage('%s %s %s' % (self._messages.BP_WAIT64, len(result), result))
# self._sendDbgMessage(self._messages.BP_WAIT + (' "%s" %i' % (bp_file, bp_line)))
self._breakpointsWait[bp_file].add(bp_line)
def _doSetBreakPoint(self, cmd):
try:
cmd, bp_file, bp_line = cmd.split()
self._setBp(bp_file, int(bp_line))
except Exception as ex:
self._sendDbgMessage(self._messages.EXCEPTION + ' ' + repr(ex))
def _doResetBreakPoint(self, cmd):
try:
bp_args = cmd.split()
if len(bp_args) == 1:
self._resetBp(None, None)
elif len(bp_args) == 2:
cmd, bp_file = bp_args
self._resetBp(bp_file, None)
else:
cmd, bp_file, bp_line = bp_args
self._resetBp(bp_file, int(bp_line))
except Exception as ex:
self._sendDbgMessage(self._messages.EXCEPTION + ' ' + repr(ex))
def _setBp(self, bp_file, bp_line):
bp_file = bp_file.lower() if self._insensitive else bp_file
if self._testBreakpoint(bp_file, bp_line):
self._confirmBreakpoint(bp_file, bp_line, None)
else:
self._waitBreakpoint(bp_file, bp_line)
def _resetBp(self, bp_file, bp_line):
if bp_file:
bp_file = bp_file.lower() if self._insensitive else bp_file
if bp_line != None:
self._breakpointsWait[bp_file].discard(bp_line)
self._breakpointsConfirmed[bp_file].discard(bp_line)
result = '"%s" %i' % (bp_file, bp_line)
# self._sendDbgMessage(self._messages.BP_RESET + (' "%s" %i' % (bp_file, bp_line)))
else:
del self._breakpointsWait[bp_file]
del self._breakpointsConfirmed[bp_file]
result = '"%s"' % bp_file
# self._sendDbgMessage(self._messages.BP_RESET + (' "%s"' % bp_file))
result = self._b64encode(result.encode()).decode()
self._sendDbgMessage('%s %s %s' % (self._messages.BP_RESET64, len(result), result))
else:
self._breakpointsWait.clear()
self._breakpointsConfirmed.clear()
self._sendDbgMessage(self._messages.BP_RESET)
    def _changeLocalVar(self, frame, varName, newValue):
        """Set local variable *varName* of *frame* to *newValue*.

        NOTE(review): in CPython, writing to ``frame.f_locals`` alone does
        not persist into the function's fast locals; presumably
        ``self._changeLocals`` (with the ``self._Obj`` / ``self._Int``
        wrappers, which look like ctypes argument types) performs the
        C-level write-back (PyFrame_LocalsToFast-style) -- confirm against
        the definitions outside this view.
        """
        frame.f_locals.update({
            varName: newValue,
        })
        self._changeLocals(self._Obj(frame), self._Int(0))
    def _runscript(self, filename):
        """Execute *filename* as a fresh ``__main__`` module (pdb technique).

        Resets ``__main__.__dict__`` to a minimal module namespace pointing
        ``__file__``/``__spec__`` at the target script, then compiles and
        execs the script's source.  Any exception escaping the script is
        reported over the debugger channel, or printed when no client is
        connected.
        """
        sys.path.insert(0, '.') # add cwd
        self._cwd = os.getcwd()
        if self._insensitive:
            self._cwd = self._cwd.lower()
        # Remember this debugger file's canonical name so its own frames can
        # be recognized later (see _isDebuggerFrame).
        self._fileName = self._canonizeFile(__file__)
        # Minimal stand-in for a ModuleSpec carrying only the module name.
        # NOTE(review): self._os_path_splitext / self._os_path_basename look
        # like pre-bound os.path helpers defined outside this view -- confirm.
        class _Empty: pass
        __spec__ = _Empty()
        __spec__.name = self._os_path_splitext(self._os_path_basename(filename))[0]
        # === Given from PDB.PY ===
        import __main__
        builtinsT = __builtins__
        __main__.__dict__.clear()
        __main__.__dict__.update({'__name__' : '__main__',
                                  '__file__' : filename,
                                  '__builtins__': builtinsT,
                                  '__spec__' : __spec__,
                                  })
        self._fileWaitingFor = self._canonizeFile(filename)
        globalsT = __main__.__dict__
        try:
            # Compile indirectly through an exec() statement so the raw
            # source bytes are embedded verbatim (same trick as pdb).
            with open(filename, 'rb') as fp:
                statement = "exec(compile(%r, %r, 'exec'))" % (fp.read(), filename)
            self._startTracing = True
            exec(statement, globalsT, globalsT)
        except Exception as ex:
            if self._isConnected():
                self._sendDbgMessage(self._messages.EXCEPTION + ' ' + repr(ex))
            else:
                print(repr(ex))
def run(self, filename):
self._setupTrace()
self._runscript(filename)
self._cleanupTrace()
#===================================================================
if __name__ == '__main__':
    # Command-line entry point: parse options, then run the target script
    # under a Tracer instance.
    _usage = """\
usage: tracer.py -p port [-d] [-i] pyfile [arg] ...
Debug the Python program given by pyfile."""
    import getopt
    # -h/--help: print usage; -p/--port: debugger port (stored on SETTINGS);
    # -d: developer mode; -i: case-insensitive file-name matching.
    opts, args = getopt.getopt(sys.argv[1:], 'hp:di', ['help','port='])
    if not args:
        print(_usage)
        sys.exit(2)
    developerMode = False
    insensitive = False
    for opt, optarg in opts:
        if opt in ['-h', '--help']:
            print(_usage)
            sys.exit()
        elif opt in ['-p', '--port']:
            SETTINGS.PORT = int(optarg)
        elif opt in ['-d']:
            developerMode = True
        elif opt in ['-i']:
            insensitive = True
        else:
            print('Unknown option %s' % opt)
    # Everything after the options becomes the debuggee's argv; args[0] is
    # the script to debug.
    sys.argv = args
    tracer = Tracer(SETTINGS.PORT, developerMode=developerMode, insensitive=insensitive)
    tracer.run(args[0])
| 43.582906 | 172 | 0.50704 |
8fd3f12a1e278fef102d76b21fdbeb9534bbbe59 | 5,158 | py | Python | mscreen/autodocktools_prepare_py3k/MolKit/pdb2pqr/pdb2pka/ligandclean/peoe_PDB2PQR.py | e-mayo/mscreen | a50f0b2f7104007c730baa51b4ec65c891008c47 | [
"MIT"
] | 9 | 2021-03-06T04:24:28.000Z | 2022-01-03T09:53:07.000Z | MolKit/pdb2pqr/pdb2pka/ligandclean/peoe_PDB2PQR.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 3 | 2021-03-07T05:37:16.000Z | 2021-09-19T15:06:54.000Z | MolKit/pdb2pqr/pdb2pka/ligandclean/peoe_PDB2PQR.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 4 | 2019-08-28T23:11:39.000Z | 2021-11-27T08:43:36.000Z | # http://users.unimi.it/~ddl/vega/manual/pages/appx_a.htm
# Type a b c d
# =============================================
# H 7.17 6.24 -0.56 12.85
# C3 7.98 9.18 1.88 19.04
# C2 8.79 9.32 1.51 19.62
# C1 10.39 9.45 0.73 20.57
# N3 11.54 10.82 1.36 23.72
# N2 12.87 11.15 0.85 24.87
# N1 15.68 11.70 -0.27 27.11
# O3 14.18 12.92 1.39 28.49
# O2 17.07 13.79 0.47 31.33
# F 14.66 13.85 2.31 30.82
# Cl 11.00 9.69 1.35 22.04
# Br 10.08 8.47 1.16 19.71
# I 9.90 7.96 0.96 18.82
# S3 10.14 9.13 1.38 20.65
# Ground-state data per element: (Eg, Ig) as tabulated in the original
# source -- column labels "Eg"/"Ig" (presumably electron affinity and
# ionization energy in eV; confirm against the PEOE literature).
# Kept for reference; PEOE() below does not read this table.
ground_states = {
    #      Eg      Ig
    'H' : (0.747, 13.595),
    'B' : (0.330, 8.296),
    'C' : (1.120, 11.256),
    'N' : (1.050, 14.535),
    'O' : (1.465, 13.614),
    'F' : (3.480, 17.418),
    'Si': (1.460, 8.149),
    'P' : (0.770, 10.977),
    'S' : (2.070, 10.357),
    'Cl': (3.690, 12.974),
    'Br': (3.550, 11.84),
    'I' : (3.210, 10.45)
    }

# Per-SYBYL-atom-type coefficients (a, b, c, d) of the charge-dependent
# electronegativity polynomial evaluated by calcchi() in PEOE():
#     chi(q) = a + b*q + c*q**2 + d*q**3
# Several entries carry empirical additive offsets on the 'a' term.
Chargeterms = {
    'H' : ( 7.17, 6.24, -0.56, 12.85),
    # Carbon
    'C.3': ( 7.98, 9.18, 1.88, 19.04),
    'C.cat':( 7.98, 9.18, 1.88, 19.04),
    'C.2': ( 8.79+0.5, 9.32, 1.51, 19.62),
    'C.ar': ( 7.98+0.55, 9.18, 1.88, 19.04),
    'C.1': (10.39, 9.45, 0.73, 20.57),
    # Nitrogen
    'N.3': (11.54+6.0, 10.28, 1.36, 28.00),
    'N.4': (11.54+6.0, 10.28, 1.36, 28.00),
    'N.ar': (12.87-1.29, 11.15, 0.85, 24.87),
    'N.2': (12.87, 11.15, 0.85, 24.87),
    'N.pl3':(12.87+0.5, 11.15, 0.85, 24.87),
    'N.am': (12.87+3.5, 11.15, 0.85, 24.87),
    'N.1': (15.68, 11.70, -0.27, 27.11),
    # Oxygen
    'O.OH': (14.18+0.8, 12.92, 1.39, 28.49),   # hydroxyl; substituted for O.3
    'O.3': (14.18-3.1, 12.92, 1.39, 28.49),
    'O.2': (14.18, 12.92, 1.39, 28.49),
    'O.co2':(15.25, 13.79, 0.47, 31.33),
    # Halogenides etc
    'F' : (12.36, 13.85, 2.31, 30.82),
    'Cl': ( 9.38+1.0, 9.69, 1.35, 22.04),
    'Br': (10.08+0.8, 8.47, 1.16, 19.71),
    'I' : ( 9.90+1.0, 7.96, 0.96, 18.82),
    'S.3': (10.13+0.5, 9.13, 1.38, 20.65),
    'S.2': (10.13+0.5, 9.13, 1.38, 20.65),
    'S.o2': (10.13+0.5, 9.13, 1.38, 20.65),
    'P.3': (10.13+0.5, 9.13, 1.38, 20.65),
    }

def PEOE(atoms, damp=0.778, k=1.56):
    """Assign PEOE (Gasteiger-Marsili style) partial charges to *atoms*.

    Six damped cycles of electronegativity equalization move charge between
    bonded atoms; pre-set input charges are fed back as formal charges in
    1/6 portions per cycle, and the result is finally scaled by *k*.

    Args:
        atoms: molecule-like object with an ``atoms`` list; every atom must
            provide ``name``, ``sybylType``, ``charge`` and ``intrabonds``
            (a list of bonded atom *names*).
        damp: per-cycle damping factor (attenuation is damp**cycle).
        k: overall scale factor applied to the final charges.

    Returns:
        *atoms*, with each atom's ``charge`` updated in place (``chi`` and
        ``formal_charge`` scratch attributes are left behind, matching the
        original behaviour).

    Raises:
        KeyError: for any atom whose sybylType has no Chargeterms entry.

    This rewrite unifies the two previously duplicated iteration branches
    (identical except for a formal-charge term that is provably 0.0 when no
    input charges exist), replaces the quadratic rescan of all atoms per
    bond with a name->atoms map, and drops the unused ``counter`` and the
    no-op ``i.charge = i.charge`` statements.
    """
    def calcchi(atom, q):
        """Electronegativity chi(q) of *atom* at partial charge *q*."""
        if abs(q) > 1.1:
            q = -1.1 if q < 0.0 else 1.1  # clamp runaway charges
        if (q == 1.0) and (atom.sybylType == 'H'):
            return 20.02  # special-cased value for a fully ionized hydrogen
        if len(atom.abc) == 4:
            a, b, c, d = atom.abc
            return a + b*q + c*q*q + d*q*q*q
        a, b, c = atom.abc
        return a + b*q + c*q*q

    # --- Initialize per-atom scratch state -------------------------------
    abs_qges = 0.0  # sum of |input (formal) charges|; kept for parity
    for a in atoms.atoms:
        if a.sybylType not in Chargeterms:
            # NOTE: despite the wording, unknown types abort the run
            # (message kept verbatim for client compatibility).
            raise KeyError('PEOE Error: Atomtype <%s> not known, treating atom %s as dummy' % (a.sybylType, a.name))
        # O.3 is parameterized with the hydroxyl ('O.OH') coefficients.
        a.abc = Chargeterms['O.OH' if a.sybylType == 'O.3' else a.sybylType]
        a.chi = a.abc[0]
        if a.charge != 0.0:
            # Pre-set charge is re-injected in 1/6 portions each cycle.
            a.formal_charge = a.charge*(1/k)
            abs_qges = abs_qges + abs(a.charge)
        else:
            a.formal_charge = 0.0
            a.charge = 0.0
        a.dq = 0.0

    # Map bonded-atom names to atoms once.  A *list* per name preserves the
    # original accumulate-over-every-match behaviour for duplicate names.
    atoms_by_name = {}
    for a in atoms.atoms:
        atoms_by_name.setdefault(a.name, []).append(a)

    # --- Iterative electronegativity equalization (6 damped cycles) ------
    for cycle in range(1, 7):
        attenuation = damp**cycle
        for i in atoms.atoms:
            i.chi = calcchi(i, i.charge)
            i.dq = 0.0
            for bonded_name in i.intrabonds:
                for neighbour in atoms_by_name.get(bonded_name, ()):
                    dchi = calcchi(neighbour, neighbour.charge) - i.chi
                    # Normalize by chi(+1) of the electron-donating partner.
                    if dchi > 0.0:
                        i.dq += (dchi / calcchi(i, +1) * attenuation)
                    else:
                        i.dq += (dchi / calcchi(neighbour, +1) * attenuation)
        for i in atoms.atoms:
            # formal_charge is 0.0 for initially neutral atoms, so this
            # collapses to `i.charge += i.dq` in that case.
            i.charge += i.dq + (0.166666667 * i.formal_charge)

    # --- Finalize: scale and drop per-atom scratch attributes ------------
    for i in atoms.atoms:
        i.charge = i.charge * k
        del i.dq
        del i.abc
    return atoms
| 35.819444 | 116 | 0.403451 |
70ecf9b6589782d26a80b22bec8c300bb1b9de9e | 25,331 | py | Python | redis/google/cloud/redis_v1beta1/gapic/cloud_redis_client.py | jo2y/google-cloud-python | 1b76727be16bc4335276f793340bb72d32be7166 | [
"Apache-2.0"
] | null | null | null | redis/google/cloud/redis_v1beta1/gapic/cloud_redis_client.py | jo2y/google-cloud-python | 1b76727be16bc4335276f793340bb72d32be7166 | [
"Apache-2.0"
] | null | null | null | redis/google/cloud/redis_v1beta1/gapic/cloud_redis_client.py | jo2y/google-cloud-python | 1b76727be16bc4335276f793340bb72d32be7166 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.redis.v1beta1 CloudRedis API."""
import functools
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
from google.cloud.redis_v1beta1.gapic import cloud_redis_client_config
from google.cloud.redis_v1beta1.gapic import enums
from google.cloud.redis_v1beta1.proto import cloud_redis_pb2
from google.cloud.redis_v1beta1.proto import cloud_redis_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
# Version string reported in the user-agent of API requests; resolved from
# the installed google-cloud-redis distribution metadata.
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    'google-cloud-redis', ).version
class CloudRedisClient(object):
    """
    Configures and manages Cloud Memorystore for Redis instances
    Google Cloud Memorystore for Redis v1beta1
    The ``redis.googleapis.com`` service implements the Google Cloud Memorystore
    for Redis API and defines the following resource model for managing Redis
    instances:
    * The service works with a collection of cloud projects, named: ``/projects/*``
    * Each project has a collection of available locations, named: ``/locations/*``
    * Each location has a collection of Redis instances, named: ``/instances/*``
    * As such, Redis instances are resources of the form:
    ``/projects/{project_id}/locations/{location_id}/instances/{instance_id}``
    Note that location_id must be referring to a GCP ``region``; for example:
    * ``projects/redpepper-1290/locations/us-central1/instances/my-redis``
    """
    SERVICE_ADDRESS = 'redis.googleapis.com:443'
    """The default address of the service."""
    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
    # The name of the interface for this client. This is the key used to find
    # method configuration in the client_config dictionary.
    _INTERFACE_NAME = 'google.cloud.redis.v1beta1.CloudRedis'
    @classmethod
    def location_path(cls, project, location):
        """Return a fully-qualified location string."""
        return google.api_core.path_template.expand(
            'projects/{project}/locations/{location}',
            project=project,
            location=location,
        )
    @classmethod
    def instance_path(cls, project, location, instance):
        """Return a fully-qualified instance string."""
        return google.api_core.path_template.expand(
            'projects/{project}/locations/{location}/instances/{instance}',
            project=project,
            location=location,
            instance=instance,
        )
    def __init__(self,
                 channel=None,
                 credentials=None,
                 client_config=cloud_redis_client_config.config,
                 client_info=None):
        """Constructor.
        Args:
            channel (grpc.Channel): A ``Channel`` instance through
                which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_config (dict): A dictionary of call options for each
                method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # If both `channel` and `credentials` are specified, raise an
        # exception (channels come with credentials baked in already).
        if channel is not None and credentials is not None:
            raise ValueError(
                'The `channel` and `credentials` arguments to {} are mutually '
                'exclusive.'.format(self.__class__.__name__), )
        # Create the channel.
        if channel is None:
            channel = google.api_core.grpc_helpers.create_channel(
                self.SERVICE_ADDRESS,
                credentials=credentials,
                scopes=self._DEFAULT_SCOPES,
            )
        # Create the gRPC stubs.
        self.cloud_redis_stub = (cloud_redis_pb2_grpc.CloudRedisStub(channel))
        # Operations client for methods that return long-running operations
        # futures.  It shares the same channel as the main stub.
        self.operations_client = (
            google.api_core.operations_v1.OperationsClient(channel))
        if client_info is None:
            client_info = (
                google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
        client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config['interfaces'][self._INTERFACE_NAME], )
        # Write the "inner API call" methods to the class.
        # These are wrapped versions of the gRPC stub methods, with retry and
        # timeout configuration applied, called by the public methods on
        # this class.
        self._list_instances = google.api_core.gapic_v1.method.wrap_method(
            self.cloud_redis_stub.ListInstances,
            default_retry=method_configs['ListInstances'].retry,
            default_timeout=method_configs['ListInstances'].timeout,
            client_info=client_info,
        )
        self._get_instance = google.api_core.gapic_v1.method.wrap_method(
            self.cloud_redis_stub.GetInstance,
            default_retry=method_configs['GetInstance'].retry,
            default_timeout=method_configs['GetInstance'].timeout,
            client_info=client_info,
        )
        self._create_instance = google.api_core.gapic_v1.method.wrap_method(
            self.cloud_redis_stub.CreateInstance,
            default_retry=method_configs['CreateInstance'].retry,
            default_timeout=method_configs['CreateInstance'].timeout,
            client_info=client_info,
        )
        self._update_instance = google.api_core.gapic_v1.method.wrap_method(
            self.cloud_redis_stub.UpdateInstance,
            default_retry=method_configs['UpdateInstance'].retry,
            default_timeout=method_configs['UpdateInstance'].timeout,
            client_info=client_info,
        )
        self._delete_instance = google.api_core.gapic_v1.method.wrap_method(
            self.cloud_redis_stub.DeleteInstance,
            default_retry=method_configs['DeleteInstance'].retry,
            default_timeout=method_configs['DeleteInstance'].timeout,
            client_info=client_info,
        )
    # Service calls
    def list_instances(self,
                       parent,
                       page_size=None,
                       retry=google.api_core.gapic_v1.method.DEFAULT,
                       timeout=google.api_core.gapic_v1.method.DEFAULT,
                       metadata=None):
        """
        Lists all Redis instances owned by a project in either the specified
        location (region) or all locations.
        The location should have the following format:
        * ``projects/{project_id}/locations/{location_id}``
        If ``location_id`` is specified as ``-`` (wildcard), then all regions
        available to the project are queried, and the results are aggregated.
        Example:
            >>> from google.cloud import redis_v1beta1
            >>>
            >>> client = redis_v1beta1.CloudRedisClient()
            >>>
            >>> parent = client.location_path('[PROJECT]', '[LOCATION]')
            >>>
            >>>
            >>> # Iterate over all results
            >>> for element in client.list_instances(parent):
            ...     # process element
            ...     pass
            >>>
            >>> # Or iterate over results one page at a time
            >>> for page in client.list_instances(parent, options=CallOptions(page_token=INITIAL_PAGE)):
            ...     for element in page:
            ...         # process element
            ...         pass
        Args:
            parent (str): Required. The resource name of the instance location using the form:
                ::
                `projects/{project_id}/locations/{location_id}`
                where ``location_id`` refers to a GCP region
            page_size (int): The maximum number of resources contained in the
                underlying API response. If page streaming is performed per-
                resource, this parameter does not affect the return value. If page
                streaming is performed per-page, this determines the maximum number
                of resources in a page.
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.gax.PageIterator` instance. By default, this
            is an iterable of :class:`~google.cloud.redis_v1beta1.types.Instance` instances.
            This object can also be configured to iterate over the pages
            of the response through the `options` parameter.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        if metadata is None:
            metadata = []
        # Copy so the caller's sequence is never mutated downstream.
        metadata = list(metadata)
        request = cloud_redis_pb2.ListInstancesRequest(
            parent=parent,
            page_size=page_size,
        )
        iterator = google.api_core.page_iterator.GRPCIterator(
            client=None,
            method=functools.partial(
                self._list_instances,
                retry=retry,
                timeout=timeout,
                metadata=metadata),
            request=request,
            items_field='instances',
            request_token_field='page_token',
            response_token_field='next_page_token',
        )
        return iterator
    def get_instance(self,
                     name,
                     retry=google.api_core.gapic_v1.method.DEFAULT,
                     timeout=google.api_core.gapic_v1.method.DEFAULT,
                     metadata=None):
        """
        Gets the details of a specific Redis instance.
        Example:
            >>> from google.cloud import redis_v1beta1
            >>>
            >>> client = redis_v1beta1.CloudRedisClient()
            >>>
            >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
            >>>
            >>> response = client.get_instance(name)
        Args:
            name (str): Required. Redis instance resource name using the form:
                ::
                `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
                where ``location_id`` refers to a GCP region
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.redis_v1beta1.types.Instance` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        request = cloud_redis_pb2.GetInstanceRequest(name=name, )
        return self._get_instance(
            request, retry=retry, timeout=timeout, metadata=metadata)
    def create_instance(self,
                        parent,
                        instance_id,
                        instance,
                        retry=google.api_core.gapic_v1.method.DEFAULT,
                        timeout=google.api_core.gapic_v1.method.DEFAULT,
                        metadata=None):
        """
        Creates a Redis instance based on the specified tier and memory size.
        By default, the instance is peered to the project's
        `default network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`_.
        The creation is executed asynchronously and callers may check the returned
        operation to track its progress. Once the operation is completed the Redis
        instance will be fully functional. Completed longrunning.Operation will
        contain the new instance object in the response field.
        The returned operation is automatically deleted after a few hours, so there
        is no need to call DeleteOperation.
        Example:
            >>> from google.cloud import redis_v1beta1
            >>> from google.cloud.redis_v1beta1 import enums
            >>>
            >>> client = redis_v1beta1.CloudRedisClient()
            >>>
            >>> parent = client.location_path('[PROJECT]', '[LOCATION]')
            >>> instance_id = 'test_instance'
            >>> tier = enums.Instance.Tier.BASIC
            >>> memory_size_gb = 1
            >>> instance = {'tier': tier, 'memory_size_gb': memory_size_gb}
            >>>
            >>> response = client.create_instance(parent, instance_id, instance)
            >>>
            >>> def callback(operation_future):
            ...     # Handle result.
            ...     result = operation_future.result()
            >>>
            >>> response.add_done_callback(callback)
            >>>
            >>> # Handle metadata.
            >>> metadata = response.metadata()
        Args:
            parent (str): Required. The resource name of the instance location using the form:
                ::
                `projects/{project_id}/locations/{location_id}`
                where ``location_id`` refers to a GCP region
            instance_id (str): Required. The logical name of the Redis instance in the customer project
                with the following restrictions:
                * Must contain only lowercase letters, numbers, and hyphens.
                * Must start with a letter.
                * Must be between 1-40 characters.
                * Must end with a number or a letter.
                * Must be unique within the customer project / location
            instance (Union[dict, ~google.cloud.redis_v1beta1.types.Instance]): Required. A Redis [Instance] resource
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.redis_v1beta1.types.Instance`
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.redis_v1beta1.types._OperationFuture` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        request = cloud_redis_pb2.CreateInstanceRequest(
            parent=parent,
            instance_id=instance_id,
            instance=instance,
        )
        operation = self._create_instance(
            request, retry=retry, timeout=timeout, metadata=metadata)
        # Wrap the raw operation in a future that resolves to an Instance.
        return google.api_core.operation.from_gapic(
            operation,
            self.operations_client,
            cloud_redis_pb2.Instance,
            metadata_type=any_pb2.Any,
        )
    def update_instance(self,
                        update_mask,
                        instance,
                        retry=google.api_core.gapic_v1.method.DEFAULT,
                        timeout=google.api_core.gapic_v1.method.DEFAULT,
                        metadata=None):
        """
        Updates the metadata and configuration of a specific Redis instance.
        Completed longrunning.Operation will contain the new instance object
        in the response field. The returned operation is automatically deleted
        after a few hours, so there is no need to call DeleteOperation.
        Example:
            >>> from google.cloud import redis_v1beta1
            >>>
            >>> client = redis_v1beta1.CloudRedisClient()
            >>>
            >>> paths_element = 'display_name'
            >>> paths_element_2 = 'memory_size_gb'
            >>> paths = [paths_element, paths_element_2]
            >>> update_mask = {'paths': paths}
            >>> display_name = 'UpdatedDisplayName'
            >>> memory_size_gb = 4
            >>> instance = {'display_name': display_name, 'memory_size_gb': memory_size_gb}
            >>>
            >>> response = client.update_instance(update_mask, instance)
            >>>
            >>> def callback(operation_future):
            ...     # Handle result.
            ...     result = operation_future.result()
            >>>
            >>> response.add_done_callback(callback)
            >>>
            >>> # Handle metadata.
            >>> metadata = response.metadata()
        Args:
            update_mask (Union[dict, ~google.cloud.redis_v1beta1.types.FieldMask]): Required. Mask of fields to update. At least one path must be supplied in
                this field. The elements of the repeated paths field may only include these
                fields from ``Instance``:
                * ``display_name``
                * ``labels``
                * ``memory_size_gb``
                * ``redis_config``
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.redis_v1beta1.types.FieldMask`
            instance (Union[dict, ~google.cloud.redis_v1beta1.types.Instance]): Required. Update description.
                Only fields specified in update_mask are updated.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.redis_v1beta1.types.Instance`
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.redis_v1beta1.types._OperationFuture` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        request = cloud_redis_pb2.UpdateInstanceRequest(
            update_mask=update_mask,
            instance=instance,
        )
        operation = self._update_instance(
            request, retry=retry, timeout=timeout, metadata=metadata)
        # Wrap the raw operation in a future that resolves to an Instance.
        return google.api_core.operation.from_gapic(
            operation,
            self.operations_client,
            cloud_redis_pb2.Instance,
            metadata_type=any_pb2.Any,
        )
    def delete_instance(self,
                        name,
                        retry=google.api_core.gapic_v1.method.DEFAULT,
                        timeout=google.api_core.gapic_v1.method.DEFAULT,
                        metadata=None):
        """
        Deletes a specific Redis instance.  Instance stops serving and data is
        deleted.
        Example:
            >>> from google.cloud import redis_v1beta1
            >>>
            >>> client = redis_v1beta1.CloudRedisClient()
            >>>
            >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
            >>>
            >>> response = client.delete_instance(name)
            >>>
            >>> def callback(operation_future):
            ...     # Handle result.
            ...     result = operation_future.result()
            >>>
            >>> response.add_done_callback(callback)
            >>>
            >>> # Handle metadata.
            >>> metadata = response.metadata()
        Args:
            name (str): Required. Redis instance resource name using the form:
                ::
                `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
                where ``location_id`` refers to a GCP region
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.redis_v1beta1.types._OperationFuture` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        request = cloud_redis_pb2.DeleteInstanceRequest(name=name, )
        operation = self._delete_instance(
            request, retry=retry, timeout=timeout, metadata=metadata)
        # Deletion resolves to Empty (no payload) when the operation finishes.
        return google.api_core.operation.from_gapic(
            operation,
            self.operations_client,
            empty_pb2.Empty,
            metadata_type=any_pb2.Any,
        )
| 44.440351 | 157 | 0.608464 |
618050544cfb6f83887e7e97bdf5839d9eaba27b | 3,847 | py | Python | zat/json_log_to_dataframe.py | SuperCowPowers/zat | 7f0de8bb052e8c84ab9bd00f195514d957eac9ec | [
"Apache-2.0"
] | 146 | 2019-11-07T20:57:27.000Z | 2022-03-24T00:03:55.000Z | zat/json_log_to_dataframe.py | Kitware/BroUtils | fed88c4310cf70c8b01c9a7eb0918b8c4d117e77 | [
"Apache-2.0"
] | 43 | 2017-09-18T06:22:35.000Z | 2018-09-05T19:59:43.000Z | zat/json_log_to_dataframe.py | Kitware/BroUtils | fed88c4310cf70c8b01c9a7eb0918b8c4d117e77 | [
"Apache-2.0"
] | 41 | 2017-09-11T09:59:47.000Z | 2018-09-01T18:36:47.000Z | """JSONLogToDataFrame: Converts a Zeek JSON log to a Pandas DataFrame"""
import os
# Third Party
import pandas as pd
# Local Imports
class JSONLogToDataFrame(object):
    """Convert a Zeek JSON (line-delimited) log file into a Pandas DataFrame.

    Notes:
        JSON output from Zeek carries no type information, so column types
        must be inferred by Pandas. The standard Zeek log format is
        recommended instead, as it yields faster and better-typed dataframes.
    """

    def __init__(self):
        """Set up the converter."""
        # Placeholder for future explicit column-type conversions; currently
        # no entries are defined and it is never consulted.
        self.type_map = {}

    def create_dataframe(self, log_filename, ts_index=True, aggressive_category=True, maxrows=None):
        """Build a Pandas DataFrame from a Zeek JSON log file.

        Args:
            log_filename (str): Full path to the Zeek JSON log.
            ts_index (bool): Use the 'ts' column as the index (default True).
            aggressive_category (bool): Accepted for API compatibility;
                currently not applied to any column (default True).
            maxrows (int or None): Read only the first maxrows lines
                (useful for testing/inspection).

        Returns:
            pandas.DataFrame: The parsed log data; empty if the file is missing.
        """
        # Bail out early with an empty frame when the file does not exist.
        if not os.path.isfile(log_filename):
            print(f'Could not find file: {log_filename}')
            return pd.DataFrame()

        # Each line of the log is one JSON record.
        frame = pd.read_json(log_filename, nrows=maxrows, lines=True)

        # Zeek timestamps are epoch seconds; convert them and optionally
        # promote the column to the index.
        if 'ts' in frame.columns:
            frame['ts'] = pd.to_datetime(frame['ts'], unit='s')
            if ts_index:
                frame.set_index('ts', inplace=True)
        return frame
# Simple test of the functionality
def test():
    """Smoke test for the JSONLogToDataFrame class.

    Exercises the converter against the bundled sample logs in
    ../data/json, including the maxrows argument and an empty log.
    Relies on those fixture files being present; output is printed
    rather than asserted.
    """
    import os
    pd.set_option('display.width', 1000)
    # Local import keeps zat.utils out of module import time.
    from zat.utils import file_utils
    # Grab a test file
    data_path = file_utils.relative_dir(__file__, '../data/json')
    log_path = os.path.join(data_path, 'conn.log')
    # Convert it to a Pandas DataFrame
    log_to_df = JSONLogToDataFrame()
    my_df = log_to_df.create_dataframe(log_path)
    # Print out the head
    print(my_df.head())
    # Print out the datatypes
    print(my_df.dtypes)
    # Test a bunch of the sample Zeek log types
    tests = ['capture_loss.log', 'dhcp.log', 'http.log', 'ntp.log', 'smb_mapping.log', 'weird.log',
             'conn.log', 'dns.log', 'kerberos.log', 'packet_filter.log', 'ssl.log', 'x509.log',
             'dce_rpc.log', 'files.log', 'loaded_scripts.log', 'smb_files.log', 'stats.log']
    for log_path in [os.path.join(data_path, log) for log in tests]:
        print('Testing: {:s}...'.format(log_path))
        my_df = log_to_df.create_dataframe(log_path)
        print(my_df.head())
        print(my_df.dtypes)
    # Test out maxrows arg (reads only the first 3 records)
    conn_path = os.path.join(data_path, 'conn.log')
    my_df = log_to_df.create_dataframe(conn_path, maxrows=3)
    print(my_df.head())
    # Test an empty log (a log with header/close but no data rows)
    log_path = os.path.join(data_path, 'http_empty.log')
    my_df = log_to_df.create_dataframe(log_path)
    # Print out the head
    print(my_df.head())
    # Print out the datatypes
    print(my_df.dtypes)
    print('JSONLogToDataFrame Test successful!')
if __name__ == '__main__':
    # Run the test for easy testing/debugging
    # Set up Pandas output options so the printed frames stay readable.
    pd.options.display.max_colwidth = 20
    pd.options.display.max_columns = 10
    test()
| 34.348214 | 100 | 0.642839 |
c66439f72b275b5d7dda40bc5ea351b65b78d99c | 304 | py | Python | Life2Coding/01-hello-world/main.py | dloperab/Python-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | 2 | 2019-03-17T16:08:09.000Z | 2019-03-29T06:37:16.000Z | Life2Coding/01-hello-world/main.py | dloperab/OpenCV-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | null | null | null | Life2Coding/01-hello-world/main.py | dloperab/OpenCV-GUI | ab71f14c7fbb011af0735f48f5196146de11ea23 | [
"MIT"
] | 2 | 2020-03-14T04:32:19.000Z | 2021-03-09T17:21:33.000Z | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5.uic import loadUi
class MainForm(QDialog):
    """Main dialog window whose layout is loaded from 'main-form.ui'."""

    def __init__(self):
        """Initialize the dialog and populate it from the Qt Designer file."""
        super().__init__()  # Python 3 zero-argument super()
        # loadUi reads 'main-form.ui' from the current working directory
        # and builds the widget tree directly onto this dialog.
        loadUi('main-form.ui', self)
# Create the Qt application (command-line args are forwarded to Qt).
app = QApplication(sys.argv)
window = MainForm()
window.show()
# Enter the Qt event loop; exit the process with its return code.
sys.exit(app.exec_())
cb67fa550ae2060c163ba777faa26bc67fac9a81 | 1,306 | py | Python | allauth/socialaccount/providers/soundcloud/tests.py | swapnilt/django-mongoengine-allauth | c35127a2b140fad7a06989edb6ad9defffe9e657 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/soundcloud/tests.py | swapnilt/django-mongoengine-allauth | c35127a2b140fad7a06989edb6ad9defffe9e657 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/soundcloud/tests.py | swapnilt/django-mongoengine-allauth | c35127a2b140fad7a06989edb6ad9defffe9e657 | [
"MIT"
] | 1 | 2019-06-10T16:02:31.000Z | 2019-06-10T16:02:31.000Z | from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import SoundCloudProvider
class SoundCloudTests(OAuth2TestsMixin, TestCase):
    """OAuth2 provider tests for SoundCloud (test flow comes from the mixin)."""

    # Identifies which provider the OAuth2TestsMixin machinery should exercise.
    provider_id = SoundCloudProvider.id

    def get_mocked_response(self):
        """Return a canned SoundCloud /me profile response (HTTP 200)."""
        return MockedResponse(200, """
        {
            "website": null,
            "myspace_name": null,
            "public_favorites_count": 0,
            "followings_count": 1,
            "full_name": "",
            "id": 22341947,
            "city": null,
            "track_count": 0,
            "playlist_count": 0,
            "discogs_name": null,
            "private_tracks_count": 0,
            "followers_count": 0,
            "online": true,
            "username": "user187631676",
            "description": null,
            "kind": "user",
            "website_title": null,
            "primary_email_confirmed": false,
            "permalink_url": "http://soundcloud.com/user187631676",
            "private_playlists_count": 0,
            "permalink": "user187631676",
            "country": null,
            "uri": "https://api.soundcloud.com/users/22341947",
            "avatar_url": "https://a1.sndcdn.com/images/default_avatar_large.png?4b4189b",
            "plan": "Free"
        }""")
| 34.368421 | 90 | 0.562787 |
cd6f71f77fc0c1c0f4bafc682681f31f0218cfeb | 1,893 | py | Python | Loan/code.py | deepikakeswani/ga-learner-dsmp-repo | e06132631df81c22384bc4f74187fdc566baa4c8 | [
"MIT"
] | null | null | null | Loan/code.py | deepikakeswani/ga-learner-dsmp-repo | e06132631df81c22384bc4f74187fdc566baa4c8 | [
"MIT"
] | null | null | null | Loan/code.py | deepikakeswani/ga-learner-dsmp-repo | e06132631df81c22384bc4f74187fdc566baa4c8 | [
"MIT"
] | null | null | null | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
# NOTE(review): `path` is injected by the hosting platform; it is not
# defined in this file.
bank=pd.read_csv(path)
# Split the columns by dtype for inspection.
categorical_var=bank.select_dtypes(include='object')
print(categorical_var)
numerical_var=bank.select_dtypes(include='number')
# code ends here
# --------------
# code starts here
# Drop the identifier column, then impute missing values in every column
# with that column's mode.
banks = bank.drop('Loan_ID',axis=1)
#print(banks.isnull().sum())
bank_mode=banks.mode()
print(type(bank_mode))
print(bank_mode)
for column in banks.columns:
    print("bank_mode",bank_mode[column][0])
    banks[column].fillna(bank_mode[column][0],inplace=True)
print("Gender Value",banks['Gender'].value_counts(dropna=False))
print("Total number of nan",banks.isna().sum())
#code ends here
# --------------
# code starts here
# Mean loan amount for each (Gender, Married, Self_Employed) combination.
avg_loan_amount=banks.pivot_table(index=['Gender','Married','Self_Employed'],values=['LoanAmount'],aggfunc='mean')
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
print(banks.columns)
# Counts of approved loans for self-employed vs. non-self-employed applicants.
loan_approved_se = banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')].shape[0]
print(loan_approved_se)
loan_approved_nse = banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')].shape[0]
print(loan_approved_nse)
# NOTE(review): 614 is a hard-coded total record count; presumably it equals
# banks.shape[0] -- verify if the data file ever changes.
Loan_Status = 614
percentage_se = loan_approved_se/Loan_Status * 100
print(percentage_se)
percentage_nse = loan_approved_nse/Loan_Status * 100
print(percentage_nse)
# code ends here
# --------------
# code starts here
print(bank.columns)
# Convert the loan term from months to years and count terms of 25+ years.
loan_term= banks['Loan_Amount_Term'].apply(lambda x: x/12)
big_loan_term = loan_term[loan_term >= 25].count()
print(big_loan_term)
# code ends here
# --------------
# code starts here
# Mean applicant income and credit history, grouped by loan status.
loan_groupby = banks.groupby('Loan_Status')
print(loan_groupby)
# NOTE(review): tuple-style column selection on a GroupBy is deprecated in
# newer pandas; a list ['ApplicantIncome', 'Credit_History'] is preferred.
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
print(loan_groupby.head(5))
mean_values = loan_groupby.mean()
print(mean_values)
# code ends here
| 22.011628 | 114 | 0.713154 |
6611f2623dea4fe3358c4854f01af5dd766d12cb | 40,241 | py | Python | omxdsm/tests/test_xdsm_viewer.py | naylor-b/OpenMDAO-XDSM | 1497d0e9b1b4d7e62278f42ff036cf772b604039 | [
"Apache-2.0"
] | null | null | null | omxdsm/tests/test_xdsm_viewer.py | naylor-b/OpenMDAO-XDSM | 1497d0e9b1b4d7e62278f42ff036cf772b604039 | [
"Apache-2.0"
] | null | null | null | omxdsm/tests/test_xdsm_viewer.py | naylor-b/OpenMDAO-XDSM | 1497d0e9b1b4d7e62278f42ff036cf772b604039 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
import numpy as np
from numpy.distutils.exec_command import find_executable
import openmdao.api as om
from omxdsm import write_xdsm, write_html
from openmdao.test_suite.components.sellar import SellarNoDerivatives, SellarDis1, SellarDis2
from openmdao.test_suite.components.sellar_feature import SellarMDA
from openmdao.test_suite.scripts.circuit import Circuit
from openmdao.utils.assert_utils import assert_warning
from openmdao.utils.shell_proc import check_call
from openmdao.utils.testing_utils import use_tempdirs
try:
from pyxdsm.XDSM import XDSM
except ImportError:
XDSM = None
# Set DEBUG to True if you want to view the generated HTML and PDF output files.
DEBUG = False
# Suppress pyXDSM console output. Not suppressed in debug mode.
QUIET = not DEBUG
# If not in debug mode, tests will generate only the TeX files and not the PDFs, except for the
# PDF creation test, which is independent of this setting.
PYXDSM_OUT = 'pdf' if DEBUG else 'tex'
# Show in browser
SHOW = False
if DEBUG:
    # In debug mode, disable the @use_tempdirs decorator by rebinding the
    # name to an identity function, so generated files stay in the working
    # directory for manual inspection. (A def instead of an assigned lambda,
    # per PEP 8 E731; behavior is identical.)
    def use_tempdirs(cls):  # noqa: F811 -- intentionally shadows the import
        return cls
@unittest.skipUnless(XDSM, "The pyXDSM package is required.")
@use_tempdirs
class TestPyXDSMViewer(unittest.TestCase):
    """Tests for XDSM diagrams generated through the pyXDSM (LaTeX/TeX) backend.

    Each test builds an OpenMDAO problem, writes an XDSM with write_xdsm()
    and asserts that the expected output file exists on disk.
    """

    def test_pyxdsm_output_sides(self):
        """Makes XDSM for the Sellar problem"""
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()

        # Write output (outputs on the left)
        filename = 'xdsm_outputs_on_the_left'
        write_xdsm(prob, filename=filename, out_format=PYXDSM_OUT, show_browser=SHOW,
                   quiet=QUIET, output_side='left')
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

        filename = 'xdsm_outputs_on_the_right'
        # Write output (all outputs on the right)
        write_xdsm(prob, filename=filename, out_format=PYXDSM_OUT, show_browser=SHOW,
                   quiet=QUIET, output_side='right')
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

        filename = 'xdsm_outputs_side_mixed'
        # Write output (outputs mixed: per-component sides)
        write_xdsm(prob, filename=filename, out_format=PYXDSM_OUT, show_browser=SHOW,
                   quiet=QUIET, output_side={'optimization': 'left', 'default': 'right'})
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

    def test_pyxdsm_case_reading(self):
        """
        Writes a recorder file, and the XDSM writer makes the diagram based on the SQL file
        and not the Problem instance.
        """
        import openmdao.api as om
        filename = 'xdsm_from_sql'
        case_recording_filename = filename + '.sql'

        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)

        recorder = om.SqliteRecorder(case_recording_filename)
        prob.driver.add_recorder(recorder)

        prob.setup()
        prob.final_setup()

        # Write output from the recording, not the live Problem
        write_xdsm(case_recording_filename, filename=filename, out_format='tex',
                   show_browser=False, quiet=QUIET)

        # Check if both the recording and the TeX file were created
        self.assertTrue(os.path.isfile(case_recording_filename))
        self.assertTrue(os.path.isfile('.'.join([filename, 'tex'])))

        # Check that there are no errors when running from the command line with a recording.
        check_call('openmdao xdsm --no_browser %s' % case_recording_filename)

    def test_pyxdsm_sellar_no_recurse(self):
        """Makes XDSM for the Sellar problem, with no recursion."""
        filename = 'xdsm1'
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()

        # Write output
        write_xdsm(prob, filename=filename, out_format=PYXDSM_OUT, show_browser=SHOW,
                   recurse=False, quiet=QUIET)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

    def test_pyxdsm_pdf(self):
        """
        Makes an XDSM of the Sphere test case. It also adds a design variable, constraint and
        objective.
        """
        class Rosenbrock(om.ExplicitComponent):
            # NOTE(review): despite the name, compute() implements the sphere
            # function sum(x**2), matching the test docstring.

            def __init__(self, problem):
                super(Rosenbrock, self).__init__()
                self.problem = problem
                self.counter = 0

            def setup(self):
                self.add_input('x', np.array([1.5, 1.5]))
                self.add_output('f', 0.0)
                self.declare_partials('f', 'x', method='fd', form='central', step=1e-4)

            def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
                x = inputs['x']
                outputs['f'] = sum(x**2)

        x0 = np.array([1.2, 1.5])
        filename = 'xdsm2'

        prob = om.Problem()
        indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(problem=prob), promotes=['*'])
        indeps.add_output('x', list(x0))

        prob.model.add_subsystem('sphere', Rosenbrock(problem=prob), promotes=['*'])
        prob.model.add_subsystem('con', om.ExecComp('c=sum(x)', x=np.ones(2)), promotes=['*'])
        prob.driver = om.ScipyOptimizeDriver()
        prob.model.add_design_var('x')
        prob.model.add_objective('f')
        prob.model.add_constraint('c', lower=1.0)

        prob.setup()
        prob.final_setup()

        # requesting 'pdf', but if 'pdflatex' is not found we will only get 'tex'
        pdflatex = find_executable('pdflatex')

        # Write output
        write_xdsm(prob, filename=filename, out_format='pdf', show_browser=SHOW, quiet=QUIET)

        # Check if TeX file was created
        self.assertTrue(os.path.isfile('.'.join([filename, 'tex'])))
        # Check if PDF was created (only if pdflatex is installed)
        self.assertTrue(not pdflatex or os.path.isfile('.'.join([filename, 'pdf'])))

    def test_pyxdsm_identical_relative_names(self):
        """XDSM writing with subsystems whose relative names collide."""
        class TimeComp(om.ExplicitComponent):

            def setup(self):
                self.add_input('t_initial', val=0.)
                self.add_input('t_duration', val=1.)
                self.add_output('time', shape=(2,))

            def compute(self, inputs, outputs):
                t_initial = inputs['t_initial']
                t_duration = inputs['t_duration']
                outputs['time'][0] = t_initial
                outputs['time'][1] = t_initial + t_duration

        class Phase(om.Group):

            def setup(self):
                super(Phase, self).setup()
                indep = om.IndepVarComp()
                for var in ['t_initial', 't_duration']:
                    indep.add_output(var, val=1.0)
                self.add_subsystem('time_extents', indep, promotes_outputs=['*'])
                time_comp = TimeComp()
                self.add_subsystem('time', time_comp)
                self.connect('t_initial', 'time.t_initial')
                self.connect('t_duration', 'time.t_duration')
                self.set_order(['time_extents', 'time'])

        p = om.Problem()
        p.driver = om.ScipyOptimizeDriver()
        orbit_phase = Phase()
        p.model.add_subsystem('orbit_phase', orbit_phase)
        systems_phase = Phase()
        p.model.add_subsystem('systems_phase', systems_phase)
        systems_phase = Phase()
        p.model.add_subsystem('extra_phase', systems_phase)
        p.model.add_design_var('orbit_phase.t_initial')
        p.model.add_design_var('orbit_phase.t_duration')
        p.model.add_objective('systems_phase.time.time')
        p.setup()
        p.run_model()

        # Test non unique local names
        filename = 'pyxdsm_identical_rel_names'
        write_xdsm(p, filename, out_format=PYXDSM_OUT, quiet=QUIET, show_browser=SHOW)
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

        # Check formatting
        # Cut-characters box formatting (labels are truncated)
        filename = 'pyxdsm_cut_char'
        write_xdsm(p, filename, out_format=PYXDSM_OUT, quiet=QUIET, show_browser=SHOW,
                   box_stacking='cut_chars', box_width=15)
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

        # Max-characters box formatting (labels are wrapped)
        filename = 'pyxdsm_max_chars'
        write_xdsm(p, filename, out_format=PYXDSM_OUT, quiet=True, show_browser=SHOW,
                   box_stacking='max_chars', box_width=15)
        self.assertTrue(os.path.isfile('.'.join([filename, PYXDSM_OUT])))

    def test_model_path_and_recursion(self):
        """Exercise the model_path / recurse options, including an invalid path."""
        import openmdao.api as om

        p = om.Problem()
        model = p.model

        group = model.add_subsystem('G1', om.Group(), promotes=['*'])
        group2 = model.add_subsystem('G2', om.Group())
        group.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))
        group.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))
        group2.add_subsystem('source2', om.IndepVarComp('I', 0.1, units='A'))
        group.add_subsystem('circuit', Circuit())

        group.connect('source.I', 'circuit.I_in')
        group.connect('ground.V', 'circuit.Vg')

        model.add_design_var('ground.V')
        model.add_design_var('source.I')
        model.add_objective('circuit.D1.I')

        p.setup()

        # set some initial guesses
        p['circuit.n1.V'] = 10.
        p['circuit.n2.V'] = 1.

        p.run_model()

        # No model path, no recursion
        write_xdsm(p, 'xdsm_circuit', out_format=PYXDSM_OUT, quiet=QUIET, show_browser=SHOW,
                   recurse=False)
        self.assertTrue(os.path.isfile('.'.join(['xdsm_circuit', PYXDSM_OUT])))

        # Model path given + recursion
        write_xdsm(p, 'xdsm_circuit2', out_format=PYXDSM_OUT, quiet=QUIET, show_browser=SHOW,
                   recurse=True, model_path='G2', include_external_outputs=False)
        self.assertTrue(os.path.isfile('.'.join(['xdsm_circuit2', PYXDSM_OUT])))

        # Model path given + no recursion
        write_xdsm(p, 'xdsm_circuit3', out_format=PYXDSM_OUT, quiet=QUIET, show_browser=SHOW,
                   recurse=False, model_path='G1')
        self.assertTrue(os.path.isfile('.'.join(['xdsm_circuit3', PYXDSM_OUT])))

        # Invalid model path, should raise error
        with self.assertRaises(ValueError):
            write_xdsm(p, 'xdsm_circuit4', out_format='tex', quiet=QUIET, show_browser=SHOW,
                       recurse=False, model_path='G3')

    def test_pyxdsm_solver(self):
        """XDSM with solver blocks included, with and without recursion."""
        import openmdao.api as om

        out_format = PYXDSM_OUT
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.nonlinear_solver = om.NonlinearBlockGS()
        prob.driver = om.ScipyOptimizeDriver()
        prob.model.add_objective('obj')

        prob.setup()
        prob.run_model()

        filename = 'pyxdsm_solver'
        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

        filename = 'pyxdsm_solver2'
        # Write output (no recursion)
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, include_solver=True, recurse=False)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

    def test_pyxdsm_mda(self):
        """XDSM of the Sellar MDA with the solver shown."""
        filename = 'pyxdsm_mda'
        out_format = PYXDSM_OUT
        prob = om.Problem(model=SellarMDA())
        prob.setup()
        prob.final_setup()

        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

    def test_pyxdsm_mdf(self):
        """XDSM of a full MDF formulation (driver + MDA solver)."""
        filename = 'pyxdsm_mdf'
        out_format = PYXDSM_OUT
        prob = om.Problem(model=SellarMDA())
        model = prob.model
        prob.driver = om.ScipyOptimizeDriver()

        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)

        prob.setup()
        prob.final_setup()

        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

    def test_parallel(self):
        """XDSM of a model containing a ParallelGroup (show_parallel=True)."""
        import openmdao.api as om

        class SellarMDA(om.Group):
            """
            Group containing the Sellar MDA.
            """

            def setup(self):
                indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
                indeps.add_output('x', 1.0)
                indeps.add_output('z', np.array([5.0, 2.0]))
                cycle = self.add_subsystem('cycle', om.ParallelGroup(), promotes=['*'])
                cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['x', 'z', 'y2'],
                                    promotes_outputs=['y1'])
                cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'],
                                    promotes_outputs=['y2'])

                # Nonlinear Block Gauss Seidel is a gradient free solver
                cycle.nonlinear_solver = om.NonlinearBlockGS()

                self.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                                          z=np.array([0.0, 0.0]), x=0.0),
                                   promotes=['x', 'z', 'y1', 'y2', 'obj'])

                self.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                                   promotes=['con1', 'y1'])
                self.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                                   promotes=['con2', 'y2'])

        filename = 'pyxdsm_parallel'
        out_format = PYXDSM_OUT
        prob = om.Problem(model=SellarMDA())
        model = prob.model
        prob.driver = om.ScipyOptimizeDriver()

        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)

        prob.setup()
        prob.final_setup()

        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

    def test_execcomp(self):
        """XDSM of a model with an ExecComp, with and without LaTeX equations."""
        filename = 'pyxdsm_execcomp'
        out_format = PYXDSM_OUT
        prob = om.Problem()
        indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_output('x')
        prob.model.add_subsystem('C1', om.ExecComp(['y=2.0*x+1.'], x=2.0), promotes=['*'])
        prob.driver = om.ScipyOptimizeDriver()
        prob.model.add_design_var('x', lower=0.0, upper=10.0)
        prob.model.add_objective('y')
        prob.setup()

        # Conclude setup but don't run model.
        prob.final_setup()

        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

        # Including the expression from the ExecComp formatted in LaTeX
        write_xdsm(prob, filename=filename + "2", out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True, equations=True)
        # Check if file was created
        # BUGFIX: previously this re-checked the first file; it now checks the
        # second output ("...2"), which the equations=True call just wrote.
        self.assertTrue(os.path.isfile('.'.join([filename + "2", out_format])))

    def test_doe(self):
        """XDSM of a model driven by a DOEDriver."""
        filename = 'pyxdsm_doe'
        out_format = PYXDSM_OUT
        prob = om.Problem()
        indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_output('x')
        prob.model.add_subsystem('C1', om.ExecComp(['y=2.0*x+1.'], x=2.0), promotes=['*'])
        prob.driver = om.DOEDriver()
        prob.model.add_design_var('x', lower=0.0, upper=10.0)
        prob.model.add_objective('y')
        prob.setup()

        # Conclude setup but don't run model.
        prob.final_setup()

        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))

    def test_meta_model(self):
        """XDSM of a model containing a MetaModelStructuredComp."""
        import openmdao.api as om
        from openmdao.components.tests.test_meta_model_structured_comp import SampleMap

        filename = 'pyxdsm_meta_model'
        out_format = PYXDSM_OUT
        model = om.Group()
        ivc = om.IndepVarComp()

        mapdata = SampleMap()

        params = mapdata.param_data
        x, y, z = params
        outs = mapdata.output_data
        z = outs[0]
        ivc.add_output('x', x['default'], units=x['units'])
        ivc.add_output('y', y['default'], units=y['units'])
        ivc.add_output('z', z['default'], units=z['units'])

        model.add_subsystem('des_vars', ivc, promotes=["*"])

        comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True)

        for param in params:
            comp.add_input(param['name'], param['default'], param['values'])

        for out in outs:
            comp.add_output(out['name'], out['default'], out['values'])

        model.add_subsystem('comp', comp, promotes=["*"])
        prob = om.Problem(model)
        prob.setup()
        prob.final_setup()

        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
@use_tempdirs
class TestXDSMjsViewer(unittest.TestCase):
    def test_xdsmjs(self):
        """
        Makes XDSMjs input file for the Sellar problem.
        Data is in a separate JSON file.
        """
        filename = 'xdsmjs'  # this name is needed for XDSMjs
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        # Write output (embed_data=False -> a sidecar .json file is produced)
        write_xdsm(prob, filename=filename, out_format='html', subs=(), show_browser=SHOW,
                   embed_data=False)
        # Check that both the JSON data file and the HTML viewer were created
        self.assertTrue(os.path.isfile('.'.join([filename, 'json'])))
        self.assertTrue(os.path.isfile('.'.join([filename, 'html'])))
    def test_xdsmjs_embed_data(self):
        """
        Makes XDSMjs HTML file for the Sellar problem.
        Data is embedded into the HTML file.
        """
        filename = 'xdsmjs_embedded'  # this name is needed for XDSMjs
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        # Write output (embed_data=True -> no sidecar .json is needed)
        write_xdsm(prob, filename=filename, out_format='html', subs=(), show_browser=SHOW,
                   embed_data=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, 'html'])))
    def test_xdsmjs_embeddable(self):
        """
        Makes XDSMjs HTML file for the Sellar problem.
        The HTML file is embeddable (no head and body tags).
        """
        filename = 'xdsmjs_embeddable'  # this name is needed for XDSMjs
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        # Write output (embeddable=True strips the html/head/body wrapper)
        write_xdsm(prob, filename=filename, out_format='html', subs=(), show_browser=SHOW,
                   embed_data=True, embeddable=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, 'html'])))
    def test_html_writer_dct(self):
        """
        Makes XDSMjs input file.
        Data is in a dictionary (nodes/edges/workflow XDSMjs schema).
        """
        filename = 'xdsmjs2'  # this name is needed for XDSMjs
        # Hand-written XDSMjs data structure (bypasses the OpenMDAO model).
        data = {
            "nodes": [{"id": "Opt", "name": "Optimization", "type": "optimization"},
                      {"id": "MDA", "name": "MDA", "type": "mda"},
                      {"id": "DA1", "name": "Analysis 1"},
                      {"id": "DA2", "name": "Analysis 2"},
                      {"id": "DA3", "name": "Analysis 3"},
                      {"id": "Func", "name": "Functions"}
                      ],
            "edges": [{"from": "Opt", "to": "DA1", "name": "x_0,x_1"},
                      {"from": "DA1", "to": "DA3", "name": "x_share"},
                      {"from": "DA3", "to": "DA1", "name": "y_1^2"},
                      {"from": "MDA", "to": "DA1", "name": "x_2"},
                      {"from": "Func", "to": "Opt", "name": "f,c"},
                      {"from": "_U_", "to": "DA1", "name": "x_0"},
                      {"from": "DA3", "to": "_U_", "name": "y_0"}
                      ],
            "workflow": ["Opt", ["MDA", "DA1", "DA2", "DA3"], "Func"]
        }
        outfile = '.'.join([filename, 'html'])
        write_html(outfile=outfile, source_data=data)
        self.assertTrue(os.path.isfile(outfile))
    def test_html_writer_str(self):
        """
        Makes XDSMjs input file.
        Data is a string (same schema as the dict variant, pre-serialized).
        """
        filename = 'xdsmjs4'  # this name is needed for XDSMjs
        data = ("{'nodes': [{'type': 'optimization', 'id': 'Opt', 'name': 'Optimization'}, "
                "{'type': 'mda', 'id': 'MDA', 'name': 'MDA'}, {'id': 'DA1', 'name': 'Analysis 1'}, "
                "{'id': 'DA2', 'name': 'Analysis 2'}, {'id': 'DA3', 'name': 'Analysis 3'}, "
                "{'id': 'Func', 'name': 'Functions'}], "
                "'edges': [{'to': 'DA1', 'from': 'Opt', 'name': 'x_0,x_1'}, "
                "{'to': 'DA3', 'from': 'DA1', 'name': 'x_share'}, "
                "{'to': 'DA1', 'from': 'DA3', 'name': 'y_1^2'}, "
                "{'to': 'DA1', 'from': 'MDA', 'name': 'x_2'}, "
                "{'to': 'Opt', 'from': 'Func', 'name': 'f,c'}, "
                "{'to': 'DA1', 'from': '_U_', 'name': 'x_0'}, "
                "{'to': '_U_', 'from': 'DA3', 'name': 'y_0'}], "
                "'workflow': ['Opt', ['MDA', 'DA1', 'DA2', 'DA3'], 'Func']}")
        outfile = '.'.join([filename, 'html'])
        write_html(outfile=outfile, source_data=data)
        self.assertTrue(os.path.isfile(outfile))
    def test_pyxdsm_identical_relative_names(self):
        """HTML XDSM with colliding relative subsystem names.

        NOTE(review): name says "pyxdsm" but it writes out_format='html'
        (XDSMjs) — presumably copied from the pyXDSM test class; confirm.
        """
        class TimeComp(om.ExplicitComponent):
            def setup(self):
                self.add_input('t_initial', val=0.)
                self.add_input('t_duration', val=1.)
                self.add_output('time', shape=(2,))

            def compute(self, inputs, outputs):
                t_initial = inputs['t_initial']
                t_duration = inputs['t_duration']
                outputs['time'][0] = t_initial
                outputs['time'][1] = t_initial + t_duration

        class Phase(om.Group):
            def setup(self):
                super(Phase, self).setup()
                indep = om.IndepVarComp()
                for var in ['t_initial', 't_duration']:
                    indep.add_output(var, val=1.0)
                self.add_subsystem('time_extents', indep, promotes_outputs=['*'])
                time_comp = TimeComp()
                self.add_subsystem('time', time_comp)
                self.connect('t_initial', 'time.t_initial')
                self.connect('t_duration', 'time.t_duration')
                self.set_order(['time_extents', 'time'])

        p = om.Problem()
        p.driver = om.ScipyOptimizeDriver()
        orbit_phase = Phase()
        p.model.add_subsystem('orbit_phase', orbit_phase)
        systems_phase = Phase()
        p.model.add_subsystem('systems_phase', systems_phase)
        systems_phase = Phase()
        p.model.add_subsystem('extra_phase', systems_phase)
        p.model.add_design_var('orbit_phase.t_initial')
        p.model.add_design_var('orbit_phase.t_duration')
        p.model.add_objective('systems_phase.time.time')
        p.setup()
        p.run_model()

        write_xdsm(p, 'xdsmjs_orbit', out_format='html', show_browser=SHOW)
        self.assertTrue(os.path.isfile('.'.join(['xdsmjs_orbit', 'html'])))
    def test_xdsmjs_mda(self):
        """HTML XDSM of the Sellar MDA (embedded, embeddable, solver shown)."""
        filename = 'xdsmjs_mda'
        out_format = 'html'
        prob = om.Problem(model=SellarMDA())
        prob.setup()
        prob.final_setup()
        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, embed_data=True, embeddable=True, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
    def test_xdsmjs_mdf(self):
        """HTML XDSM of a full MDF formulation (driver + MDA solver)."""
        filename = 'xdsmjs_mdf'
        out_format = 'html'
        prob = om.Problem(model=SellarMDA())
        model = prob.model
        prob.driver = om.ScipyOptimizeDriver()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format,
                   show_browser=SHOW, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
    def test_xdsm_solver(self):
        """HTML XDSM with the nonlinear solver block included."""
        import openmdao.api as om
        filename = 'xdsmjs_solver'
        out_format = 'html'
        prob = om.Problem(model=SellarNoDerivatives())
        prob.model.nonlinear_solver = om.NonlinearBlockGS()
        prob.driver = om.ScipyOptimizeDriver()
        prob.model.add_objective('obj')
        prob.setup()
        prob.run_model()
        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format,
                   show_browser=SHOW, include_solver=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
    def test_parallel(self):
        """HTML XDSM of a model containing a ParallelGroup (show_parallel=True)."""
        import openmdao.api as om

        class SellarMDA(om.Group):
            """
            Group containing the Sellar MDA.
            """
            def setup(self):
                indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
                indeps.add_output('x', 1.0)
                indeps.add_output('z', np.array([5.0, 2.0]))
                cycle = self.add_subsystem('cycle', om.ParallelGroup(), promotes=['*'])
                cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['x', 'z', 'y2'],
                                    promotes_outputs=['y1'])
                cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'],
                                    promotes_outputs=['y2'])
                # Nonlinear Block Gauss Seidel is a gradient free solver
                cycle.nonlinear_solver = om.NonlinearBlockGS()
                self.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                                          z=np.array([0.0, 0.0]), x=0.0),
                                   promotes=['x', 'z', 'y1', 'y2', 'obj'])
                self.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                                   promotes=['con1', 'y1'])
                self.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                                   promotes=['con2', 'y2'])

        filename = 'xdsmjs_parallel'
        out_format = 'html'
        prob = om.Problem(model=SellarMDA())
        model = prob.model
        prob.driver = om.ScipyOptimizeDriver()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        # Write output
        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
def test_execcomp(self):
filename = 'xdsmjs_execcomp'
out_format = 'html'
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_output('x')
prob.model.add_subsystem('C1', om.ExecComp(['y=2.0*x+1.'], x=2.0), promotes=['*'])
prob.driver = om.ScipyOptimizeDriver()
prob.model.add_design_var('x', lower=0.0, upper=10.0)
prob.model.add_objective('y')
prob.setup()
# Conclude setup but don't run model.
prob.final_setup()
write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
show_browser=SHOW, show_parallel=True)
# Check if file was created
self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
def test_doe(self):
filename = 'xdsmjs_doe'
out_format = 'html'
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_output('x')
prob.model.add_subsystem('C1', om.ExecComp(['y=2.0*x+1.'], x=2.0), promotes=['*'])
prob.driver = om.DOEDriver()
prob.model.add_design_var('x', lower=0.0, upper=10.0)
prob.model.add_objective('y')
prob.setup()
# Conclude setup but don't run model.
prob.final_setup()
write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
show_browser=SHOW, show_parallel=True)
# Check if file was created
self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
    def test_meta_model(self):
        """Diagram for a model containing a MetaModelStructuredComp."""
        import openmdao.api as om
        from openmdao.components.tests.test_meta_model_structured_comp import SampleMap

        filename = 'xdsmjs_meta_model'
        out_format = 'html'
        model = om.Group()
        ivc = om.IndepVarComp()

        mapdata = SampleMap()

        params = mapdata.param_data
        x, y, z = params
        outs = mapdata.output_data
        z = outs[0]  # NOTE: deliberately rebinds `z` from the 3rd param to the 1st output
        ivc.add_output('x', x['default'], units=x['units'])
        ivc.add_output('y', y['default'], units=y['units'])
        ivc.add_output('z', z['default'], units=z['units'])

        model.add_subsystem('des_vars', ivc, promotes=["*"])

        comp = om.MetaModelStructuredComp(method='slinear', extrapolate=True)

        # Declare the interpolation inputs/outputs from the sample data.
        for param in params:
            comp.add_input(param['name'], param['default'], param['values'])

        for out in outs:
            comp.add_output(out['name'], out['default'], out['values'])

        model.add_subsystem('comp', comp, promotes=["*"])
        prob = om.Problem(model)
        prob.setup()
        prob.final_setup()

        write_xdsm(prob, filename=filename, out_format=out_format, quiet=QUIET,
                   show_browser=SHOW, show_parallel=True)
        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, out_format])))
    def test_circuit_recurse(self):
        """Recursive diagram of a circuit group model."""
        # Implicit component is also tested here
        import openmdao.api as om

        p = om.Problem()
        model = p.model

        model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))
        model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))
        model.add_subsystem('circuit', Circuit())

        model.connect('source.I', 'circuit.I_in')
        model.connect('ground.V', 'circuit.Vg')

        model.add_design_var('ground.V')
        model.add_design_var('source.I')
        model.add_objective('circuit.D1.I')

        p.setup()

        # set some initial guesses
        p['circuit.n1.V'] = 10.
        p['circuit.n2.V'] = 1.

        p.run_model()

        # recurse=True also renders the subsystems inside the 'circuit' group.
        write_xdsm(p, 'xdsmjs_circuit', out_format='html', quiet=QUIET, show_browser=SHOW,
                   recurse=True)
        self.assertTrue(os.path.isfile('.'.join(['xdsmjs_circuit', 'html'])))
    def test_legend_and_class_names(self):
        """Diagrams with a legend and with component class names enabled."""
        import openmdao.api as om

        p = om.Problem()
        model = p.model

        model.add_subsystem('ground', om.IndepVarComp('V', 0., units='V'))
        model.add_subsystem('source', om.IndepVarComp('I', 0.1, units='A'))
        model.add_subsystem('circuit', Circuit())

        model.connect('source.I', 'circuit.I_in')
        model.connect('ground.V', 'circuit.Vg')

        model.add_design_var('ground.V')
        model.add_design_var('source.I')
        model.add_objective('circuit.D1.I')

        p.setup()

        # set some initial guesses
        p['circuit.n1.V'] = 10.
        p['circuit.n2.V'] = 1.

        p.run_model()

        # legend=True adds a legend box to the diagram.
        write_xdsm(p, 'xdsmjs_circuit_legend', out_format='html', quiet=QUIET, show_browser=SHOW,
                   recurse=True, legend=True)
        self.assertTrue(os.path.isfile('.'.join(['xdsmjs_circuit_legend', 'html'])))

        # class_names=True labels blocks with their component class names.
        write_xdsm(p, 'xdsmjs_circuit_class_names', out_format='html', quiet=QUIET,
                   show_browser=SHOW, recurse=True, class_names=True)
        self.assertTrue(os.path.isfile('.'.join(['xdsmjs_circuit_class_names', 'html'])))
    def test_xdsmjs_right_outputs(self):
        """Makes XDSM for the Sellar problem"""
        filename = 'xdsmjs_outputs_on_the_right'
        prob = om.Problem()
        prob.model = model = SellarNoDerivatives()
        model.add_design_var('z', lower=np.array([-10.0, 0.0]),
                             upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
        model.add_design_var('x', lower=0.0, upper=10.0)
        model.add_objective('obj')
        model.add_constraint('con1', equals=np.zeros(1))
        model.add_constraint('con2', upper=0.0)
        prob.setup()
        prob.final_setup()
        msg = 'Right side outputs not implemented for XDSMjs.'

        # XDSMjs does not support output_side='right': a warning is expected,
        # but the file should still be written.
        with assert_warning(Warning, msg):
            write_xdsm(prob, filename=filename, out_format='html', show_browser=SHOW,
                       quiet=QUIET, output_side='right')

        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, 'html'])))
def test_wrong_out_format(self):
"""Incorrect output format error."""
filename = 'xdsm_wrong_format' # this name is needed for XDSMjs
prob = om.Problem()
prob.model = SellarNoDerivatives()
prob.setup()
prob.final_setup()
# no output checking, just make sure no exceptions raised
with self.assertRaises(ValueError):
write_xdsm(prob, filename=filename, out_format='jpg', subs=(), show_browser=SHOW)
    def test_command(self):
        """
        Check that there are no errors when running from the command line with a script.
        """
        from openmdao.test_suite.scripts import sellar
        # Under Python 2, __file__ may point at the compiled .pyc; map it back
        # to the source file so the CLI receives a .py path.
        filename = os.path.abspath(sellar.__file__).replace('.pyc', '.py')  # PY2
        check_call('openmdao xdsm --no_browser %s' % filename)
@unittest.skipUnless(XDSM, "The pyXDSM package is required.")
@use_tempdirs
class TestCustomXDSMViewer(unittest.TestCase):
    """Tests for plugging user-defined writer classes into `write_xdsm`."""

    def test_custom_writer(self):
        """A subclassed writer is accepted; wrong types and unknown names are handled."""
        from openmdao_xdsm.xdsm_writer import XDSMjsWriter

        class CustomWriter(XDSMjsWriter):
            """Customized XDSM writer, based on the XDSMjs writer."""

            @staticmethod
            def format_block(names, **kwargs):
                """This method is overwritten, to implement some different formatting."""
                return [name.upper() for name in names]

        prob = om.Problem()
        prob.model = SellarNoDerivatives()
        prob.setup()
        prob.final_setup()

        my_writer = CustomWriter()
        filename = 'xdsm_custom_writer'  # this name is needed for XDSMjs
        # Write output
        write_xdsm(prob, filename=filename, writer=my_writer, show_browser=SHOW)

        # Check if file was created
        self.assertTrue(os.path.isfile('.'.join([filename, my_writer.extension])))

        # Check that error is raised in case of wrong writer type
        filename = 'xdsm_custom_writer2'  # this name is needed for XDSMjs
        with self.assertRaises(TypeError):  # Wrong type passed for writer
            write_xdsm(prob, filename=filename, writer=1, subs=(), show_browser=SHOW)

        # Check warning, if settings for custom writer are not found
        my_writer2 = CustomWriter(name='my_writer')
        filename = 'xdsm_custom_writer3'

        # NOTE(review): the expected text lacks a space ("forcharacter"); it must
        # match the library's actual warning string exactly — confirm upstream.
        msg = 'Writer name "my_writer" not found, there will be no character ' \
              'substitutes used. Add "my_writer" to your settings, or provide a tuple for' \
              'character substitutes.'
        # Write output
        with assert_warning(Warning, msg):
            write_xdsm(prob, filename=filename, writer=my_writer2, show_browser=SHOW)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 38.251901 | 100 | 0.585199 |
4f21001becfd9476ad6b20a4c27f4f5c7622f70a | 92 | py | Python | src/setup.py | SandroWissmann/Bricks-Py | 640140f9fd4a947b175a090563113108cc047f89 | [
"MIT"
] | null | null | null | src/setup.py | SandroWissmann/Bricks-Py | 640140f9fd4a947b175a090563113108cc047f89 | [
"MIT"
] | null | null | null | src/setup.py | SandroWissmann/Bricks-Py | 640140f9fd4a947b175a090563113108cc047f89 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Minimal packaging configuration: auto-discover all packages under this directory.
setup(name="bricks", packages=find_packages())
| 23 | 46 | 0.804348 |
a6f5e791299d0c55de717f2a12dbb1ac756ae437 | 1,786 | py | Python | ff_wrapper/logbuffer.py | hidnoiz/ff_wrapper | 4571a2141b3e6cb21ac40f5ead5497b41f1d8f4b | [
"MIT"
] | null | null | null | ff_wrapper/logbuffer.py | hidnoiz/ff_wrapper | 4571a2141b3e6cb21ac40f5ead5497b41f1d8f4b | [
"MIT"
] | null | null | null | ff_wrapper/logbuffer.py | hidnoiz/ff_wrapper | 4571a2141b3e6cb21ac40f5ead5497b41f1d8f4b | [
"MIT"
] | null | null | null | from typing import List
def progress_str_to_dict(progress_log: str) -> dict:
progress_log = progress_log.strip().split(' ')
result = {}
for line in progress_log:
k, v = line.split('=')
result[k] = v
return result
class LogBuffer:
def __init__(self, size_max):
self._next = 0
self.max = size_max
self._data = [None] * size_max
def append(self, item):
self._data[self._next % self.max] = item
self._next += 1
def get_last_items(self, n) -> (List[str], int):
# Получить n количество последних строк
# Возвращает список строк и текущую позицию лога
# Если количество запрашиваемых элементов больше, чем максимальное количество строк
if n > self.max:
return self.get_all()
# Если нет переполнения или это последний элемент перед ним
if self._next <= self.max:
if self._next <= n:
return self._data[:self._next], self._next
return self._data[self._next-n:self._next], self._next
# Переполнение
else:
split = self._next % self.max
if split == 0:
return self._data[-n:], self._next
elif n > split:
return self._data[self.max - (n - split):] + self._data[:split], self._next
else:
return self._data[split-n:split], self._next
def get_all(self) -> (List[str], int):
# Возвращает список строк и текущую позицию лога
if self._next < self.max:
return self._data[:self._next], self._next
split = self._next % self.max
return self._data[split:] + self._data[:split], self._next
def get_current_position(self) -> int:
return self._next
| 32.472727 | 91 | 0.585666 |
5532bb8ac7abbe470df4e6c8382d9ea18ce7a2df | 1,056 | py | Python | favorProject/tests/factories.py | eric-newcomer/favor | 0435b8bb211071cc6508d598ee65228c50f91e2f | [
"MIT"
] | 2 | 2020-03-24T05:06:09.000Z | 2021-06-15T04:26:51.000Z | favorProject/tests/factories.py | eric-newcomer/favor | 0435b8bb211071cc6508d598ee65228c50f91e2f | [
"MIT"
] | 25 | 2020-06-06T01:34:05.000Z | 2022-03-12T00:20:44.000Z | favorProject/tests/factories.py | eric-newcomer/favor | 0435b8bb211071cc6508d598ee65228c50f91e2f | [
"MIT"
] | null | null | null | import factory
from django.contrib.auth.models import User
from favorApp.models import Favor
from django.utils import timezone
class UserFactory(factory.Factory):
    """Factory producing plain Django ``User`` instances with unique credentials."""

    class Meta:
        model = User

    first_name = 'John'
    last_name = 'Doe'
    id = factory.Sequence(lambda n: n)
    # Sequences guarantee a unique email/username per generated user.
    email = factory.Sequence(lambda n: 'johndoe{0}@johndoe.com'.format(n))
    username = factory.Sequence(lambda n: 'johndoe{0}'.format(n))
    # Hash the password via set_password() instead of storing plain text.
    password = factory.PostGenerationMethodCall('set_password', 'password')
class AdminFactory(factory.DjangoModelFactory):
    """Factory producing a superuser account with fixed credentials."""

    class Meta:
        model = User

    email = 'admin@admin.com'
    username = 'admin'
    # NOTE(review): assigned directly, unlike UserFactory's set_password call —
    # presumably hashed elsewhere or intentional for tests; confirm.
    password = 'adm1n'
    is_superuser = True
    is_staff = True
    is_active = True
class FavorFactory(factory.Factory):
    """Factory producing ``Favor`` instances with a due date two weeks out."""

    class Meta:
        model = Favor

    id = factory.Sequence(lambda n: n)
    title = factory.Sequence(lambda n: 'Massage{}'.format(n))
    description = 'Massage'
    number_of_favors = 3
    # Evaluate lazily so each Favor gets a date relative to its own creation
    # time. The previous version computed `timezone.now()` once at class
    # definition (module import), which goes stale in long-running sessions.
    date = factory.LazyFunction(lambda: timezone.now() + timezone.timedelta(weeks=2))
    location = 'location'
| 25.756098 | 75 | 0.675189 |
37eb4a482cd853b6b5b7aaaee12705a4a5c2ea98 | 2,162 | py | Python | 101-200/121-130/128-longestConsecutiveSequence/longestConsecutiveSequence.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 101-200/121-130/128-longestConsecutiveSequence/longestConsecutiveSequence.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 101-200/121-130/128-longestConsecutiveSequence/longestConsecutiveSequence.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | class Solution(object):
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = set(nums)
best = 0
for num in nums:
if num - 1 not in nums:
y = num + 1
while y in nums:
y += 1
best = max(best, y-num)
return best
class Solution(object):
    """Longest consecutive sequence maintained as a sorted list of merged ranges.

    Each element of ``ranges`` is a ``[start, end]`` interval of consecutive
    values seen so far; new numbers extend, merge, or insert intervals.
    """

    def binary_search(self, alist, x, left, right):
        """Return the insertion index of interval `x` in `alist` by start value."""
        while left < right:
            # Bug fix: use floor division — `/` yields a float in Python 3,
            # which cannot be used as a list index.
            mid = left + (right - left) // 2
            if alist[mid][0] == x[0]:
                return mid
            elif alist[mid][0] < x[0]:
                left = mid + 1
            else:
                right = mid
        return left

    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0

        ranges = [[nums[0], nums[0]]]

        for num in nums[1:]:
            length = len(ranges)
            index = self.binary_search(ranges, [num, num], 0, length)

            if index == 0:
                # Before (or touching) the first interval.
                if ranges[index][0] == num + 1:
                    ranges[index][0] = num
                elif num < ranges[0][0] - 1:
                    ranges.insert(0, [num, num])
            elif index == length:
                # After (or touching) the last interval.
                if ranges[index-1][1] == num - 1:
                    ranges[index-1][1] = num
                elif num > ranges[index-1][1] + 1:
                    ranges.append([num, num])
            else:
                # Between two intervals: possibly merge them.
                if ranges[index-1][1] == num - 1 and ranges[index][0] == num + 1:
                    ranges[index-1][1] = ranges[index][1]
                    del ranges[index]
                elif ranges[index-1][1] == num - 1:
                    ranges[index-1][1] = num
                elif ranges[index][0] == num + 1:
                    ranges[index][0] = num
                elif ranges[index-1][1] + 1 < num < ranges[index][0] - 1:
                    ranges.insert(index, [num, num])

        maximum = 0
        for start, end in ranges:
            maximum = max(maximum, end-start+1)

        return maximum
| 30.027778 | 81 | 0.418131 |
f252bd3a9a28837fd81e8e266ba1ed1dc708a34c | 2,928 | py | Python | theta/exampleFortran/src/driver.py | FilippoSimini/smartsim_alcf | 213dc01fb5aa9d330995621a69b8316dd2a7ddde | [
"BSD-3-Clause"
] | 3 | 2021-11-04T19:19:37.000Z | 2021-11-17T16:44:49.000Z | theta/exampleFortran/src/driver.py | FilippoSimini/smartsim_alcf | 213dc01fb5aa9d330995621a69b8316dd2a7ddde | [
"BSD-3-Clause"
] | null | null | null | theta/exampleFortran/src/driver.py | FilippoSimini/smartsim_alcf | 213dc01fb5aa9d330995621a69b8316dd2a7ddde | [
"BSD-3-Clause"
] | null | null | null | import os, sys, time
from smartsim import Experiment
from smartsim.settings import AprunSettings
from smartsim.database import CobaltOrchestrator
# Parse command line arguments
ppn = int(sys.argv[1])        # tasks per node (used for the simulation aprun)
nodes = int(sys.argv[2])      # total nodes in the job allocation
allprocs = int(sys.argv[3])   # total process count (not used below)
dbnodes = int(sys.argv[4])    # nodes reserved for the database
simnodes = int(sys.argv[5])   # nodes reserved for the simulation (data producer)
mlnodes = int(sys.argv[6])    # nodes reserved for ML training/inference
simprocs = int(sys.argv[7])   # processes used by the simulation
mlprocs = int(sys.argv[8])    # processes used by the ML tasks
# The three node groups must fit into the allocation.
assert (dbnodes + simnodes + mlnodes <= nodes) and (mlnodes >= 0), "The nodes requested are not enough."
# Parse the Cobalt node specification into a flat list of node ids.
def parseNodeList():
    """Expand COBALT_PARTNAME (e.g. "4,7-9") into (['4', '7', '8', '9'], 4)."""
    spec = os.environ['COBALT_PARTNAME']
    nodelist = []
    for part in spec.split(','):
        if part.find('-') > 0:
            # Inclusive range of node ids, e.g. "7-9".
            bounds = part.split('-')
            lo, hi = int(bounds[0]), int(bounds[1])
            nodelist.extend(str(i) for i in range(lo, hi + 1))
        else:
            nodelist.append(part)
    return nodelist, len(nodelist)
# Get nodes of this allocation (job) and split them between the tasks
nodelist, nnodes = parseNodeList()
print(f"\nRunning on {nnodes} total nodes on Theta")
print(nodelist, "\n")
# Partition the allocation: database nodes first, then simulation, then ML.
dbNodes = ','.join(nodelist[0: dbnodes])
simNodes = ','.join(nodelist[dbnodes: dbnodes + simnodes])
mlNodes = ','.join(nodelist[dbnodes + simnodes: dbnodes + simnodes + mlnodes])
print(f"Database running on {dbnodes} nodes:")
print(dbNodes)
print(f"Simulatiom running on {simnodes} nodes:")
print(simNodes)
print(f"ML running on {mlnodes} nodes:")
print(mlNodes, "\n")

# Set up database and start it
PORT = 6780
exp = Experiment("train-example", launcher="cobalt")
db = CobaltOrchestrator(port=PORT, batch=False, db_nodes=dbnodes, run_args={"node-list": dbNodes})
print("Starting database ...")
exp.start(db)
print("Done\n")

# data producer: launched non-blocking so the consumer can start alongside it
print("Launching data producer ...")
Ftn_exe = 'src/dataLoaderFtn'
aprun = AprunSettings(Ftn_exe, run_args={"node-list": simNodes})
aprun.set_tasks(simprocs)
aprun.set_tasks_per_node(ppn)
load_data = exp.create_model("load_data", aprun)
exp.start(load_data, summary=False, block=False)
print("Done\n")

# data consumer: blocks until training finishes
print("Launching data consumer ...")
ml_exe = "src/trainPar.py"
aprunML = AprunSettings("python",
                        exe_args=ml_exe,
                        run_args={"node-list": mlNodes})
aprunML.set_tasks(mlprocs)
aprunML.set_tasks_per_node(mlprocs)
ml_model = exp.create_model("train_model", aprunML)
exp.start(ml_model, summary=False, block=True)
print("Done\n")

# inference: runs on the same ML nodes after training completes
print("Starting inference ...")
inf_exe = "src/inference.py"
aprun = AprunSettings("python",
                      exe_args=inf_exe,
                      run_args={"node-list": mlNodes})
aprun.set_tasks(mlprocs)
aprun.set_tasks_per_node(mlprocs)
inf_model = exp.create_model("inference", aprun)
exp.start(inf_model, summary=False, block=True)
print(f" Done \n")

# Stop database
print("Stopping the Orchestrator ...")
exp.stop(db)
print("Done")
print("Quitting")
| 29.28 | 104 | 0.696721 |
1987ce4046d17516960692e56454602185051eff | 26 | py | Python | terrascript/grafana/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/grafana/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/grafana/__init__.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | 1 | 2018-11-15T16:23:05.000Z | 2018-11-15T16:23:05.000Z | """2019-05-28 10:49:41"""
| 13 | 25 | 0.538462 |
7587eaaf88e0305b1ae5174128ab4388ad6462a9 | 5,233 | py | Python | app_backend/forms/production_sensitive.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_backend/forms/production_sensitive.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_backend/forms/production_sensitive.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: production_sensitive.py
@time: 2018-08-14 16:34
"""
from __future__ import unicode_literals
from flask_babel import lazy_gettext as _
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, IntegerField, SelectField
from wtforms.validators import DataRequired, Length, InputRequired
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_STR_OPTION
class ProductionSensitiveSearchForm(FlaskForm):
    """
    Search form: filter sensitive products by customer and product attributes.
    """
    # Hidden field carrying the selected customer company id (0 = none selected).
    customer_cid = IntegerField(
        _('customer company id'),
        validators=[
            InputRequired(),
        ],
        default=0,
        description=_('customer company id'),
        render_kw={
            'rel': 'tooltip',
            'title': _('customer company id'),
            'placeholder': _('customer company id'),
            'autocomplete': 'off',
            'type': 'hidden',
        }
    )
    # Display-only companion of customer_cid (free-text company name).
    customer_company_name = StringField(
        _('customer company name'),
        validators=[],
        description=_('customer company name'),
        render_kw={
            'placeholder': _('customer company name'),
            'rel': 'tooltip',
            'title': _('customer company name'),
        }
    )
    production_brand = SelectField(
        _('production brand'),
        validators=[],  # string type, optional
        default=DEFAULT_SEARCH_CHOICES_STR_OPTION,
        description=_('production brand'),
        render_kw={
            'rel': 'tooltip',
            'title': _('production brand'),
        }
    )
    production_model = StringField(
        _('production model'),
        validators=[],
        description=_('production model'),
        render_kw={
            'placeholder': _('production model'),
            'rel': 'tooltip',
            'title': _('production model'),
            'autocomplete': 'off',
        }
    )
    # Requested operation code (0 = plain search).
    op = IntegerField(
        _('operation'),
        validators=[],
        default=0,
    )
    # 1-based page number for result pagination.
    page = IntegerField(
        _('page'),
        validators=[],
        default=1,
    )
class ProductionSensitiveAddForm(FlaskForm):
    """
    Creation form: register a sensitive product for a customer.
    """
    # Hidden field carrying the target customer company id.
    customer_cid = IntegerField(
        _('customer company id'),
        validators=[
            DataRequired(),
        ],
        default=0,
        description=_('customer company id'),
        render_kw={
            'rel': 'tooltip',
            'title': _('customer company id'),
            'placeholder': _('customer company id'),
            'autocomplete': 'off',
            'type': 'hidden',
        }
    )
    # Display-only companion of customer_cid.
    customer_company_name = StringField(
        _('customer company name'),
        validators=[],
        description=_('customer company name'),
        render_kw={
            'placeholder': _('customer company name'),
            'rel': 'tooltip',
            'title': _('customer company name'),
            'autocomplete': 'off',
        }
    )
    # Hidden reference to the underlying production record.
    production_id = IntegerField(
        _('production id'),
        validators=[
            DataRequired(),
        ],
        render_kw={
            'type': 'hidden',
        }
    )
    # Read-only: filled from the selected production record.
    production_brand = StringField(
        _('production brand'),
        validators=[
            DataRequired(),
        ],
        default='',
        description='产品品牌(例如:SKF、FAG、NSK...)',
        render_kw={
            'placeholder': _('production brand'),
            'rel': 'tooltip',
            'title': _('production brand'),
            'autocomplete': 'off',
            'readonly': 'readonly',
        }
    )
    production_model = StringField(
        _('production model'),
        validators=[
            DataRequired(),
        ],
        default='',
        description='产品型号(例如:7008CEGA/HCP4A)',
        render_kw={
            'placeholder': _('production model'),
            'rel': 'tooltip',
            'title': _('production model'),
            'autocomplete': 'off',
        }
    )
    # Read-only stock-keeping unit, length 2-16.
    production_sku = StringField(
        _('production sku'),
        validators=[
            DataRequired(),
            Length(min=2, max=16),
        ],
        description='单位(Pcs:个,Pair:对,Set:组)',
        render_kw={
            'placeholder': _('production sku'),
            'rel': 'tooltip',
            'title': _('production sku'),
            'autocomplete': 'off',
            'readonly': 'readonly',
        }
    )
    # Optional free-text remark.
    note = StringField(
        _('production note'),
        validators=[],
        default='',
        description='产品备注(例如:最小起订量12个)',
        render_kw={
            'placeholder': _('production note'),
            'rel': 'tooltip',
            'title': _('production note'),
            'autocomplete': 'off',
        }
    )
class ProductionSensitiveEditForm(ProductionSensitiveAddForm):
    """
    Edit form: all creation fields plus the record id and timestamps.
    """
    # Hidden primary key of the record being edited.
    id = IntegerField(
        _('production sensitive id'),
        validators=[
            DataRequired(),
        ],
        render_kw={
            'type': 'hidden',
        }
    )
    create_time = DateField(
        _('create time'),
        validators=[],
        description=_('create time')
    )
    update_time = DateField(
        _('update time'),
        validators=[],
        description=_('update time')
    )
| 25.778325 | 69 | 0.515574 |
696f848af010e1d6fbd4fca0c416f71fbb7167e3 | 77,464 | py | Python | vectorbt/generic/accessors.py | davidandreoletti/vectorbt | 0cd596e1be975d4af6379d883090ffb5b7375d08 | [
"Apache-2.0"
] | null | null | null | vectorbt/generic/accessors.py | davidandreoletti/vectorbt | 0cd596e1be975d4af6379d883090ffb5b7375d08 | [
"Apache-2.0"
] | null | null | null | vectorbt/generic/accessors.py | davidandreoletti/vectorbt | 0cd596e1be975d4af6379d883090ffb5b7375d08 | [
"Apache-2.0"
] | null | null | null | """Custom pandas accessors.
Methods can be accessed as follows:
* `GenericSRAccessor` -> `pd.Series.vbt.*`
* `GenericDFAccessor` -> `pd.DataFrame.vbt.*`
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.generic.accessors.GenericAccessor.rolling_mean
>>> pd.Series([1, 2, 3, 4]).vbt.rolling_mean(2)
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
```
The accessors inherit `vectorbt.base.accessors` and are inherited by more
specialized accessors, such as `vectorbt.signals.accessors` and `vectorbt.returns.accessors`.
!!! note
Input arrays can be of any type, but most output arrays are `np.float64`.
Grouping is only supported by the methods that accept the `group_by` argument.
Run for the examples below:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime, timedelta
>>> df = pd.DataFrame({
... 'a': [1, 2, 3, 4, 5],
... 'b': [5, 4, 3, 2, 1],
... 'c': [1, 2, 3, 2, 1]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]))
>>> df
a b c
2020-01-01 1 5 1
2020-01-02 2 4 2
2020-01-03 3 3 3
2020-01-04 4 2 2
2020-01-05 5 1 1
>>> index = [datetime(2020, 1, 1) + timedelta(days=i) for i in range(10)]
>>> sr = pd.Series(np.arange(len(index)), index=index)
>>> sr
2020-01-01 0
2020-01-02 1
2020-01-03 2
2020-01-04 3
2020-01-05 4
2020-01-06 5
2020-01-07 6
2020-01-08 7
2020-01-09 8
2020-01-10 9
dtype: int64
```"""
import numpy as np
import pandas as pd
from scipy import stats
from numba.typed import Dict
import warnings
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import (
Binarizer,
MinMaxScaler,
MaxAbsScaler,
Normalizer,
RobustScaler,
StandardScaler,
QuantileTransformer,
PowerTransformer
)
from vectorbt import _typing as tp
from vectorbt.utils import checks
from vectorbt.utils.config import merge_dicts, resolve_dict
from vectorbt.utils.figure import make_figure, make_subplots
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.base import index_fns, reshape_fns
from vectorbt.base.accessors import BaseAccessor, BaseDFAccessor, BaseSRAccessor
from vectorbt.base.class_helpers import add_nb_methods
from vectorbt.generic import plotting, nb
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.generic.splitters import SplitterT, RangeSplitter, RollingSplitter, ExpandingSplitter
from vectorbt.records.mapped_array import MappedArray
try:  # pragma: no cover
    # Adapted from https://github.com/quantopian/empyrical/blob/master/empyrical/utils.py
    import bottleneck as bn

    # Prefer bottleneck's C-accelerated NaN-aware reductions when available.
    nanmean = bn.nanmean
    nanstd = bn.nanstd
    nansum = bn.nansum
    nanmax = bn.nanmax
    nanmin = bn.nanmin
    nanmedian = bn.nanmedian
    nanargmax = bn.nanargmax
    nanargmin = bn.nanargmin
except ImportError:
    # slower numpy
    nanmean = np.nanmean
    nanstd = np.nanstd
    nansum = np.nansum
    nanmax = np.nanmax
    nanmin = np.nanmin
    nanmedian = np.nanmedian
    nanargmax = np.nanargmax
    nanargmin = np.nanargmin
class TransformerT(tp.Protocol):
    """Structural (duck-typed) interface of scikit-learn-style transformers.

    Any class that accepts keyword-only construction and exposes `transform`
    and `fit_transform` returning a 2-dim array satisfies this protocol.
    """

    def __init__(self, **kwargs) -> None:
        ...

    def transform(self, *args, **kwargs) -> tp.Array2d:
        ...

    def fit_transform(self, *args, **kwargs) -> tp.Array2d:
        ...


# A class decorator: maps a class to the same (mutated) class.
WrapperFuncT = tp.Callable[[tp.Type[tp.T]], tp.Type[tp.T]]
# (method_name, transformer_class) pair consumed by `add_transform_methods`.
TransformFuncInfoT = tp.Tuple[str, tp.Type[TransformerT]]
# Output of the splitters: one or more (frame, index) pairs, or a figure.
SplitOutputT = tp.Union[tp.MaybeTuple[tp.Tuple[tp.Frame, tp.Index]], tp.BaseFigure]
def add_transform_methods(transformers: tp.Iterable[TransformFuncInfoT]) -> WrapperFuncT:
    """Class decorator that attaches one transform method per scikit-learn transformer.

    Each ``(method_name, transformer_cls)`` pair yields a method that constructs
    the transformer from ``**kwargs`` and delegates to ``self.transform``.
    """
    def wrapper(cls: tp.Type[tp.T]) -> tp.Type[tp.T]:
        for method_name, transformer_cls in transformers:
            def transform(self, wrap_kwargs: tp.KwargsLike = None,
                          _transformer: tp.Type[TransformerT] = transformer_cls, **kwargs) -> tp.SeriesFrame:
                # `_transformer` default binds the class at definition time,
                # avoiding the late-binding closure pitfall inside the loop.
                return self.transform(_transformer(**kwargs), wrap_kwargs=wrap_kwargs)

            transform.__doc__ = f"Transform using `sklearn.preprocessing.{transformer_cls.__name__}`."
            setattr(cls, method_name, transform)
        return cls

    return wrapper
@add_nb_methods([
(nb.shuffle_nb, False),
(nb.fillna_nb, False),
(nb.bshift_nb, False),
(nb.fshift_nb, False),
(nb.diff_nb, False),
(nb.pct_change_nb, False),
(nb.ffill_nb, False),
(nb.cumsum_nb, False),
(nb.cumprod_nb, False),
(nb.rolling_min_nb, False),
(nb.rolling_max_nb, False),
(nb.rolling_mean_nb, False),
(nb.expanding_min_nb, False),
(nb.expanding_max_nb, False),
(nb.expanding_mean_nb, False),
(nb.product_nb, True, 'product')
], module_name='vectorbt.generic.nb')
@add_transform_methods([
('binarize', Binarizer),
('minmax_scale', MinMaxScaler),
('maxabs_scale', MaxAbsScaler),
('normalize', Normalizer),
('robust_scale', RobustScaler),
('scale', StandardScaler),
('quantile_transform', QuantileTransformer),
('power_transform', PowerTransformer)
])
class GenericAccessor(BaseAccessor):
"""Accessor on top of data of any type. For both, Series and DataFrames.
Accessible through `pd.Series.vbt` and `pd.DataFrame.vbt`."""
    def __init__(self, obj: tp.SeriesFrame, **kwargs) -> None:
        """Initialize from a pandas object or from a parent accessor.

        When accessors are chained, `obj` may itself be an accessor; in that
        case the wrapped pandas object is unwrapped before delegating.
        """
        if not checks.is_pandas(obj):  # parent accessor
            obj = obj._obj
        # Explicit base call (not super()) — keeps behavior stable under the
        # multiple-inheritance accessor hierarchy.
        BaseAccessor.__init__(self, obj, **kwargs)
def rolling_std(self, window: int, minp: tp.Optional[int] = None, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.rolling_std_nb`."""
out = nb.rolling_std_nb(self.to_2d_array(), window, minp=minp, ddof=ddof)
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def expanding_std(self, minp: tp.Optional[int] = 1, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.expanding_std_nb`."""
out = nb.expanding_std_nb(self.to_2d_array(), minp=minp, ddof=ddof)
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def ewm_mean(self, span: int, minp: tp.Optional[int] = 0, adjust: bool = True,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.ewm_mean_nb`."""
out = nb.ewm_mean_nb(self.to_2d_array(), span, minp=minp, adjust=adjust)
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def ewm_std(self, span: int, minp: tp.Optional[int] = 0, adjust: bool = True, ddof: int = 1,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame: # pragma: no cover
"""See `vectorbt.generic.nb.ewm_std_nb`."""
out = nb.ewm_std_nb(self.to_2d_array(), span, minp=minp, adjust=adjust, ddof=ddof)
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def apply_along_axis(self, apply_func_nb: tp.Union[nb.apply_nbT, nb.row_apply_nbT], *args, axis: int = 0,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Apply a function `apply_func_nb` along an axis."""
checks.assert_numba_func(apply_func_nb)
if axis == 0:
out = nb.apply_nb(self.to_2d_array(), apply_func_nb, *args)
elif axis == 1:
out = nb.row_apply_nb(self.to_2d_array(), apply_func_nb, *args)
else:
raise ValueError("Only axes 0 and 1 are supported")
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def rolling_apply(self, window: int, apply_func_nb: tp.Union[nb.rolling_apply_nbT, nb.rolling_matrix_apply_nbT],
*args, minp: tp.Optional[int] = None, on_matrix: bool = False,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""See `vectorbt.generic.nb.rolling_apply_nb` and
`vectorbt.generic.nb.rolling_matrix_apply_nb` for `on_matrix=True`.
## Example
```python-repl
>>> mean_nb = njit(lambda i, col, a: np.nanmean(a))
>>> df.vbt.rolling_apply(3, mean_nb)
a b c
2020-01-01 1.0 5.0 1.000000
2020-01-02 1.5 4.5 1.500000
2020-01-03 2.0 4.0 2.000000
2020-01-04 3.0 3.0 2.333333
2020-01-05 4.0 2.0 2.000000
>>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
>>> df.vbt.rolling_apply(3, mean_matrix_nb, on_matrix=True)
a b c
2020-01-01 2.333333 2.333333 2.333333
2020-01-02 2.500000 2.500000 2.500000
2020-01-03 2.666667 2.666667 2.666667
2020-01-04 2.777778 2.777778 2.777778
2020-01-05 2.666667 2.666667 2.666667
```
"""
checks.assert_numba_func(apply_func_nb)
if on_matrix:
out = nb.rolling_matrix_apply_nb(self.to_2d_array(), window, minp, apply_func_nb, *args)
else:
out = nb.rolling_apply_nb(self.to_2d_array(), window, minp, apply_func_nb, *args)
return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def expanding_apply(self, apply_func_nb: tp.Union[nb.rolling_apply_nbT, nb.rolling_matrix_apply_nbT],
                    *args, minp: tp.Optional[int] = 1, on_matrix: bool = False,
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Apply `apply_func_nb` over an expanding window.

    Dispatches to `vectorbt.generic.nb.expanding_apply_nb`, or to
    `vectorbt.generic.nb.expanding_matrix_apply_nb` when `on_matrix` is True
    (the function then receives the whole expanding matrix instead of one
    column). `minp` is the minimum number of observations required.
    """
    checks.assert_numba_func(apply_func_nb)
    expanding_func = nb.expanding_matrix_apply_nb if on_matrix else nb.expanding_apply_nb
    out = expanding_func(self.to_2d_array(), minp, apply_func_nb, *args)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def groupby_apply(self, by: tp.PandasGroupByLike,
                  apply_func_nb: tp.Union[nb.groupby_apply_nbT, nb.groupby_apply_matrix_nbT],
                  *args, on_matrix: bool = False, wrap_kwargs: tp.KwargsLike = None,
                  **kwargs) -> tp.SeriesFrame:
    """Group rows with pandas and apply `apply_func_nb` to each group.

    Dispatches to `vectorbt.generic.nb.groupby_apply_nb`, or to
    `vectorbt.generic.nb.groupby_apply_matrix_nb` when `on_matrix` is True.
    For `by` and `**kwargs`, see `pd.DataFrame.groupby`.
    """
    checks.assert_numba_func(apply_func_nb)
    grouped = self._obj.groupby(by, axis=0, **kwargs)
    # Numba can't consume the pandas indices dict directly; convert to a
    # typed Dict keyed by group position.
    groups = Dict()
    for group_i, (_, group_idxs) in enumerate(grouped.indices.items()):
        groups[group_i] = np.asarray(group_idxs)
    groupby_func = nb.groupby_apply_matrix_nb if on_matrix else nb.groupby_apply_nb
    out = groupby_func(self.to_2d_array(), groups, apply_func_nb, *args)
    # Label the reduced rows with the original group keys
    wrap_kwargs = merge_dicts(dict(name_or_index=list(grouped.indices.keys())), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def resample_apply(self, freq: tp.PandasFrequencyLike,
                   apply_func_nb: tp.Union[nb.groupby_apply_nbT, nb.groupby_apply_matrix_nbT],
                   *args, on_matrix: bool = False, wrap_kwargs: tp.KwargsLike = None,
                   **kwargs) -> tp.SeriesFrame:
    """Resample rows with pandas and apply `apply_func_nb` to each bin.

    Dispatches to `vectorbt.generic.nb.groupby_apply_nb`, or to
    `vectorbt.generic.nb.groupby_apply_matrix_nb` when `on_matrix` is True.
    For `freq` and `**kwargs`, see `pd.DataFrame.resample`. Bins that
    received no rows remain NaN in the output.
    """
    checks.assert_numba_func(apply_func_nb)
    resampled = self._obj.resample(freq, axis=0, **kwargs)
    # Convert pandas bin indices to a Numba-typed Dict keyed by bin position
    groups = Dict()
    for group_i, (_, group_idxs) in enumerate(resampled.indices.items()):
        groups[group_i] = np.asarray(group_idxs)
    groupby_func = nb.groupby_apply_matrix_nb if on_matrix else nb.groupby_apply_nb
    out = groupby_func(self.to_2d_array(), groups, apply_func_nb, *args)
    # `resampled.indices` holds only non-empty bins; first wrap those results,
    # then scatter them into a NaN-filled frame spanning every bin.
    out_obj = self.wrapper.wrap(out, index=list(resampled.indices.keys()))
    resampled_arr = np.full((resampled.ngroups, self.to_2d_array().shape[1]), np.nan)
    resampled_obj = self.wrapper.wrap(
        resampled_arr,
        index=pd.Index(list(resampled.groups.keys()), freq=freq),
        **merge_dicts({}, wrap_kwargs)
    )
    resampled_obj.loc[out_obj.index] = out_obj.values
    return resampled_obj
def applymap(self, apply_func_nb: nb.applymap_nbT, *args,
             wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Apply `apply_func_nb` element-wise.

    See `vectorbt.generic.nb.applymap_nb`.
    """
    checks.assert_numba_func(apply_func_nb)
    mapped = nb.applymap_nb(self.to_2d_array(), apply_func_nb, *args)
    return self.wrapper.wrap(mapped, **merge_dicts({}, wrap_kwargs))
def filter(self, filter_func_nb: nb.filter_nbT, *args,
           wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Keep elements where `filter_func_nb` is True; replace the rest with NaN.

    See `vectorbt.generic.nb.filter_nb`.
    """
    checks.assert_numba_func(filter_func_nb)
    filtered = nb.filter_nb(self.to_2d_array(), filter_func_nb, *args)
    return self.wrapper.wrap(filtered, **merge_dicts({}, wrap_kwargs))
def apply_and_reduce(self, apply_func_nb: nb.apply_and_reduce_nbAT, reduce_func_nb: nb.apply_and_reduce_nbRT,
                     apply_args: tp.Optional[tuple] = None, reduce_args: tp.Optional[tuple] = None,
                     wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Apply `apply_func_nb` per column, then reduce each result with `reduce_func_nb`.

    See `vectorbt.generic.nb.apply_and_reduce_nb`. `apply_args` and
    `reduce_args` are forwarded to the respective functions.
    """
    checks.assert_numba_func(apply_func_nb)
    checks.assert_numba_func(reduce_func_nb)
    # Normalize optional argument tuples
    apply_args = () if apply_args is None else apply_args
    reduce_args = () if reduce_args is None else reduce_args
    out = nb.apply_and_reduce_nb(self.to_2d_array(), apply_func_nb, apply_args, reduce_func_nb, reduce_args)
    wrap_kwargs = merge_dicts(dict(name_or_index='apply_and_reduce'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def reduce(self, reduce_func_nb: tp.Union[nb.flat_reduce_grouped_nbT,
                                          nb.flat_reduce_grouped_to_array_nbT,
                                          nb.reduce_grouped_nbT,
                                          nb.reduce_grouped_to_array_nbT,
                                          nb.reduce_nbT,
                                          nb.reduce_to_array_nbT],
           *args, to_array: bool = False, to_idx: bool = False, flatten: bool = False,
           order: str = 'C', idx_labeled: bool = True, group_by: tp.GroupByLike = None,
           wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeriesFrame[float]:
    """Reduce by column (or by group of columns).

    The Numba routine is chosen from the `grouped`/`to_array`/`flatten`
    combination:

    * grouped, flattened: `vectorbt.generic.nb.flat_reduce_grouped_to_array_nb`
      (`to_array=True`) or `vectorbt.generic.nb.flat_reduce_grouped_nb`
    * grouped, not flattened: `vectorbt.generic.nb.reduce_grouped_to_array_nb`
      (`to_array=True`) or `vectorbt.generic.nb.reduce_grouped_nb`
    * not grouped: `vectorbt.generic.nb.reduce_to_array_nb` (`to_array=True`)
      or `vectorbt.generic.nb.reduce_nb`

    Set `to_idx` to True if values returned by `reduce_func_nb` are
    indices/positions; set `idx_labeled` to False to return raw positions
    instead of index labels. `order` controls how groups are flattened.
    """
    checks.assert_numba_func(reduce_func_nb)
    arr = self.to_2d_array()
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        if flatten:
            checks.assert_in(order.upper(), ['C', 'F'])
            in_c_order = order.upper() == 'C'
            reduce_func = nb.flat_reduce_grouped_to_array_nb if to_array else nb.flat_reduce_grouped_nb
            out = reduce_func(arr, group_lens, in_c_order, reduce_func_nb, *args)
            if to_idx:
                # Map positions in the flattened group array back to row positions
                if in_c_order:
                    out //= group_lens  # flattened in C order
                else:
                    out %= self.wrapper.shape[0]  # flattened in F order
        else:
            reduce_func = nb.reduce_grouped_to_array_nb if to_array else nb.reduce_grouped_nb
            out = reduce_func(arr, group_lens, reduce_func_nb, *args)
    else:
        reduce_func = nb.reduce_to_array_nb if to_array else nb.reduce_nb
        out = reduce_func(arr, reduce_func_nb, *args)
    # Post-process positional results
    if to_idx:
        nan_mask = np.isnan(out)
        if idx_labeled:
            # Replace positions with index labels; NaNs stay NaN (object dtype)
            out = out.astype(object)
            out[~nan_mask] = self.wrapper.index[out[~nan_mask].astype(np.int_)]
        else:
            # Keep raw positions; encode missing as -1 so the dtype can be int
            out[nan_mask] = -1
            out = out.astype(np.int_)
    wrap_kwargs = merge_dicts(dict(name_or_index='reduce' if not to_array else None), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
def squeeze_grouped(self, reduce_func_nb: nb.squeeze_grouped_nbT, *args, group_by: tp.GroupByLike = None,
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Squeeze each group of columns into a single column using `reduce_func_nb`.

    See `vectorbt.generic.nb.squeeze_grouped_nb`.

    Raises:
        ValueError: If no grouping is active.
    """
    if not self.wrapper.grouper.is_grouped(group_by=group_by):
        raise ValueError("Grouping required")
    checks.assert_numba_func(reduce_func_nb)
    group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
    squeezed = nb.squeeze_grouped_nb(self.to_2d_array(), group_lens, reduce_func_nb, *args)
    return self.wrapper.wrap(squeezed, group_by=group_by, **merge_dicts({}, wrap_kwargs))
def flatten_grouped(self, group_by: tp.GroupByLike = None, order: str = 'C',
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Flatten each group of columns into a single column.

    See `vectorbt.generic.nb.flatten_grouped_nb`. With ``order='C'`` the index
    is repeated per row, with ``order='F'`` it is tiled per column.

    !!! warning
        Make sure that the distribution of group lengths is close to uniform,
        otherwise groups with less columns will be filled with NaN and
        needlessly occupy memory.

    Raises:
        ValueError: If no grouping is active.
    """
    if not self.wrapper.grouper.is_grouped(group_by=group_by):
        raise ValueError("Grouping required")
    checks.assert_in(order.upper(), ['C', 'F'])
    group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
    in_c_order = order.upper() == 'C'
    out = nb.flatten_grouped_nb(self.to_2d_array(), group_lens, in_c_order)
    # Rows multiply by the largest group size; build the matching index
    max_len = np.max(group_lens)
    if in_c_order:
        new_index = index_fns.repeat_index(self.wrapper.index, max_len)
    else:
        new_index = index_fns.tile_index(self.wrapper.index, max_len)
    wrap_kwargs = merge_dicts(dict(index=new_index), wrap_kwargs)
    return self.wrapper.wrap(out, group_by=group_by, **wrap_kwargs)
def min(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return min of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='min'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.min_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nanmin only supports int/float dtypes; fall back to NumPy
    min_func = nanmin if arr.dtype == int or arr.dtype == float else np.nanmin
    return self.wrapper.wrap_reduced(min_func(arr, axis=0), **wrap_kwargs)
def max(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return max of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='max'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.max_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nanmax only supports int/float dtypes; fall back to NumPy
    max_func = nanmax if arr.dtype == int or arr.dtype == float else np.nanmax
    return self.wrapper.wrap_reduced(max_func(arr, axis=0), **wrap_kwargs)
def mean(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return mean of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='mean'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(
            nb.mean_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nanmean only supports int/float dtypes; fall back to NumPy
    mean_func = nanmean if arr.dtype == int or arr.dtype == float else np.nanmean
    return self.wrapper.wrap_reduced(mean_func(arr, axis=0), **wrap_kwargs)
def median(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return median of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='median'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.median_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nanmedian only supports int/float dtypes; fall back to NumPy
    median_func = nanmedian if arr.dtype == int or arr.dtype == float else np.nanmedian
    return self.wrapper.wrap_reduced(median_func(arr, axis=0), **wrap_kwargs)
def std(self, ddof: int = 1, group_by: tp.GroupByLike = None,
        wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return standard deviation of non-NaN elements, per column or per group.

    `ddof` is the delta degrees of freedom used in the divisor ``N - ddof``.
    """
    wrap_kwargs = merge_dicts(dict(name_or_index='std'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.std_reduce_nb, ddof, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nanstd only supports int/float dtypes; fall back to NumPy
    std_func = nanstd if arr.dtype == int or arr.dtype == float else np.nanstd
    return self.wrapper.wrap_reduced(std_func(arr, ddof=ddof, axis=0), **wrap_kwargs)
def sum(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return sum of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='sum'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.sum_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    arr = self.to_2d_array()
    # bottleneck's nansum only supports int/float dtypes; fall back to NumPy
    sum_func = nansum if arr.dtype == int or arr.dtype == float else np.nansum
    return self.wrapper.wrap_reduced(sum_func(arr, axis=0), **wrap_kwargs)
def count(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return count of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='count', dtype=np.int_), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(nb.count_reduce_nb, group_by=group_by, flatten=True, wrap_kwargs=wrap_kwargs)
    non_nan_counts = np.sum(~np.isnan(self.to_2d_array()), axis=0)
    return self.wrapper.wrap_reduced(non_nan_counts, **wrap_kwargs)
def idxmin(self, group_by: tp.GroupByLike = None, order: str = 'C',
           wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return labeled index of min of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='idxmin'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(
            nb.argmin_reduce_nb,
            group_by=group_by,
            flatten=True,
            to_idx=True,
            order=order,
            wrap_kwargs=wrap_kwargs
        )
    arr = self.to_2d_array()
    # Columns that are all-NaN stay NaN; object dtype keeps labels and NaN together
    out = np.full(arr.shape[1], np.nan, dtype=object)
    all_nan_cols = np.all(np.isnan(arr), axis=0)
    out[~all_nan_cols] = self.wrapper.index[nanargmin(arr[:, ~all_nan_cols], axis=0)]
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def idxmax(self, group_by: tp.GroupByLike = None, order: str = 'C',
           wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Return labeled index of max of non-NaN elements, per column or per group."""
    wrap_kwargs = merge_dicts(dict(name_or_index='idxmax'), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(
            nb.argmax_reduce_nb,
            group_by=group_by,
            flatten=True,
            to_idx=True,
            order=order,
            wrap_kwargs=wrap_kwargs
        )
    arr = self.to_2d_array()
    # Columns that are all-NaN stay NaN; object dtype keeps labels and NaN together
    out = np.full(arr.shape[1], np.nan, dtype=object)
    all_nan_cols = np.all(np.isnan(arr), axis=0)
    out[~all_nan_cols] = self.wrapper.index[nanargmax(arr[:, ~all_nan_cols], axis=0)]
    return self.wrapper.wrap_reduced(out, **wrap_kwargs)
def describe(self, percentiles: tp.Optional[tp.ArrayLike] = None, ddof: int = 1,
             group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Return descriptive statistics (count, mean, std, min, percentiles, max).

    See `vectorbt.generic.nb.describe_reduce_nb`. For `percentiles`, see
    `pd.DataFrame.describe`; the median (0.5) is always included.
    """
    # Normalize percentiles: default to quartiles, force 0.5, deduplicate/sort
    if percentiles is not None:
        percentiles = reshape_fns.to_1d(percentiles, raw=True)
    else:
        percentiles = np.array([0.25, 0.5, 0.75])
    percentiles = percentiles.tolist()
    if 0.5 not in percentiles:
        percentiles.append(0.5)
    percentiles = np.unique(percentiles)
    perc_formatted = pd.io.formats.format.format_percentiles(percentiles)
    stat_index = pd.Index(['count', 'mean', 'std', 'min', *perc_formatted, 'max'])
    wrap_kwargs = merge_dicts(dict(name_or_index=stat_index), wrap_kwargs)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        return self.reduce(
            nb.describe_reduce_nb, percentiles, ddof,
            group_by=group_by, flatten=True, to_array=True,
            wrap_kwargs=wrap_kwargs)
    return self.reduce(
        nb.describe_reduce_nb, percentiles, ddof,
        to_array=True, wrap_kwargs=wrap_kwargs)
def drawdown(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Return the drawdown series: relative distance from the running maximum.

    Computed as ``value / expanding_max(value) - 1``, so values are 0 at a new
    peak and negative while below it.
    """
    arr = self.to_2d_array()  # compute once instead of twice
    out = arr / nb.expanding_max_nb(arr) - 1
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
@cached_property
def drawdowns(self) -> Drawdowns:
    """Drawdown records built by `GenericAccessor.get_drawdowns` with default arguments."""
    return self.get_drawdowns()
@cached_method
def get_drawdowns(self, group_by: tp.GroupByLike = None, **kwargs) -> Drawdowns:
    """Build drawdown records from this object.

    See `vectorbt.generic.drawdowns.Drawdowns`. Falls back to the wrapper's
    grouping when `group_by` is None.
    """
    if group_by is None:
        group_by = self.wrapper.grouper.group_by
    return Drawdowns.from_ts(self._obj, freq=self.wrapper.freq, group_by=group_by, **kwargs)
def to_mapped_array(self, dropna: bool = True, group_by: tp.GroupByLike = None, **kwargs) -> MappedArray:
    """Convert this object into a `vectorbt.records.mapped_array.MappedArray`.

    Values are flattened column-by-column (Fortran order) alongside parallel
    column and row-position arrays; NaN entries are removed when `dropna` is True.
    """
    flat_vals = reshape_fns.to_2d(self._obj, raw=True).flatten(order='F')
    n_rows, n_cols = self.wrapper.shape_2d
    col_arr = np.repeat(np.arange(n_cols), n_rows)
    idx_arr = np.tile(np.arange(n_rows), n_cols)
    if dropna:
        keep_mask = ~np.isnan(flat_vals)
        flat_vals = flat_vals[keep_mask]
        col_arr = col_arr[keep_mask]
        idx_arr = idx_arr[keep_mask]
    if group_by is None:
        group_by = self.wrapper.grouper.group_by
    return MappedArray(self.wrapper, flat_vals, col_arr, idx_arr=idx_arr, **kwargs).regroup(group_by)
# ############# Transforming ############# #
def transform(self, transformer: TransformerT, wrap_kwargs: tp.KwargsLike = None, **kwargs) -> tp.SeriesFrame:
    """Transform the data using a (scikit-learn-style) transformer.

    A transformer can be any class instance that has `transform` and
    `fit_transform` methods, ideally subclassing `sklearn.base.TransformerMixin`
    and `sklearn.base.BaseEstimator`. An unfitted transformer is fitted on this
    data first; a fitted one is applied as-is. `**kwargs` are passed to the
    `transform` or `fit_transform` method.
    """
    # Probe fitted state without letting transform()'s own errors be swallowed
    try:
        check_is_fitted(transformer)
        fitted = True
    except NotFittedError:
        fitted = False
    if fitted:
        result = transformer.transform(self.to_2d_array(), **kwargs)
    else:
        result = transformer.fit_transform(self.to_2d_array(), **kwargs)
    return self.wrapper.wrap(result, **merge_dicts({}, wrap_kwargs))
def zscore(self, **kwargs) -> tp.SeriesFrame:
    """Compute the z-score (standardize to zero mean and unit variance)
    using `sklearn.preprocessing.StandardScaler`."""
    return self.scale(with_mean=True, with_std=True, **kwargs)
# ############# Splitting ############# #
def split(self, splitter: SplitterT, stack_kwargs: tp.KwargsLike = None, keys: tp.Optional[tp.IndexLike] = None,
          plot: bool = False, trace_names: tp.TraceNames = None, heatmap_kwargs: tp.KwargsLike = None,
          **kwargs) -> SplitOutputT:
    """Split the object using a splitter.

    Returns a tuple of tuples, each corresponding to a set and composed of a
    dataframe and split indexes. A splitter can be any class instance that has
    a `split` method, ideally subclassing `sklearn.model_selection.BaseCrossValidator`
    or `vectorbt.generic.splitters.BaseSplitter`.

    `heatmap_kwargs` are passed to `vectorbt.generic.plotting.Heatmap` if
    `plot` is True, and can be a dictionary or a list per set, for example, to
    set trace name for each set ('train', 'test', etc.). `**kwargs` are passed
    to the `split` method.

    !!! note
        The datetime-like format of the index will be lost as result of this
        operation. Make sure to store the index metadata such as frequency
        information beforehand.
    """
    # Splitters operate on positions; feed them a positional Series
    total_range_sr = pd.Series(np.arange(len(self.wrapper.index)), index=self.wrapper.index)
    set_ranges = list(splitter.split(total_range_sr, **kwargs))
    if len(set_ranges) == 0:
        raise ValueError("No splits were generated")
    # Transpose from per-split tuples of sets to per-set tuples of splits
    idxs_by_split_and_set = list(zip(*set_ranges))
    results = []
    if keys is not None:
        if not isinstance(keys, pd.Index):
            keys = pd.Index(keys)
    for idxs_by_split in idxs_by_split_and_set:
        split_dfs = []
        split_indexes = []
        for split_idx, idxs in enumerate(idxs_by_split):
            split_dfs.append(self._obj.iloc[idxs].reset_index(drop=True))
            if keys is not None:
                split_name = keys[split_idx]
            else:
                split_name = 'split_' + str(split_idx)
            split_indexes.append(pd.Index(self.wrapper.index[idxs], name=split_name))
        # Stack all splits of this set side by side under a 'split_idx' level
        set_df = pd.concat(split_dfs, axis=1).reset_index(drop=True)
        if keys is not None:
            split_columns = keys
        else:
            split_columns = pd.Index(np.arange(len(split_indexes)), name='split_idx')
        split_columns = index_fns.repeat_index(split_columns, len(self.wrapper.columns))
        if stack_kwargs is None:
            stack_kwargs = {}
        set_df = set_df.vbt.stack_index(split_columns, **stack_kwargs)
        results.append((set_df, split_indexes))
    if plot:  # pragma: no cover
        if trace_names is None:
            trace_names = list(range(len(results)))
        if isinstance(trace_names, str):
            trace_names = [trace_names]
        # One heatmap layer per set; each split becomes one heatmap column
        nan_df = pd.DataFrame(np.nan, columns=pd.RangeIndex(stop=len(results[0][1])), index=self.wrapper.index)
        fig = None
        for i, (_, split_indexes) in enumerate(results):
            heatmap_df = nan_df.copy()
            for j in range(len(split_indexes)):
                heatmap_df.loc[split_indexes[j], j] = i
            _heatmap_kwargs = resolve_dict(heatmap_kwargs, i=i)
            fig = heatmap_df.vbt.ts_heatmap(fig=fig, **merge_dicts(
                dict(
                    trace_kwargs=dict(
                        showscale=False,
                        name=str(trace_names[i]),
                        showlegend=True
                    )
                ),
                _heatmap_kwargs
            ))
            if fig.layout.colorway is not None:
                colorway = fig.layout.colorway
            else:
                colorway = fig.layout.template.layout.colorway
            if 'colorscale' not in _heatmap_kwargs:
                # Force a single flat color per set
                fig.data[-1].update(colorscale=[colorway[i], colorway[i]])
        return fig
    if len(results) == 1:
        return results[0]
    return tuple(results)
def range_split(self, **kwargs) -> SplitOutputT:
    """Split into ranges using `GenericAccessor.split` with
    `vectorbt.generic.splitters.RangeSplitter`.

    Ranges can be defined by count (`n`), length (`range_len`), or explicit
    `start_idxs`/`end_idxs` (positions or index labels); see
    `vectorbt.generic.splitters.RangeSplitter` for the accepted keyword
    arguments.
    """
    return self.split(RangeSplitter(), **kwargs)
def rolling_split(self, **kwargs) -> SplitOutputT:
    """Split into rolling windows using `GenericAccessor.split` with
    `vectorbt.generic.splitters.RollingSplitter`.

    Typical keyword arguments include `window_len`, `set_lens` (e.g. sizes of
    validation/test sets inside each window), and `left_to_right`; see
    `vectorbt.generic.splitters.RollingSplitter`.
    """
    return self.split(RollingSplitter(), **kwargs)
def expanding_split(self, **kwargs) -> SplitOutputT:
    """Split into expanding windows using `GenericAccessor.split` with
    `vectorbt.generic.splitters.ExpandingSplitter`.

    Typical keyword arguments include `n`, `set_lens`, `min_len`, and
    `left_to_right`; see `vectorbt.generic.splitters.ExpandingSplitter`.
    """
    return self.split(ExpandingSplitter(), **kwargs)
# ############# Plotting ############# #
def plot(self,
         trace_names: tp.TraceNames = None,
         x_labels: tp.Optional[tp.Labels] = None,
         return_fig: bool = True,
         **kwargs) -> tp.Union[tp.BaseFigure, plotting.Scatter]:  # pragma: no cover
    """Plot the data as a `vectorbt.generic.plotting.Scatter`.

    `x_labels` defaults to the wrapper's index; `trace_names` defaults to the
    column names (for frames, or named series). Returns the figure unless
    `return_fig` is False, in which case the widget itself is returned.
    """
    if x_labels is None:
        x_labels = self.wrapper.index
    if trace_names is None:
        if self.is_frame() or (self.is_series() and self.wrapper.name is not None):
            trace_names = self.wrapper.columns
    scatter = plotting.Scatter(
        data=self.to_2d_array(),
        trace_names=trace_names,
        x_labels=x_labels,
        **kwargs
    )
    return scatter.fig if return_fig else scatter
def lineplot(self, **kwargs) -> tp.Union[tp.BaseFigure, plotting.Scatter]:  # pragma: no cover
    """Plot as lines: `GenericAccessor.plot` with scatter mode 'lines'."""
    return self.plot(**merge_dicts(dict(trace_kwargs=dict(mode='lines')), kwargs))
def scatterplot(self, **kwargs) -> tp.Union[tp.BaseFigure, plotting.Scatter]:  # pragma: no cover
    """Plot as points: `GenericAccessor.plot` with scatter mode 'markers'."""
    return self.plot(**merge_dicts(dict(trace_kwargs=dict(mode='markers')), kwargs))
def barplot(self,
            trace_names: tp.TraceNames = None,
            x_labels: tp.Optional[tp.Labels] = None,
            return_fig: bool = True,
            **kwargs) -> tp.Union[tp.BaseFigure, plotting.Bar]:  # pragma: no cover
    """Plot the data as a `vectorbt.generic.plotting.Bar`.

    `x_labels` defaults to the wrapper's index; `trace_names` defaults to the
    column names (for frames, or named series). Returns the figure unless
    `return_fig` is False, in which case the widget itself is returned.
    """
    if x_labels is None:
        x_labels = self.wrapper.index
    if trace_names is None:
        if self.is_frame() or (self.is_series() and self.wrapper.name is not None):
            trace_names = self.wrapper.columns
    bar = plotting.Bar(
        data=self.to_2d_array(),
        trace_names=trace_names,
        x_labels=x_labels,
        **kwargs
    )
    return bar.fig if return_fig else bar
def histplot(self,
trace_names: tp.TraceNames = None,
group_by: tp.GroupByLike = None,
return_fig: bool = True,
**kwargs) -> tp.Union[tp.BaseFigure, plotting.Histogram]: # pragma: no cover
"""Create `vectorbt.generic.plotting.Histogram` and return the figure.
## Example
```python-repl
>>> df.vbt.histplot()
```

"""
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.flatten_grouped(group_by=group_by).vbt.histplot(trace_names=trace_names, **kwargs)
if trace_names is None:
if self.is_frame() or (self.is_series() and self.wrapper.name is not None):
trace_names = self.wrapper.columns
hist = plotting.Histogram(
data=self.to_2d_array(),
trace_names=trace_names,
**kwargs
)
if return_fig:
return hist.fig
return hist
def boxplot(self,
trace_names: tp.TraceNames = None,
group_by: tp.GroupByLike = None,
return_fig: bool = True,
**kwargs) -> tp.Union[tp.BaseFigure, plotting.Box]: # pragma: no cover
"""Create `vectorbt.generic.plotting.Box` and return the figure.
## Example
```python-repl
>>> df.vbt.boxplot()
```

"""
if self.wrapper.grouper.is_grouped(group_by=group_by):
return self.flatten_grouped(group_by=group_by).vbt.boxplot(trace_names=trace_names, **kwargs)
if trace_names is None:
if self.is_frame() or (self.is_series() and self.wrapper.name is not None):
trace_names = self.wrapper.columns
box = plotting.Box(
data=self.to_2d_array(),
trace_names=trace_names,
**kwargs
)
if return_fig:
return box.fig
return box
class GenericSRAccessor(GenericAccessor, BaseSRAccessor):
    """Accessor on top of data of any type. For Series only.
    Accessible through `pd.Series.vbt`."""
    def __init__(self, obj: tp.Series, **kwargs) -> None:
        if not checks.is_pandas(obj):  # parent accessor
            obj = obj._obj
        BaseSRAccessor.__init__(self, obj, **kwargs)
        GenericAccessor.__init__(self, obj, **kwargs)
    def plot_against(self,
                     other: tp.ArrayLike,
                     trace_kwargs: tp.KwargsLike = None,
                     other_trace_kwargs: tp.Union[str, tp.KwargsLike] = None,
                     pos_trace_kwargs: tp.KwargsLike = None,
                     neg_trace_kwargs: tp.KwargsLike = None,
                     hidden_trace_kwargs: tp.KwargsLike = None,
                     add_trace_kwargs: tp.KwargsLike = None,
                     fig: tp.Optional[tp.BaseFigure] = None,
                     **layout_kwargs) -> tp.BaseFigure:  # pragma: no cover
        """Plot Series as a line against another line.
        Args:
            other (array_like): Second array. Will broadcast.
            trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter`.
            other_trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter` for `other`.
                Set to 'hidden' to hide.
            pos_trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter` for positive line.
            neg_trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter` for negative line.
            hidden_trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter` for hidden lines.
            add_trace_kwargs (dict): Keyword arguments passed to `add_trace`.
            fig (Figure or FigureWidget): Figure to add traces to.
            **layout_kwargs: Keyword arguments for layout.
        ## Example
        ```python-repl
        >>> df['a'].vbt.plot_against(df['b'])
        ```
        
        """
        if trace_kwargs is None:
            trace_kwargs = {}
        if other_trace_kwargs is None:
            other_trace_kwargs = {}
        if pos_trace_kwargs is None:
            pos_trace_kwargs = {}
        if neg_trace_kwargs is None:
            neg_trace_kwargs = {}
        if hidden_trace_kwargs is None:
            hidden_trace_kwargs = {}
        obj, other = reshape_fns.broadcast(self._obj, other, columns_from='keep')
        checks.assert_type(other, pd.Series)
        if fig is None:
            fig = make_figure()
        fig.update_layout(**layout_kwargs)
        # TODO: Using masks feels hacky
        pos_mask = self._obj > other
        if pos_mask.any():
            # Fill positive area
            # Where this series is not above `other`, clamp to `other` so the
            # fill only covers the positive region.
            pos_obj = self._obj.copy()
            pos_obj[~pos_mask] = other[~pos_mask]
            # Invisible baseline trace: provides the lower bound that the
            # following 'tonexty' fill attaches to.
            other.vbt.plot(
                trace_kwargs=merge_dicts(dict(
                    line=dict(
                        color='rgba(0, 0, 0, 0)',
                        width=0
                    ),
                    opacity=0,
                    hoverinfo='skip',
                    showlegend=False,
                    name=None,
                ), hidden_trace_kwargs),
                add_trace_kwargs=add_trace_kwargs,
                fig=fig
            )
            pos_obj.vbt.plot(
                trace_kwargs=merge_dicts(dict(
                    fillcolor='rgba(0, 128, 0, 0.3)',
                    line=dict(
                        color='rgba(0, 0, 0, 0)',
                        width=0
                    ),
                    opacity=0,
                    fill='tonexty',
                    connectgaps=False,
                    hoverinfo='skip',
                    showlegend=False,
                    name=None
                ), pos_trace_kwargs),
                add_trace_kwargs=add_trace_kwargs,
                fig=fig
            )
        neg_mask = self._obj < other
        if neg_mask.any():
            # Fill negative area
            # Same trick as above, but clamped to the region below `other`.
            neg_obj = self._obj.copy()
            neg_obj[~neg_mask] = other[~neg_mask]
            other.vbt.plot(
                trace_kwargs=merge_dicts(dict(
                    line=dict(
                        color='rgba(0, 0, 0, 0)',
                        width=0
                    ),
                    opacity=0,
                    hoverinfo='skip',
                    showlegend=False,
                    name=None
                ), hidden_trace_kwargs),
                add_trace_kwargs=add_trace_kwargs,
                fig=fig
            )
            neg_obj.vbt.plot(
                trace_kwargs=merge_dicts(dict(
                    line=dict(
                        color='rgba(0, 0, 0, 0)',
                        width=0
                    ),
                    fillcolor='rgba(255, 0, 0, 0.3)',
                    opacity=0,
                    fill='tonexty',
                    connectgaps=False,
                    hoverinfo='skip',
                    showlegend=False,
                    name=None
                ), neg_trace_kwargs),
                add_trace_kwargs=add_trace_kwargs,
                fig=fig
            )
        # Plot main traces
        self.plot(trace_kwargs=trace_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
        if other_trace_kwargs == 'hidden':
            other_trace_kwargs = dict(
                line=dict(
                    color='rgba(0, 0, 0, 0)',
                    width=0
                ),
                opacity=0.,
                hoverinfo='skip',
                showlegend=False,
                name=None
            )
        other.vbt.plot(trace_kwargs=other_trace_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
        return fig
    def overlay_with_heatmap(self,
                             other: tp.ArrayLike,
                             trace_kwargs: tp.KwargsLike = None,
                             heatmap_kwargs: tp.KwargsLike = None,
                             add_trace_kwargs: tp.KwargsLike = None,
                             fig: tp.Optional[tp.BaseFigure] = None,
                             **layout_kwargs) -> tp.BaseFigure:  # pragma: no cover
        """Plot Series as a line and overlays it with a heatmap.
        Args:
            other (array_like): Second array. Will broadcast.
            trace_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Scatter`.
            heatmap_kwargs (dict): Keyword arguments passed to `GenericDFAccessor.heatmap`.
            add_trace_kwargs (dict): Keyword arguments passed to `add_trace`.
            fig (Figure or FigureWidget): Figure to add traces to.
            **layout_kwargs: Keyword arguments for layout.
        ## Example
        ```python-repl
        >>> df['a'].vbt.overlay_with_heatmap(df['b'])
        ```
        
        """
        from vectorbt._settings import settings
        plotting_cfg = settings['plotting']
        if trace_kwargs is None:
            trace_kwargs = {}
        if heatmap_kwargs is None:
            heatmap_kwargs = {}
        if add_trace_kwargs is None:
            add_trace_kwargs = {}
        obj, other = reshape_fns.broadcast(self._obj, other, columns_from='keep')
        checks.assert_type(other, pd.Series)
        if fig is None:
            # Secondary y axis hosts the line so both plots can coexist.
            fig = make_subplots(specs=[[{"secondary_y": True}]])
            if 'width' in plotting_cfg['layout']:
                # Extra room for the heatmap's colorbar.
                fig.update_layout(width=plotting_cfg['layout']['width'] + 100)
        fig.update_layout(**layout_kwargs)
        other.vbt.ts_heatmap(**heatmap_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
        self.plot(
            trace_kwargs=merge_dicts(dict(line=dict(color=plotting_cfg['color_schema']['blue'])), trace_kwargs),
            add_trace_kwargs=merge_dicts(dict(secondary_y=True), add_trace_kwargs),
            fig=fig
        )
        return fig
    def heatmap(self,
                x_level: tp.Optional[tp.Level] = None,
                y_level: tp.Optional[tp.Level] = None,
                symmetric: bool = False,
                sort: bool = True,
                x_labels: tp.Optional[tp.Labels] = None,
                y_labels: tp.Optional[tp.Labels] = None,
                slider_level: tp.Optional[tp.Level] = None,
                active: int = 0,
                slider_labels: tp.Optional[tp.Labels] = None,
                return_fig: bool = True,
                fig: tp.Optional[tp.BaseFigure] = None,
                **kwargs) -> tp.Union[tp.BaseFigure, plotting.Heatmap]:  # pragma: no cover
        """Create a heatmap figure based on object's multi-index and values.
        If index is not a multi-index, converts Series into a DataFrame and calls `GenericDFAccessor.heatmap`.
        If multi-index contains more than two levels or you want them in specific order,
        pass `x_level` and `y_level`, each (`int` if index or `str` if name) corresponding
        to an axis of the heatmap. Optionally, pass `slider_level` to use a level as a slider.
        Creates `vectorbt.generic.plotting.Heatmap` and returns the figure.
        ## Example
        ```python-repl
        >>> multi_index = pd.MultiIndex.from_tuples([
        ...     (1, 1),
        ...     (2, 2),
        ...     (3, 3)
        ... ])
        >>> sr = pd.Series(np.arange(len(multi_index)), index=multi_index)
        >>> sr
        1  1    0
        2  2    1
        3  3    2
        dtype: int64
        >>> sr.vbt.heatmap()
        ```
        
        Using one level as a slider:
        ```python-repl
        >>> multi_index = pd.MultiIndex.from_tuples([
        ...     (1, 1, 1),
        ...     (1, 2, 2),
        ...     (1, 3, 3),
        ...     (2, 3, 3),
        ...     (2, 2, 2),
        ...     (2, 1, 1)
        ... ])
        >>> sr = pd.Series(np.arange(len(multi_index)), index=multi_index)
        >>> sr
        1  1  1    0
           2  2    1
           3  3    2
        2  3  3    3
           2  2    4
           1  1    5
        dtype: int64
        >>> sr.vbt.heatmap(slider_level=0)
        ```
        
        """
        if not isinstance(self.wrapper.index, pd.MultiIndex):
            # No levels to pivot on; delegate to the DataFrame accessor.
            return self._obj.to_frame().vbt.heatmap(
                x_labels=x_labels, y_labels=y_labels,
                return_fig=return_fig, fig=fig, **kwargs)
        (x_level, y_level), (slider_level,) = index_fns.pick_levels(
            self.wrapper.index,
            required_levels=(x_level, y_level),
            optional_levels=(slider_level,)
        )
        x_level_vals = self.wrapper.index.get_level_values(x_level)
        y_level_vals = self.wrapper.index.get_level_values(y_level)
        x_name = x_level_vals.name if x_level_vals.name is not None else 'x'
        y_name = y_level_vals.name if y_level_vals.name is not None else 'y'
        kwargs = merge_dicts(dict(
            trace_kwargs=dict(
                hovertemplate=f"{x_name}: %{{x}}<br>" +
                              f"{y_name}: %{{y}}<br>" +
                              "value: %{z}<extra></extra>"
            ),
            xaxis_title=x_level_vals.name,
            yaxis_title=y_level_vals.name
        ), kwargs)
        if slider_level is None:
            # No grouping
            df = self.unstack_to_df(
                index_levels=y_level, column_levels=x_level,
                symmetric=symmetric, sort=sort
            )
            return df.vbt.heatmap(x_labels=x_labels, y_labels=y_labels, fig=fig, return_fig=return_fig, **kwargs)
        # Requires grouping
        # See https://plotly.com/python/sliders/
        if not return_fig:
            raise ValueError("Cannot use return_fig=False and slider_level simultaneously")
        _slider_labels = []
        # One hidden heatmap trace per slider group; only `active` is shown.
        for i, (name, group) in enumerate(self._obj.groupby(level=slider_level)):
            if slider_labels is not None:
                name = slider_labels[i]
            _slider_labels.append(name)
            df = group.vbt.unstack_to_df(
                index_levels=y_level, column_levels=x_level,
                symmetric=symmetric, sort=sort
            )
            if x_labels is None:
                x_labels = df.columns
            if y_labels is None:
                y_labels = df.index
            _kwargs = merge_dicts(dict(
                trace_kwargs=dict(
                    name=str(name) if name is not None else None,
                    visible=False
                ),
            ), kwargs)
            default_size = fig is None and 'height' not in _kwargs
            fig = plotting.Heatmap(
                data=df.vbt.to_2d_array(),
                x_labels=x_labels,
                y_labels=y_labels,
                fig=fig,
                **_kwargs
            ).fig
            if default_size:
                fig.layout['height'] += 100  # slider takes up space
        fig.data[active].visible = True
        steps = []
        # Each slider step toggles exactly one trace visible.
        for i in range(len(fig.data)):
            step = dict(
                method="update",
                args=[{"visible": [False] * len(fig.data)}, {}],
                label=str(_slider_labels[i]) if _slider_labels[i] is not None else None
            )
            step["args"][0]["visible"][i] = True
            steps.append(step)
        prefix = f'{self.wrapper.index.names[slider_level]}: ' \
            if self.wrapper.index.names[slider_level] is not None else None
        sliders = [dict(
            active=active,
            currentvalue={"prefix": prefix},
            pad={"t": 50},
            steps=steps
        )]
        fig.update_layout(
            sliders=sliders
        )
        return fig
    def ts_heatmap(self, **kwargs) -> tp.Union[tp.BaseFigure, plotting.Heatmap]:  # pragma: no cover
        """Heatmap of time-series data."""
        return self._obj.to_frame().vbt.ts_heatmap(**kwargs)
    def volume(self,
               x_level: tp.Optional[tp.Level] = None,
               y_level: tp.Optional[tp.Level] = None,
               z_level: tp.Optional[tp.Level] = None,
               x_labels: tp.Optional[tp.Labels] = None,
               y_labels: tp.Optional[tp.Labels] = None,
               z_labels: tp.Optional[tp.Labels] = None,
               slider_level: tp.Optional[tp.Level] = None,
               slider_labels: tp.Optional[tp.Labels] = None,
               active: int = 0,
               scene_name: str = 'scene',
               fillna: tp.Optional[tp.Number] = None,
               fig: tp.Optional[tp.BaseFigure] = None,
               return_fig: bool = True,
               **kwargs) -> tp.Union[tp.BaseFigure, plotting.Volume]:  # pragma: no cover
        """Create a 3D volume figure based on object's multi-index and values.
        If multi-index contains more than three levels or you want them in specific order, pass
        `x_level`, `y_level`, and `z_level`, each (`int` if index or `str` if name) corresponding
        to an axis of the volume. Optionally, pass `slider_level` to use a level as a slider.
        Creates `vectorbt.generic.plotting.Volume` and returns the figure.
        ## Example
        ```python-repl
        >>> multi_index = pd.MultiIndex.from_tuples([
        ...     (1, 1, 1),
        ...     (2, 2, 2),
        ...     (3, 3, 3)
        ... ])
        >>> sr = pd.Series(np.arange(len(multi_index)), index=multi_index)
        >>> sr
        1  1  1    0
        2  2  2    1
        3  3  3    2
        dtype: int64
        >>> sr.vbt.volume().show()
        ```
        
        """
        (x_level, y_level, z_level), (slider_level,) = index_fns.pick_levels(
            self.wrapper.index,
            required_levels=(x_level, y_level, z_level),
            optional_levels=(slider_level,)
        )
        x_level_vals = self.wrapper.index.get_level_values(x_level)
        y_level_vals = self.wrapper.index.get_level_values(y_level)
        z_level_vals = self.wrapper.index.get_level_values(z_level)
        # Labels are just unique level values
        if x_labels is None:
            x_labels = np.unique(x_level_vals)
        if y_labels is None:
            y_labels = np.unique(y_level_vals)
        if z_labels is None:
            z_labels = np.unique(z_level_vals)
        x_name = x_level_vals.name if x_level_vals.name is not None else 'x'
        y_name = y_level_vals.name if y_level_vals.name is not None else 'y'
        z_name = z_level_vals.name if z_level_vals.name is not None else 'z'
        def_kwargs = dict()
        def_kwargs['trace_kwargs'] = dict(
            hovertemplate=f"{x_name}: %{{x}}<br>" +
                          f"{y_name}: %{{y}}<br>" +
                          f"{z_name}: %{{z}}<br>" +
                          "value: %{value}<extra></extra>"
        )
        def_kwargs[scene_name] = dict(
            xaxis_title=x_level_vals.name,
            yaxis_title=y_level_vals.name,
            zaxis_title=z_level_vals.name
        )
        def_kwargs['scene_name'] = scene_name
        kwargs = merge_dicts(def_kwargs, kwargs)
        # Track NaNs across all groups so we can warn once at the end.
        contains_nan = False
        if slider_level is None:
            # No grouping
            v = self.unstack_to_array(levels=(x_level, y_level, z_level))
            if fillna is not None:
                v = np.nan_to_num(v, nan=fillna)
            if np.isnan(v).any():
                contains_nan = True
            volume = plotting.Volume(
                data=v,
                x_labels=x_labels,
                y_labels=y_labels,
                z_labels=z_labels,
                fig=fig,
                **kwargs
            )
            if return_fig:
                fig = volume.fig
            else:
                fig = volume
        else:
            # Requires grouping
            # See https://plotly.com/python/sliders/
            if not return_fig:
                raise ValueError("Cannot use return_fig=False and slider_level simultaneously")
            _slider_labels = []
            # One hidden volume trace per slider group; only `active` is shown.
            for i, (name, group) in enumerate(self._obj.groupby(level=slider_level)):
                if slider_labels is not None:
                    name = slider_labels[i]
                _slider_labels.append(name)
                v = group.vbt.unstack_to_array(levels=(x_level, y_level, z_level))
                if fillna is not None:
                    v = np.nan_to_num(v, nan=fillna)
                if np.isnan(v).any():
                    contains_nan = True
                _kwargs = merge_dicts(dict(
                    trace_kwargs=dict(
                        name=str(name) if name is not None else None,
                        visible=False
                    )
                ), kwargs)
                default_size = fig is None and 'height' not in _kwargs
                fig = plotting.Volume(
                    data=v,
                    x_labels=x_labels,
                    y_labels=y_labels,
                    z_labels=z_labels,
                    fig=fig,
                    **_kwargs
                ).fig
                if default_size:
                    fig.layout['height'] += 100  # slider takes up space
            fig.data[active].visible = True
            steps = []
            # Each slider step toggles exactly one trace visible.
            for i in range(len(fig.data)):
                step = dict(
                    method="update",
                    args=[{"visible": [False] * len(fig.data)}, {}],
                    label=str(_slider_labels[i]) if _slider_labels[i] is not None else None
                )
                step["args"][0]["visible"][i] = True
                steps.append(step)
            prefix = f'{self.wrapper.index.names[slider_level]}: ' \
                if self.wrapper.index.names[slider_level] is not None else None
            sliders = [dict(
                active=active,
                currentvalue={"prefix": prefix},
                pad={"t": 50},
                steps=steps
            )]
            fig.update_layout(
                sliders=sliders
            )
        if contains_nan:
            warnings.warn("Data contains NaNs. Use `fillna` argument or "
                          "`show` method in case of visualization issues.", stacklevel=2)
        return fig
    def qqplot(self,
               sparams: tp.Union[tp.Iterable, tuple, None] = (),
               dist: str = 'norm',
               plot_line: bool = True,
               line_shape_kwargs: tp.KwargsLike = None,
               xref: str = 'x',
               yref: str = 'y',
               fig: tp.Optional[tp.BaseFigure] = None,
               **kwargs) -> tp.BaseFigure:  # pragma: no cover
        """Plot probability plot using `scipy.stats.probplot`.
        `**kwargs` are passed to `GenericAccessor.scatterplot`.
        ## Example
        ```python-repl
        >>> pd.Series(np.random.standard_normal(100)).vbt.qqplot()
        ```
        
        """
        qq = stats.probplot(self._obj, sparams=sparams, dist=dist)
        fig = pd.Series(qq[0][1], index=qq[0][0]).vbt.scatterplot(fig=fig, **kwargs)
        if plot_line:
            if line_shape_kwargs is None:
                line_shape_kwargs = {}
            # Reference line endpoints: slope/intercept from the probplot fit
            # evaluated at the extreme theoretical quantiles.
            x = np.array([qq[0][0][0], qq[0][0][-1]])
            y = qq[1][1] + qq[1][0] * x
            fig.add_shape(**merge_dicts(dict(
                type="line",
                xref=xref,
                yref=yref,
                x0=x[0],
                y0=y[0],
                x1=x[1],
                y1=y[1],
                line=dict(
                    color='red'
                )
            ), line_shape_kwargs))
        return fig
class GenericDFAccessor(GenericAccessor, BaseDFAccessor):
    """Accessor on top of data of any type. For DataFrames only.
    Accessible through `pd.DataFrame.vbt`."""
    def __init__(self, obj: tp.Frame, **kwargs) -> None:
        # A parent accessor may be passed instead of a pandas object;
        # unwrap it before delegating to both base initializers.
        if not checks.is_pandas(obj):  # parent accessor
            obj = obj._obj
        BaseDFAccessor.__init__(self, obj, **kwargs)
        GenericAccessor.__init__(self, obj, **kwargs)
    def heatmap(self,
                x_labels: tp.Optional[tp.Labels] = None,
                y_labels: tp.Optional[tp.Labels] = None,
                return_fig: bool = True,
                **kwargs) -> tp.Union[tp.BaseFigure, plotting.Heatmap]:  # pragma: no cover
        """Create `vectorbt.generic.plotting.Heatmap` and return the figure.
        ## Example
        ```python-repl
        >>> df = pd.DataFrame([
        ...     [0, np.nan, np.nan],
        ...     [np.nan, 1, np.nan],
        ...     [np.nan, np.nan, 2]
        ... ])
        >>> df.vbt.heatmap()
        ```
        
        """
        # Axis labels default to the wrapper's columns (x) and index (y).
        labels_x = self.wrapper.columns if x_labels is None else x_labels
        labels_y = self.wrapper.index if y_labels is None else y_labels
        widget = plotting.Heatmap(
            data=self.to_2d_array(),
            x_labels=labels_x,
            y_labels=labels_y,
            **kwargs
        )
        return widget.fig if return_fig else widget
    def ts_heatmap(self, is_y_category: bool = True,
                   **kwargs) -> tp.Union[tp.BaseFigure, plotting.Heatmap]:  # pragma: no cover
        """Heatmap of time-series data."""
        # Transpose so time runs along x, and flip rows so the first column
        # of the original frame ends up at the top of the heatmap.
        flipped = self._obj.transpose().iloc[::-1]
        return flipped.vbt.heatmap(is_y_category=is_y_category, **kwargs)
| 39.684426 | 119 | 0.555949 |
fdba70f3770fa06d062e01f1e1c25c3e133d16f3 | 15,812 | py | Python | ckanext-hdx_package/ckanext/hdx_package/helpers/helpers.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_package/ckanext/hdx_package/helpers/helpers.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | ckanext-hdx_package/ckanext/hdx_package/helpers/helpers.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | null | null | null | import re
import urlparse
import uuid
import json
import logging
import ckan.lib.helpers as h
import ckan.model as model
import ckan.lib.base as base
import ckan.logic as logic
import ckan.plugins.toolkit as tk
import ckan.authz as new_authz
import ckan.lib.activity_streams as activity_streams
import ckan.model.package as package
import ckan.model.misc as misc
from pylons import config
from ckan.common import _, c, request
from ckanext.hdx_package.exceptions import NoOrganization
from ckanext.hdx_package.helpers.caching import cached_group_iso_to_title
import ckanext.hdx_package.helpers.custom_validator as vd
get_action = logic.get_action
log = logging.getLogger(__name__)
_check_access = logic.check_access
_get_or_bust = logic.get_or_bust
NotFound = logic.NotFound
ValidationError = logic.ValidationError
_get_action = logic.get_action
def build_additions(groups):
    """
    Build the 'additions' JSON payload used by solr searches.

    :param groups: group dicts; each group with a 'name' key is mapped to
        its country title via the cached ISO-code-to-title lookup
    :type groups: list of dict
    :return: JSON string of the form {"countries": [...]}
    :rtype: str
    """
    countries = []
    for g in groups:
        try:
            if 'name' in g:
                countries.append(cached_group_iso_to_title()[g.get('name')])
        except Exception as e:
            # A group with no mapping must not break indexing; log and skip it.
            # (e.message is Python 2 only, hence the hasattr guard.)
            ex_msg = e.message if hasattr(e, 'message') else str(e)
            log.error(ex_msg)
    return json.dumps({'countries': countries})
def hdx_user_org_num(user_id):
    """
    Return the organizations in which the given user can create datasets.

    Note: despite the name, this returns the list of organization dicts,
    not a count (the previous docstring was misleading).

    :param user_id: id or name of the user
    :return: list of organization dicts
    """
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author}
    try:
        orgs = tk.get_action('organization_list_for_user')(
            context, {'id': user_id, 'permission': 'create_dataset'})
    except logic.NotAuthorized:
        # abort() raises, so execution never falls through to the return below.
        base.abort(401, _('Unauthorized to see organization member list'))
    return orgs
def hdx_organizations_available_with_roles():
    """
    Return the current user's organizations, each annotated with the user's
    role ('sysadmin', 'admin', 'editor' or 'member') and with
    'has_add_dataset_rights', sorted by display name.
    """
    orgs = h.organizations_available('read')
    if not orgs:
        return orgs
    am_sysadmin = new_authz.is_sysadmin(c.user)
    editor_ids = set()
    admin_ids = set()
    if not am_sysadmin:
        # Sysadmins skip these lookups entirely: they get full rights anyway.
        editor_ids = set([o['id'] for o in h.organizations_available('create_dataset')])
        admin_ids = set([o['id'] for o in h.organizations_available('admin')])
    for org in orgs:
        org['has_add_dataset_rights'] = True
        if am_sysadmin:
            org['role'] = 'sysadmin'
        elif org['id'] in admin_ids:
            org['role'] = 'admin'
        elif org['id'] in editor_ids:
            org['role'] = 'editor'
        else:
            org['role'] = 'member'
            org['has_add_dataset_rights'] = False
    orgs.sort(key=lambda o: o['display_name'].lower())
    return orgs
def hdx_get_activity_list(context, data_dict):
    """
    Get the activity list for a given package, rendered via `_activity_list`.

    :param context: ckan action context
    :param data_dict: must contain the package 'id'; may contain 'offset'
    :return: template variables dict containing the rendered activities
    """
    try:
        activity_stream = get_action('package_activity_list')(context, data_dict)
    except Exception as ex:
        # A failing activity stream must not break the page; show nothing.
        log.exception(ex)
        activity_stream = []
    offset = int(data_dict.get('offset', 0))
    extra_vars = {
        'controller': 'package',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': offset,
    }
    return _activity_list(context, activity_stream, extra_vars)
def hdx_find_license_name(license_id, license_name):
    """
    Look up the license title by id when the given name is missing, empty,
    or just a copy of the id.

    :param license_id: id of the license to look up
    :param license_name: current (possibly empty) license name
    :return: the resolved license title, or `license_name` when no lookup
        is needed or no title is registered for `license_id`
    """
    # `not license_name` covers both None and the empty string.
    if not license_name or license_name == license_id:
        original_license_list = (
            l.as_dict() for l in package.Package._license_register.licenses)
        license_dict = {l['id']: l['title']
                        for l in original_license_list}
        if license_id in license_dict:
            return license_dict[license_id]
    return license_name
# code copied from activity_streams.activity_list_to_html and modified to
# return only the activity list
def _activity_list(context, activity_stream, extra_vars):
    '''Return the given activity stream
    :param activity_stream: the activity stream to render
    :type activity_stream: list of activity dictionaries
    :param extra_vars: extra variables to pass to the activity stream items
    template when rendering it
    :type extra_vars: dictionary
    '''
    activity_list = []  # These are the activity stream messages.
    for activity in activity_stream:
        detail = None
        activity_type = activity['activity_type']
        # Some activity types may have details.
        if activity_type in activity_streams.activity_stream_actions_with_detail:
            details = logic.get_action('activity_detail_list')(context=context,
                                                               data_dict={'id': activity['id']})
            # If an activity has just one activity detail then render the
            # detail instead of the activity.
            if len(details) == 1:
                detail = details[0]
                object_type = detail['object_type']
                if object_type == 'PackageExtra':
                    object_type = 'package_extra'
                new_activity_type = '%s %s' % (detail['activity_type'],
                                               object_type.lower())
                # Only switch to the more specific type if a renderer exists.
                if new_activity_type in activity_streams.activity_stream_string_functions:
                    activity_type = new_activity_type
        if not activity_type in activity_streams.activity_stream_string_functions:
            raise NotImplementedError("No activity renderer for activity "
                                      "type '%s'" % activity_type)
        # Fall back to the generic icon when no dedicated one is registered.
        if activity_type in activity_streams.activity_stream_string_icons:
            activity_icon = activity_streams.activity_stream_string_icons[
                activity_type]
        else:
            activity_icon = activity_streams.activity_stream_string_icons[
                'undefined']
        activity_msg = activity_streams.activity_stream_string_functions[activity_type](context,
                                                                                        activity)
        # Get the data needed to render the message.
        # The message is a template with {placeholder} markers; resolve each
        # one to its rendered snippet.
        matches = re.findall('\{([^}]*)\}', activity_msg)
        data = {}
        for match in matches:
            snippet = activity_streams.activity_snippet_functions[
                match](activity, detail)
            data[str(match)] = snippet
        activity_list.append({'msg': activity_msg,
                              'type': activity_type.replace(' ', '-').lower(),
                              'icon': activity_icon,
                              'data': data,
                              'timestamp': activity['timestamp'],
                              'is_new': activity.get('is_new', False)})
    extra_vars['activities'] = activity_list
    return extra_vars
def hdx_tag_autocomplete_list(context, data_dict):
    '''Return a list of tag names that contain a given string.
    By default only free tags (tags that don't belong to any vocabulary) are
    searched. If the ``vocabulary_id`` argument is given then only tags
    belonging to that vocabulary will be searched instead.
    :param query: the string to search for
    :type query: string
    :param vocabulary_id: the id or name of the tag vocabulary to search in
      (optional)
    :type vocabulary_id: string
    :param fields: deprecated
    :type fields: dictionary
    :param limit: the maximum number of tags to return
    :type limit: int
    :param offset: when ``limit`` is given, the offset to start returning tags
        from
    :type offset: int
    :rtype: list of strings
    '''
    _check_access('tag_autocomplete', context, data_dict)
    matching_tags, _count = _tag_search(context, data_dict)
    # Only the names are needed for autocomplete; the count is discarded.
    return [tag.name for tag in matching_tags] if matching_tags else []
# code copied from get.py line 1748
def _tag_search(context, data_dict):
    """
    Searches tags for autocomplete, but makes sure only return active tags
    """
    model = context['model']
    # Accept either 'query' or the legacy 'q' parameter; normalize to a list.
    terms = data_dict.get('query') or data_dict.get('q') or []
    if isinstance(terms, basestring):
        terms = [terms]
    terms = [t.strip() for t in terms if t.strip()]
    if 'fields' in data_dict:
        log.warning('"fields" parameter is deprecated.  '
                    'Use the "query" parameter instead')
    fields = data_dict.get('fields', {})
    offset = data_dict.get('offset')
    limit = data_dict.get('limit')
    # TODO: should we check for user authentication first?
    q = model.Session.query(model.Tag)
    if 'vocabulary_id' in data_dict:
        # Filter by vocabulary.
        vocab = model.Vocabulary.get(_get_or_bust(data_dict, 'vocabulary_id'))
        if not vocab:
            raise NotFound
        q = q.filter(model.Tag.vocabulary_id == vocab.id)
    # CHANGES to initial version
    # else:
    # If no vocabulary_name in data dict then show free tags only.
    # q = q.filter(model.Tag.vocabulary_id == None)
    # If we're searching free tags, limit results to tags that are
    # currently applied to a package.
    # q = q.distinct().join(model.Tag.package_tags)
    # Legacy 'fields' support: treat 'tag'/'tags' entries as extra terms.
    for field, value in fields.items():
        if field in ('tag', 'tags'):
            terms.append(value)
    if not len(terms):
        return [], 0
    # Each term becomes an additional ILIKE filter (AND semantics).
    for term in terms:
        escaped_term = misc.escape_sql_like_special_characters(
            term, escape='\\')
        q = q.filter(model.Tag.name.ilike('%' + escaped_term + '%'))
    # HDX change vs. core ckan: only tags attached to an *active* package tag
    # are returned.
    q = q.join('package_tags').filter(model.PackageTag.state == 'active')
    # Count before applying pagination so callers get the total match count.
    count = q.count()
    q = q.offset(offset)
    q = q.limit(limit)
    tags = q.all()
    return tags, count
def pkg_topics_list(data_dict):
    """
    Return the 'Topics' vocabulary tags attached to the given package.
    """
    pkg = model.Package.get(data_dict['id'])
    vocabulary = model.Vocabulary.get('Topics')
    # Without a 'Topics' vocabulary there can be no topics.
    if not vocabulary:
        return []
    return pkg.get_tags(vocab=vocabulary)
def get_tag_vocabulary(tags):
    """
    Attach the 'Topics' vocabulary id to each tag dict that names an
    existing topic, lower-casing tag names in place.

    :param tags: list of tag dicts, each with at least a 'name' key
    :return: the same list, mutated in place
    """
    # The vocabulary is the same for every tag; look it up once instead of
    # once per iteration.
    vocabulary = model.Vocabulary.get('Topics')
    for item in tags:
        tag_name = item['name'].lower()
        if vocabulary:
            topic = model.Tag.by_name(name=tag_name, vocab=vocabulary)
            if topic:
                item['vocabulary_id'] = vocabulary.id
        item['name'] = tag_name
    return tags
def hdx_unified_resource_format(format):
    '''
    This function is based on the unified_resource_format() function from ckan.lib.helpers.
    As the one from core ckan it checks the resource formats configuration to translate the
    format string to a standard format.
    The difference is that in case nothing is found in 'resource_formats.json' then it's
    turned to lowercase.
    :param format: resource format as written by the user
    :type format: string
    :return:
    :rtype:
    '''
    known_formats = h.resource_formats()
    cleaned = format.lower()
    # Known formats map to their canonical display name; unknown ones are
    # simply kept lowercase.
    if cleaned in known_formats:
        return known_formats[cleaned][1]
    return cleaned
def filesize_format(size_in_bytes):
    """
    Format a byte count as a human-readable size: 1024-based, one decimal,
    single-letter unit suffix (e.g. ``1.5K``).

    :param size_in_bytes: size in bytes (int or numeric string)
    :return: formatted size string, or the raw input unchanged on error
    """
    try:
        d = 1024.0
        size = int(size_in_bytes)
        for unit in ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if size < d:
                return "%3.1f%s" % (size, unit)
            size /= d
        # Anything >= 1024**8 falls through to yotta; use a single 'Y' to stay
        # consistent with the single-letter units above (was 'Yi').
        return "%.1f%s" % (size, 'Y')
    except Exception as e:
        log.warn('Error occurred when formatting the number {}. Error {}'.format(size_in_bytes, str(e)))
        return size_in_bytes
def hdx_get_proxified_resource_url(data_dict, proxy_schemes=('http', 'https')):
    '''
    This function replaces the one with the similar name from ckanext.resourceproxy.plugin .
    Changes:
    1) Don't look at the protocol when checking if it is the same domain
    2) Return a domain relative url (without schema, domain or port) for local resources.
    :param data_dict: contains a resource and package dict
    :type data_dict: dict
    :param proxy_schemes: url schemes to proxy for (default: http and https)
    :type proxy_schemes: iterable of str
    '''
    # Fix: the default used to be a mutable list; a tuple avoids the shared
    # mutable-default pitfall while remaining membership-compatible.
    resource_url = data_dict['resource']['url']
    same_domain = is_ckan_domain(resource_url)
    parsed_url = urlparse.urlparse(resource_url)
    scheme = parsed_url.scheme
    if not same_domain and scheme in proxy_schemes:
        # External resource: route it through the resource proxy controller.
        url = h.url_for(
            action='proxy_resource',
            controller='ckanext.resourceproxy.controller:ProxyController',
            id=data_dict['package']['name'],
            resource_id=data_dict['resource']['id'])
        log.info('Proxified url is {0}'.format(url))
    else:
        # Local resource: strip scheme/host so the url stays domain-relative.
        url = urlparse.urlunparse((None, None) + parsed_url[2:])
    return url
def is_ckan_domain(url):
    '''
    :param url: url to check whether it's on the same domain as ckan
    :type url: str
    :return: True if it's the same domain. False otherwise
    :rtype: bool
    '''
    ckan_url = config.get('ckan.site_url', '//localhost:5000')
    hostname = urlparse.urlparse(url).hostname
    ckan_hostname = urlparse.urlparse(ckan_url).hostname
    # A url without a hostname is relative, hence on the ckan domain.
    # (The previous `True if ... else False` wrapping was redundant.)
    return not hostname or hostname == ckan_hostname
def make_url_relative(url):
'''
Transforms something like http://testdomain.com/test to /test
:param url: url to check whether it's on the same domain as ckan
:type url: str
:return: the new url as a string
:rtype: str
'''
parsed_url = urlparse.urlparse(url)
return urlparse.urlunparse((None, None) + parsed_url[2:])
def generate_mandatory_fields():
    '''
    Generate a dataset dict pre-filled with the mandatory fields, using the
    current user's first organization with dataset-creation rights.

    :return: dataset dict with mandatory fields filled
    :rtype: dict
    :raises NoOrganization: if the user doesn't belong to any organization
    '''
    user = c.user or c.author
    orgs = h.organizations_available('create_dataset')
    if not orgs:
        raise NoOrganization(_('The user needs to belong to at least 1 organisation'))
    # Default to the user's first organization.
    selected_org = orgs[0]
    data_dict = {
        'private': True,
        'license_id': 'cc-by',
        'owner_org': selected_org.get('id'),
        'dataset_source': selected_org.get('title'),
        'maintainer': user,
        'subnational': 1,
        'data_update_frequency': config.get('hdx.default_frequency', '-999'),
        'dataset_preview_check': '1',
        'dataset_preview': vd._DATASET_PREVIEW_FIRST_RESOURCE,
        'dataset_preview_value': vd._DATASET_PREVIEW_FIRST_RESOURCE
    }
    return data_dict
def hdx_check_add_data():
    """
    Decide where the 'Add Data' button should point for the current user.

    :return: dict with 'href' and 'onclick' entries
    :rtype: dict
    """
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author, 'auth_user_obj': c.userobj,
               'save': 'save' in request.params}
    try:
        logic.check_access("package_create", context, None)
    except logic.NotAuthorized:
        # Logged-in users without create rights go to their orgs dashboard;
        # anonymous users are sent to the contribute page.
        if c.userobj or c.user:
            return {'href': '/dashboard/organizations', 'onclick': ''}
        return {'href': '/contribute', 'onclick': ''}
    # User may create datasets: open the contribute flow in place.
    return {'href': '#', 'onclick': 'contributeAddDetails()'}
| 33.571125 | 111 | 0.632811 |
149dc4c6c4bc1c55bfd2ed6798dd09e163cf6b43 | 28 | py | Python | rlib/algorithms/dqn/__init__.py | MarcioPorto/rlib | 5919f2dc52105000a23a25c31bbac260ca63565f | [
"MIT"
] | 1 | 2019-09-08T08:33:13.000Z | 2019-09-08T08:33:13.000Z | rlib/algorithms/dqn/__init__.py | MarcioPorto/rlib | 5919f2dc52105000a23a25c31bbac260ca63565f | [
"MIT"
] | 26 | 2019-03-15T03:11:21.000Z | 2022-03-11T23:42:46.000Z | rlib/algorithms/dqn/__init__.py | MarcioPorto/rlib | 5919f2dc52105000a23a25c31bbac260ca63565f | [
"MIT"
] | null | null | null | from .agent import DQNAgent
| 14 | 27 | 0.821429 |
6e1231a9751730f42907c5ad1cc9d361f8a9d6c5 | 14,718 | py | Python | nolearn/lasagne/tests/test_handlers.py | KEVINYZY/nolearn | 342915012081f31bb88f69daa8857fd2f4e15a1d | [
"MIT"
] | 968 | 2015-01-02T00:37:08.000Z | 2022-03-14T02:52:41.000Z | nolearn/lasagne/tests/test_handlers.py | KEVINYZY/nolearn | 342915012081f31bb88f69daa8857fd2f4e15a1d | [
"MIT"
] | 269 | 2015-01-01T20:44:58.000Z | 2021-11-21T19:59:00.000Z | nolearn/lasagne/tests/test_handlers.py | KEVINYZY/nolearn | 342915012081f31bb88f69daa8857fd2f4e15a1d | [
"MIT"
] | 315 | 2015-01-10T03:15:21.000Z | 2022-01-28T14:59:58.000Z | from collections import OrderedDict
import pickle
from lasagne.layers import ConcatLayer
from lasagne.layers import Conv2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import MaxPool2DLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
from mock import patch
from mock import Mock
import numpy
import pytest
from nolearn._compat import builtins
def test_print_log(mnist):
    """PrintLog.table renders one row per epoch with all score columns."""
    from nolearn.lasagne import PrintLog

    # Mock net carrying one custom score plus one train and one valid score.
    nn = Mock(
        regression=False,
        custom_scores=[('my1', 0.99)],
        scores_train=[('my2', 0.98)],
        scores_valid=[('my3', 0.33)],
        )

    # A single finished epoch; keys mirror what NeuralNet records.
    train_history = [{
        'epoch': 1,
        'train_loss': 0.8,
        'valid_loss': 0.7,
        'train_loss_best': False,
        'valid_loss_best': False,
        'valid_accuracy': 0.9,
        'my1': 0.99,
        'my2': 0.98,
        'my3': 0.33,
        'dur': 1.0,
        }]

    output = PrintLog().table(nn, train_history)

    # Compare token-wise (split()) so column padding does not matter.
    assert output.split() == [
        'epoch',
        'trn',
        'loss',
        'val',
        'loss',
        'trn/val',
        'valid',
        'acc',
        'my2',
        'my3',
        'my1',
        'dur',
        '-------',
        '----------',
        '----------',
        '---------',
        '-----------',
        '-------',
        '-------',
        '-------',
        '-----',
        '1',
        '0.80000',
        '0.70000',
        '1.14286',
        '0.90000',
        '0.98000',
        '0.33000',
        '0.99000',
        '1.00s',
        ]
class TestSaveWeights():
    """Tests for the SaveWeights on-epoch-finished handler."""

    @pytest.fixture
    def SaveWeights(self):
        from nolearn.lasagne import SaveWeights
        return SaveWeights

    def test_every_n_epochs_true(self, SaveWeights):
        # Epoch 9 is divisible by every_n_epochs=3, so weights are saved.
        train_history = [{'epoch': 9, 'valid_loss': 1.1}]
        nn = Mock()
        handler = SaveWeights('mypath', every_n_epochs=3)
        handler(nn, train_history)
        assert nn.save_params_to.call_count == 1
        nn.save_params_to.assert_called_with('mypath')

    def test_every_n_epochs_false(self, SaveWeights):
        # Epoch 9 is not divisible by every_n_epochs=4: no save.
        train_history = [{'epoch': 9, 'valid_loss': 1.1}]
        nn = Mock()
        handler = SaveWeights('mypath', every_n_epochs=4)
        handler(nn, train_history)
        assert nn.save_params_to.call_count == 0

    def test_only_best_true_single_entry(self, SaveWeights):
        # The first epoch is trivially the best so far.
        train_history = [{'epoch': 9, 'valid_loss': 1.1}]
        nn = Mock()
        handler = SaveWeights('mypath', only_best=True)
        handler(nn, train_history)
        assert nn.save_params_to.call_count == 1

    def test_only_best_true_two_entries(self, SaveWeights):
        # Last epoch improved on the previous loss: save once.
        train_history = [
            {'epoch': 9, 'valid_loss': 1.2},
            {'epoch': 10, 'valid_loss': 1.1},
            ]
        nn = Mock()
        handler = SaveWeights('mypath', only_best=True)
        handler(nn, train_history)
        assert nn.save_params_to.call_count == 1

    def test_only_best_false_two_entries(self, SaveWeights):
        # Last epoch got worse: nothing is saved.
        train_history = [
            {'epoch': 9, 'valid_loss': 1.2},
            {'epoch': 10, 'valid_loss': 1.3},
            ]
        nn = Mock()
        handler = SaveWeights('mypath', only_best=True)
        handler(nn, train_history)
        assert nn.save_params_to.call_count == 0

    def test_with_path_interpolation(self, SaveWeights):
        # {epoch} is zero-padded, {timestamp} starts with the year,
        # {loss} is interpolated verbatim.
        train_history = [{'epoch': 9, 'valid_loss': 1.1}]
        nn = Mock()
        handler = SaveWeights('mypath-{epoch}-{timestamp}-{loss}.pkl')
        handler(nn, train_history)
        path = nn.save_params_to.call_args[0][0]
        assert path.startswith('mypath-0009-2')
        assert path.endswith('-1.1.pkl')

    def test_pickle(self, SaveWeights):
        # With pickle=True the whole net is pickled to an opened file
        # (protocol -1) instead of calling save_params_to.
        train_history = [{'epoch': 9, 'valid_loss': 1.1}]
        nn = Mock()
        with patch('nolearn.lasagne.handlers.pickle') as pickle:
            with patch.object(builtins, 'open') as mock_open:
                handler = SaveWeights('mypath', every_n_epochs=3, pickle=True)
                handler(nn, train_history)

        mock_open.assert_called_with('mypath', 'wb')
        pickle.dump.assert_called_with(nn, mock_open().__enter__(), -1)
class TestRememberBestWeights:
    """Tests for the RememberBestWeights / RestoreBestWeights handlers."""

    @pytest.fixture
    def RememberBestWeights(self):
        from nolearn.lasagne.handlers import RememberBestWeights
        return RememberBestWeights

    @pytest.fixture
    def RestoreBestWeights(self):
        from nolearn.lasagne.handlers import RestoreBestWeights
        return RestoreBestWeights

    @pytest.mark.parametrize('loss_name', ['valid_loss', 'my_loss'])
    def test_simple(self, RememberBestWeights, loss_name):
        # Losses are minimized: the lowest value seen so far wins.
        # A Mock returns the same child object on every call, so the
        # ``is`` comparisons against get_all_params_values() hold.
        nn1, nn2, nn3 = Mock(), Mock(), Mock()
        rbw = RememberBestWeights(loss=loss_name)
        train_history = []

        train_history.append({'epoch': 1, loss_name: 1.0})
        rbw(nn1, train_history)
        assert rbw.best_weights is nn1.get_all_params_values()

        train_history.append({'epoch': 2, loss_name: 1.1})
        rbw(nn2, train_history)
        assert rbw.best_weights is nn1.get_all_params_values()

        train_history.append({'epoch': 3, loss_name: 0.9})
        rbw(nn3, train_history)
        assert rbw.best_weights is nn3.get_all_params_values()

    def test_custom_score(self, RememberBestWeights):
        # Custom scores are maximized: the highest value seen so far wins.
        nn1, nn2, nn3 = Mock(), Mock(), Mock()
        rbw = RememberBestWeights(score='myscr')
        train_history = []

        train_history.append({'epoch': 1, 'myscr': 1.0})
        rbw(nn1, train_history)
        assert rbw.best_weights is nn1.get_all_params_values()

        train_history.append({'epoch': 2, 'myscr': 1.1})
        rbw(nn2, train_history)
        assert rbw.best_weights is nn2.get_all_params_values()

        train_history.append({'epoch': 3, 'myscr': 0.9})
        rbw(nn3, train_history)
        assert rbw.best_weights is nn2.get_all_params_values()

    def test_restore(self, RememberBestWeights, RestoreBestWeights):
        # RestoreBestWeights loads the weights remembered by its
        # companion RememberBestWeights instance back into the net.
        nn = Mock()
        remember_best_weights = RememberBestWeights()
        restore_best_weights = RestoreBestWeights(
            remember=remember_best_weights)
        train_history = []

        train_history.append({'epoch': 1, 'valid_loss': 1.0})
        remember_best_weights(nn, train_history)
        restore_best_weights(nn, train_history)
        # Both asserts refer to the same remembered mock object.
        nn.load_params_from.assert_called_with(nn.get_all_params_values())
        nn.load_params_from.assert_called_with(
            remember_best_weights.best_weights)
class TestPrintLayerInfo():
    """Tests for PrintLayerInfo and the conv/pool layer predicates.

    Builds one dense network (``nn``) and one convolutional network
    (``cnn``) on a 100-sample MNIST subset and checks the rendered
    layer-information tables.
    """

    @pytest.fixture(scope='session')
    def X_train(self, mnist):
        X, y = mnist
        # NCHW: 100 grayscale 28x28 images.
        return X[:100].reshape(-1, 1, 28, 28)

    @pytest.fixture(scope='session')
    def y_train(self, mnist):
        X, y = mnist
        return y[:100]

    @pytest.fixture(scope='session')
    def nn(self, NeuralNet, X_train, y_train):
        # Plain fully-connected network (no conv layers).
        nn = NeuralNet(
            layers=[
                ('input', InputLayer),
                ('dense0', DenseLayer),
                ('dense1', DenseLayer),
                ('output', DenseLayer),
                ],
            input_shape=(None, 1, 28, 28),
            output_num_units=10,
            output_nonlinearity=softmax,

            more_params=dict(
                dense0_num_units=16,
                dense1_num_units=16,
                ),

            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,

            max_epochs=3,
            )

        nn.initialize()
        return nn

    @pytest.fixture(scope='session')
    def cnn(self, NeuralNet, X_train, y_train):
        # Convolutional network with a pooling layer.
        # NOTE(review): ``hidden1_num_units`` has no matching layer name
        # in ``layers`` and appears to be ignored — confirm.
        nn = NeuralNet(
            layers=[
                ('input', InputLayer),
                ('conv1', Conv2DLayer),
                ('conv2', Conv2DLayer),
                ('pool2', MaxPool2DLayer),
                ('conv3', Conv2DLayer),
                ('output', DenseLayer),
                ],
            input_shape=(None, 1, 28, 28),
            output_num_units=10,
            output_nonlinearity=softmax,

            more_params=dict(
                conv1_filter_size=5, conv1_num_filters=16,
                conv2_filter_size=3, conv2_num_filters=16,
                pool2_pool_size=8, pool2_ignore_border=False,
                conv3_filter_size=3, conv3_num_filters=16,
                hidden1_num_units=16,
                ),

            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,

            max_epochs=3,
            )

        nn.initialize()
        return nn

    @pytest.fixture
    def is_conv2d(self):
        from nolearn.lasagne.util import is_conv2d
        return is_conv2d

    @pytest.fixture
    def is_maxpool2d(self):
        from nolearn.lasagne.util import is_maxpool2d
        return is_maxpool2d

    @pytest.fixture
    def print_info(self):
        from nolearn.lasagne.handlers import PrintLayerInfo
        return PrintLayerInfo()

    # Passing a collection of layers asks "does the net contain any";
    # passing a single layer asks about that layer only.
    def test_is_conv2d_net_false(self, nn, is_conv2d):
        assert is_conv2d(nn.layers_.values()) is False

    def test_is_conv2d_net_true(self, cnn, is_conv2d):
        assert is_conv2d(cnn.layers_.values()) is True

    def test_is_conv2d_layer(self, nn, cnn, is_conv2d):
        assert is_conv2d(nn.layers_['input']) is False
        assert is_conv2d(cnn.layers_['pool2']) is False
        assert is_conv2d(cnn.layers_['conv1']) is True

    def test_is_maxpool2d_net_false(self, nn, is_maxpool2d):
        assert is_maxpool2d(nn.layers_.values()) is False

    def test_is_maxpool2d_net_true(self, cnn, is_maxpool2d):
        assert is_maxpool2d(cnn.layers_.values()) is True

    def test_is_maxpool2d_layer(self, nn, cnn, is_maxpool2d):
        assert is_maxpool2d(nn.layers_['input']) is False
        assert is_maxpool2d(cnn.layers_['pool2']) is True
        assert is_maxpool2d(cnn.layers_['conv1']) is False

    def test_print_layer_info_greeting(self, nn, print_info):
        # number of learnable parameters is weights + biases:
        # 28 * 28 * 16 + 16 + 16 * 16 + 16 + 16 * 10 + 10 = 13002
        expected = '# Neural Network with 13002 learnable parameters\n'
        message = print_info._get_greeting(nn)
        assert message == expected

    def test_print_layer_info_plain_nn(self, nn, print_info):
        expected = """\
# name size
--- ------ -------
0 input 1x28x28
1 dense0 16
2 dense1 16
3 output 10"""
        message = print_info._get_layer_info_plain(nn)
        assert message == expected

    def test_print_layer_info_plain_cnn(self, cnn, print_info):
        expected = """\
# name size
--- ------ --------
0 input 1x28x28
1 conv1 16x24x24
2 conv2 16x22x22
3 pool2 16x3x3
4 conv3 16x1x1
5 output 10"""
        message = print_info._get_layer_info_plain(cnn)
        assert message == expected

    def test_print_layer_info_conv_cnn(self, cnn, print_info):
        # Extended table with capacity (cap.) and coverage (cov.) columns.
        expected = """\
name size total cap.Y cap.X cov.Y cov.X
------ -------- ------- ------- ------- ------- -------
input 1x28x28 784 100.00 100.00 100.00 100.00
conv1 16x24x24 9216 100.00 100.00 17.86 17.86
conv2 16x22x22 7744 42.86 42.86 25.00 25.00
pool2 16x3x3 144 42.86 42.86 25.00 25.00
conv3 16x1x1 16 104.35 104.35 82.14 82.14
output 10 10 100.00 100.00 100.00 100.00"""
        message, legend = print_info._get_layer_info_conv(cnn)
        assert message == expected

        # The legend contains ANSI color escape sequences.
        expected = """
Explanation
X, Y: image dimensions
cap.: learning capacity
cov.: coverage of image
\x1b[35mmagenta\x1b[0m: capacity too low (<1/6)
\x1b[36mcyan\x1b[0m: image coverage too high (>100%)
\x1b[31mred\x1b[0m: capacity too low and coverage too high
"""
        assert legend == expected

    def test_print_layer_info_with_empty_shape(self, print_info, NeuralNet):
        # construct a net with both conv layer (to trigger
        # get_conv_infos) and a layer with shape (None,).
        l_img = InputLayer(shape=(None, 1, 28, 28))
        l_conv = Conv2DLayer(l_img, num_filters=3, filter_size=3)
        l0 = DenseLayer(l_conv, num_units=10)

        l_inp = InputLayer(shape=(None,))  # e.g. vector input
        l1 = DenseLayer(l_inp, num_units=10)

        l_merge = ConcatLayer([l0, l1])

        nn = NeuralNet(l_merge, update_learning_rate=0.1, verbose=2)
        nn.initialize()

        # used to raise TypeError
        print_info(nn)
class TestWeightLog:
    """Tests for the WeightLog handler (per-layer weight statistics)."""

    @pytest.fixture
    def WeightLog(self):
        from nolearn.lasagne import WeightLog
        return WeightLog

    @pytest.fixture
    def nn(self):
        # Two successive snapshots of two layers' weights; side_effect
        # makes each get_all_params_values() call return the next one.
        nn = Mock()
        nn.get_all_params_values.side_effect = [
            OrderedDict([
                ('layer1', numpy.array([[-1, -2]])),
                ('layer2', numpy.array([[3, 4]])),
                ]),
            OrderedDict([
                ('layer1', numpy.array([[-2, -3]])),
                ('layer2', numpy.array([[5, 7]])),
                ]),
            ]
        return nn

    def test_history(self, WeightLog, nn):
        wl = WeightLog()
        wl(nn, None)
        wl(nn, None)

        # First call: no previous snapshot, so all diffs are 0;
        # wmean/wabsmean are the (abs) means of the weight arrays.
        assert wl.history[0] == {
            'layer1_0 wdiff': 0,
            'layer1_0 wabsmean': 1.5,
            'layer1_0 wmean': -1.5,
            'layer2_0 wdiff': 0,
            'layer2_0 wabsmean': 3.5,
            'layer2_0 wmean': 3.5,
            }
        # Second call: mean absolute change between the snapshots
        # (layer1: mean(|1|,|1|)=1.0; layer2: mean(|2|,|3|)=2.5).
        assert wl.history[1]['layer1_0 wdiff'] == 1.0
        assert wl.history[1]['layer2_0 wdiff'] == 2.5

    def test_save_to(self, WeightLog, nn, tmpdir):
        # write_every=1 flushes a CSV header plus one row per call.
        save_to = tmpdir.join("hello.csv")
        wl = WeightLog(save_to=save_to.strpath, write_every=1)
        wl(nn, None)
        wl(nn, None)

        assert save_to.readlines() == [
            'layer1_0 wdiff,layer1_0 wabsmean,layer1_0 wmean,'
            'layer2_0 wdiff,layer2_0 wabsmean,layer2_0 wmean\n',
            '0.0,1.5,-1.5,0.0,3.5,3.5\n',
            '1.0,2.5,-2.5,2.5,6.0,6.0\n',
            ]

    def test_pickle(self, WeightLog, nn, tmpdir):
        # A pickled and reloaded WeightLog keeps appending to the same
        # CSV file without rewriting the header.
        save_to = tmpdir.join("hello.csv")
        pkl = tmpdir.join("hello.pkl")

        wl = WeightLog(save_to=save_to.strpath, write_every=1)
        wl(nn, None)

        with open(pkl.strpath, 'wb') as f:
            pickle.dump(wl, f)
        with open(pkl.strpath, 'rb') as f:
            wl = pickle.load(f)

        wl(nn, None)

        assert save_to.readlines() == [
            'layer1_0 wdiff,layer1_0 wabsmean,layer1_0 wmean,'
            'layer2_0 wdiff,layer2_0 wabsmean,layer2_0 wmean\n',
            '0.0,1.5,-1.5,0.0,3.5,3.5\n',
            '1.0,2.5,-2.5,2.5,6.0,6.0\n',
            ]
| 32.276316 | 78 | 0.578951 |
11c80539e1f577ca6dd67fdb56d31c90276d1a98 | 1,518 | py | Python | modules/extra_tabs/messages.py | sharabeshj/course-editor-test | 9af15d10ef1f039fdf5758134a7cb72384ccf3f5 | [
"Apache-2.0"
] | 1 | 2021-01-06T17:58:30.000Z | 2021-01-06T17:58:30.000Z | modules/extra_tabs/messages.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | 27 | 2016-08-31T19:04:46.000Z | 2016-09-29T00:22:32.000Z | modules/extra_tabs/messages.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used by extra tabs pages."""
__author__ = 'Todd Larsen (tlarsen@google.com)'


# Help strings rendered next to the corresponding fields in the
# extra-tabs editor UI. The string contents are user-facing text and
# must not be changed casually.

EXTRA_TABS_DESCRIPTION = """
Extra tabs appear on the course navbar.
"""

EXTRA_TABS_TITLE_DESCRIPTION = """
This is the name of this tab displayed on the course navbar.
"""

EXTRA_TAB_POSITION_DESCRIPTION = """
This indicates if this tab is right or left aligned. Tabs aligned on the same
side are displayed in the order added here.
"""

EXTRA_TABS_VISIBILITY_DESCRIPTION = """
This indicates if this tab is visible to everyone or only registered students.
"""

EXTRA_TABS_URL_DESCRIPTION = """
If a URL is provided, this tab will link to that URL. Otherwise, it will
display the "Tab Content" in a page. Links to other sites must start with
"http" or "https".
"""

EXTRA_TABS_CONTENT_DESCRIPTION = """
This content will be displayed on a page accessed from the tab. If the
"Tab URL" is provided, that will be used instead.
"""
| 33 | 78 | 0.754941 |
d4c81433594931854624274697b1dba33c6f549a | 2,404 | py | Python | util/util_lldp.py | nocsysmars/xmppd | 386aacd71194fb072d9a9f45cbc19ca2f01e92e8 | [
"Apache-2.0"
] | null | null | null | util/util_lldp.py | nocsysmars/xmppd | 386aacd71194fb072d9a9f45cbc19ca2f01e92e8 | [
"Apache-2.0"
] | null | null | null | util/util_lldp.py | nocsysmars/xmppd | 386aacd71194fb072d9a9f45cbc19ca2f01e92e8 | [
"Apache-2.0"
] | 1 | 2021-11-24T02:22:03.000Z | 2021-11-24T02:22:03.000Z | #
# util_lldp.py
#
# lldp related APIs.
#
import util_utl, util_method_tbl, pdb
from xml.etree import cElementTree as ET
#
# set functions
#
#
# get functions
#
# Canned LLDP remote-neighbor records. They are appended (for interfaces
# Ethernet1/Ethernet2) by lldp_get when util_utl.CFG_TBL["FAKE_DATA"] > 0.
lldp_info_fake = [
    { "lldp_rem_port_id_subtype"    : "2",
      "lldp_rem_port_id"            : "20",
      "lldp_rem_chassis_id_subtype" : "4",
      "lldp_rem_chassis_id"         : "00:11:22:33:44:55",
      "lldp_rem_index"              : "1" },
    { "lldp_rem_port_id_subtype"    : "2",
      "lldp_rem_port_id"            : "21",
      "lldp_rem_chassis_id_subtype" : "4",
      "lldp_rem_chassis_id"         : "00:11:22:33:44:55",
      "lldp_rem_index"              : "2" }
]
def lldp_get_info_one_port(inf_name, lldp_info):
    """Build an <lldp> XML element for one interface's LLDP neighbor info.

    The element always contains an <ifName> child plus one child per
    known LLDP field (in a fixed order); fields missing from
    ``lldp_info`` produce empty elements.
    """
    tag_by_field = [
        ("lldp_rem_port_id_subtype",    "remPortIdSubType"),
        ("lldp_rem_port_id",            "remPortId"),
        ("lldp_rem_chassis_id_subtype", "remChassSubType"),
        ("lldp_rem_chassis_id",         "remChassSubId"),
        ("lldp_rem_index",              "id"),
    ]

    root = ET.Element('lldp')
    ET.SubElement(root, 'ifName').text = inf_name

    for fld_name, xml_tag in tag_by_field:
        child = ET.SubElement(root, xml_tag)
        if fld_name in lldp_info:
            child.text = lldp_info[fld_name]

    return root
@util_utl.utl_dbg
def lldp_get(ent_elm, db_args):
    """Append a <lldps> element describing all LLDP neighbors to ent_elm.

    Reads every APPDB_TABLE_NAME_LLDP-prefixed key from the APPL DB and
    renders one <lldp> child per interface; when CFG_TBL["FAKE_DATA"] is
    enabled, canned entries for Ethernet1/Ethernet2 are appended too.
    """
    lldps_elm = ET.Element('lldps')

    lldp_keys = db_args.appdb.keys(
        db_args.appdb.APPL_DB, util_utl.APPDB_TABLE_NAME_LLDP + "*")

    if lldp_keys != None:
        db_sep = db_args.appdb.get_db_separator(db_args.appdb.APPL_DB)
        for lldp_key in lldp_keys:
            lldp_info = db_args.appdb.get_all(db_args.appdb.APPL_DB, lldp_key)
            # Key format is "<table><sep><interface>"; take the interface part.
            inf_name = lldp_key.split(db_sep)[1]
            lldp_one = lldp_get_info_one_port(inf_name, lldp_info)
            lldps_elm.append(lldp_one)

    if util_utl.CFG_TBL["FAKE_DATA"] > 0:
        # fake data
        inf_fake_lst = [ "Ethernet1", "Ethernet2" ]
        for idx in range(len(inf_fake_lst)):
            lldp_one = lldp_get_info_one_port(inf_fake_lst[idx], lldp_info_fake[idx])
            lldps_elm.append(lldp_one)

    ent_elm.append(lldps_elm)
#
# register functions to method table
#
util_method_tbl.mtbl_register_method('get-lldp', lldp_get)
| 28.282353 | 85 | 0.596922 |
7491641098e49c2aa4c735afe0f16521c041943f | 6,602 | py | Python | base/car.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | base/car.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | base/car.py | Chadc265/DingusBot | 98a05fe6ef75e2b48038f9fbbfacc204e89d0d86 | [
"MIT"
] | null | null | null | from util.vec import Vec3
from util.orientation import Orientation
from util.math_funcs import turn_radius, clamp
from util import constants
from util.boost import Boost, BoostTracker
from base.ball import Ball
from base.goal import Goal
from rlbot.utils.structures.game_data_struct import GameTickPacket, BoostPad, PlayerInfo
import math
# Structure taken from GoslingUtils.objects.car_object and RLBotPythonExample
class Car:
    """Wrapper around one car in the Rocket League game packet.

    Caches the car's physics state each tick (via ``update``) and offers
    geometry/kinematics helpers on top of it. Vector math is delegated
    to ``Vec3``/``Orientation`` from the util package.
    """

    def __init__(self, team, index, packet=None):
        self.team = team            # 0 = blue, 1 = orange
        self.index = index          # index into packet.game_cars
        self.location = Vec3(0, 0, 0)
        self.velocity = Vec3(0, 0, 0)
        self.angular_velocity = Vec3(0, 0, 0)
        self.orientation = None     # set on first update()
        self.dead = False           # demolished this tick
        self.flying = False         # no wheel contact
        self.supersonic = False
        self.jumped = False
        self.double_jumped = False
        self.jump_timer = 0.0       # seconds since first jump (see update_jump_timer)
        self.boost = 0
        if packet is not None:
            self.update(packet)

    @property
    def forward(self):
        return self.orientation.forward

    @property
    def right(self):
        return self.orientation.right

    @property
    def up(self):
        return self.orientation.up

    @property
    def side(self):
        # +1 for team 1 (orange), -1 for team 0 (blue).
        return 1 if self.team == 1 else -1

    @property
    def speed(self):
        return self.velocity.length()

    def stop_distance(self, coast=False):
        # Distance needed to stop from the current speed at a constant
        # deceleration (coasting or braking).
        # NOTE(review): pure kinematics would be v**2 / (2*a); confirm the
        # constants already account for the factor of two.
        if coast:
            return (self.speed ** 2) / abs(constants.COAST_ACCELERATION)
        else:
            return (self.speed ** 2) / abs(constants.BREAK_ACCELERATION)

    def onside(self, ball_location, threshold=350):
        # True when the car's projection on the own-goal-to-ball axis is
        # no further than `threshold` beyond the ball, i.e. the car is
        # not caught upfield of the ball.
        goal_location = Vec3(0, self.team*5120, 0)
        goal_to_ball = (ball_location - goal_location).normalized()
        ball_dist = (ball_location - goal_location).length()
        goal_to_car = self.location - goal_location
        car_dist = goal_to_ball.dot(goal_to_car)  # scalar projection onto the axis
        return car_dist - threshold < ball_dist

    def local(self, target):
        # Express a world-space vector in the car's local frame.
        return self.orientation.dot(target)

    def velocity_to_target(self, target):
        # Signed speed component toward `target`; math.inf when the car
        # is exactly on target (zero-length direction vector).
        # NOTE(review): `self.velocity` is world-frame while
        # `local_target_norm` is local-frame — confirm the frames are
        # intended to be mixed here.
        local_target = self.local(target - self.location)
        local_target_norm = local_target.normalized()
        try:
            vel_towards_target = self.velocity.dot(local_target_norm) / local_target_norm.dot(local_target_norm)
            # print("Velocity to target: (", vel_towards_target.x, ", ", vel_towards_target.y, ", ", vel_towards_target.z, ")")
        except ZeroDivisionError: # On target
            vel_towards_target = math.inf
        return vel_towards_target

    def is_facing_target(self, target, return_angle=False):
        # Returns True when the dot product is positive; optionally also
        # returns the raw dot value (named "angle" but not an angle).
        # NOTE(review): unlike velocity_to_target, `target` is localized
        # without subtracting self.location, and the dot is taken with
        # the world-frame forward vector — confirm this is intentional.
        local_target = self.local(target)
        angle = local_target.dot(self.forward)
        if angle > 0:
            if return_angle:
                return True, angle
            return True
        if return_angle:
            return False, angle
        return False

    def time_to_target(self, target):
        # NOTE(review): `distance` is a Vec3, so the division yields a
        # component-wise Vec3 rather than a scalar time — verify callers
        # expect that.
        current_speed_to_target = self.velocity_to_target(target)
        distance = target - self.location
        return distance / current_speed_to_target

    def time_to_stop(self, coast=False):
        # stopped
        if self.speed == 0:
            return 0
        distance = self.stop_distance(coast=coast)
        return distance / constants.MAX_DRIVING_SPEED

    def get_closest_boosts(self, boosts:BoostTracker, in_current_path=False, path_angle_limit=0, return_time_to=False):
        # Find the closest active full-boost pad and, separately, the
        # closest active small pad as a fallback. With in_current_path,
        # only pads the car is facing are considered.
        # NOTE(review): `path_angle_limit` is accepted but never used,
        # and if no matching pad is found `closest_bean` stays None, so
        # the return_time_to branch would raise on `.location` — confirm
        # callers guard against this.
        all_boosts = boosts.all_boost
        car_location = self.location
        closest_bean = None
        closest_distance = math.inf
        fallback_pad = None
        fallback_distance = math.inf
        for b in all_boosts:
            if not in_current_path or (in_current_path and self.is_facing_target(b.location)):
                test = (b.location - car_location).length()
                if test < closest_distance and (b.is_full_boost and b.is_active):
                    closest_bean = b
                    closest_distance = test
                elif test < fallback_distance and b.is_active:
                    fallback_pad = b
                    fallback_distance = test
        if return_time_to:
            return (closest_bean,
                    fallback_pad,
                    (closest_distance / self.velocity_to_target(closest_bean.location),
                     fallback_distance / self.velocity_to_target(fallback_pad.location)
                     )
                    )
        else:
            return closest_bean, fallback_pad

    def update_jump_timer(self, packet_jumped, packed_doubled, dt):
        # jump hasn't registered until now or we used a dodge or its too damn late, make sure timer is zero
        if (not self.jumped and packet_jumped) or self.double_jumped:
            self.jump_timer = 0.0
        elif self.jumped and not self.double_jumped:
            self.jump_timer += dt

    def update(self, packet: GameTickPacket):
        # Refresh all cached physics/state fields from the latest tick.
        packet_car = packet.game_cars[self.index]
        self.location = Vec3(packet_car.physics.location)
        self.velocity = Vec3(packet_car.physics.velocity)
        self.orientation = Orientation(packet_car.physics.rotation)
        self.angular_velocity = Vec3(packet_car.physics.angular_velocity)
        self.dead = packet_car.is_demolished
        self.flying = not packet_car.has_wheel_contact
        self.supersonic = packet_car.is_super_sonic
        self.jumped = packet_car.jumped
        self.double_jumped = packet_car.double_jumped
        self.boost = packet_car.boost

    # def intersects(self, ball:Ball):
    #     diff = ball.location.flat() - self.location.flat()
    #     distance = diff.length()
    #     direction = math.atan2(diff.y, diff.x)
    #     ball_direction = math.atan2(ball.location.y, ball.location.x)
    #     alpha = math.pi + direction - ball_direction
    #     ball_speed = ball.velocity.flat().length()
    #     car_speed = self.velocity.flat().length()
    #     if ball_speed == car_speed:
    #         if math.cos(alpha) < 0:
    #             return None, None
    #         return (direction + alpha) % (math.pi/2)
    #     a = car_speed ** 2 - ball_speed ** 2
    #     b = 2 * distance * ball_speed * math.cos(alpha)
    #     c = -(distance ** 2)
    #     discrim = (b ** 2) - (4 * a * c)
    #     if discrim < 0:
    #         return None, None
    #     time = (math.sqrt(discrim) / b) / (2 * a)
    #     x = ball.location.x + ball_speed * time * math.cos(direction)
    #     y = ball.location.y + ball_speed * time * math.sin(direction)
    #     intersect_diff = Vec3(x, y, 0) - self.location.flat()
    #     return Vec3(x, y, 0), time
cd40031d0c4db219c451b9ee7cef9e1458d334c6 | 1,936 | py | Python | src/build_workflow/opensearch_dashboards/build_artifact_check_plugin.py | asifsmohammed/opensearch-build | f78859000d676d35c29b15e08bbf4310c4df05b9 | [
"Apache-2.0"
] | 62 | 2021-05-14T04:06:09.000Z | 2022-03-23T03:30:13.000Z | src/build_workflow/opensearch_dashboards/build_artifact_check_plugin.py | asifsmohammed/opensearch-build | f78859000d676d35c29b15e08bbf4310c4df05b9 | [
"Apache-2.0"
] | 1,590 | 2021-05-07T20:21:19.000Z | 2022-03-31T23:57:53.000Z | src/build_workflow/opensearch_dashboards/build_artifact_check_plugin.py | mch2/opensearch-build | 39464ae1ded2b628d5b6cacb22064b715906520d | [
"Apache-2.0"
] | 134 | 2021-05-07T19:27:56.000Z | 2022-03-24T23:06:17.000Z | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import logging
import os
import re
from typing import List
from zipfile import ZipFile
from build_workflow.build_artifact_check import BuildArtifactCheck
from system.config_file import ConfigFile
class BuildArtifactOpenSearchDashboardsCheckPlugin(BuildArtifactCheck):
    """Validates a built OpenSearch Dashboards plugin zip artifact.

    Checks the file extension and name, then the plugin's
    ``opensearch_dashboards.json`` manifest: its ``version`` must be one
    of the target's compatible component versions and its
    ``opensearchDashboardsVersion`` one of the compatible core versions.
    """

    def check(self, path: str) -> None:
        """Validate the artifact at ``path``.

        Raises BuildArtifactCheck.BuildArtifactInvalidError on any
        mismatch (extension, file name, or manifest versions).
        """
        if os.path.splitext(path)[1] != ".zip":
            raise BuildArtifactCheck.BuildArtifactInvalidError(path, "Not a zip file.")

        match = re.search(r"^(\w+)-[\d\.]*.*.zip$", os.path.basename(path))
        if not match:
            raise BuildArtifactCheck.BuildArtifactInvalidError(path, "Expected filename to be in the format of pluginName-1.1.0.zip.")

        plugin_name = match.group(1)
        valid_filenames = self.__valid_paths(plugin_name)
        if not os.path.basename(path) in valid_filenames:
            # Fixed typo in the user-facing message ("to to be" -> "to be").
            raise BuildArtifactCheck.BuildArtifactInvalidError(path, f"Expected filename to be one of {valid_filenames}.")

        with ZipFile(path, "r") as zip:
            data = zip.read(f"opensearch-dashboards/{plugin_name}/opensearch_dashboards.json").decode("UTF-8")
            config = ConfigFile(data)
            try:
                config.check_value_in("version", self.target.compatible_component_versions)
                config.check_value_in("opensearchDashboardsVersion", self.target.compatible_versions)
            except ConfigFile.CheckError as e:
                raise BuildArtifactCheck.BuildArtifactInvalidError(path, e.__str__())
            logging.info(f'Checked {path} ({config.get_value("version", "N/A")})')

    def __valid_paths(self, pluginName: str) -> List[str]:
        """Return every acceptable zip file name for this plugin."""
        # Comprehension instead of list(map(lambda ...)) — same result.
        return [f"{pluginName}-{version}.zip" for version in self.target.compatible_versions]
abc80fea1b475a0c9c64129e3defb51ad83383a6 | 2,684 | py | Python | inst/models.py | collinsbett29/insta | 2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581 | [
"MIT"
] | null | null | null | inst/models.py | collinsbett29/insta | 2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581 | [
"MIT"
] | 3 | 2020-06-06T00:04:18.000Z | 2021-09-08T01:25:53.000Z | inst/models.py | collinsbett29/Instagram | 2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
import datetime as dt
from django.db import models
from tinymce.models import HTMLField
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """A user's profile: bio text, display picture and the owning User."""

    bio = HTMLField()
    dp = models.ImageField(upload_to='images/', blank=True)
    # Fixed: ``null`` expects the boolean True, not the string "True"
    # (the old value only worked because any non-empty string is truthy).
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)

    def save_profile(self):
        """Persist this profile."""
        self.save()

    def del_profile(self):
        """Delete this profile."""
        self.delete()

    @classmethod
    def search_profile(cls, name):
        """Return profiles whose username contains ``name`` (case-insensitive)."""
        profile = cls.objects.filter(user__username__icontains=name)
        return profile

    @classmethod
    def get_by_id(cls, id):
        """Return the profile with primary key ``id`` (raises DoesNotExist)."""
        profile = Profile.objects.get(id=id)
        return profile
class Image(models.Model):
    """An uploaded image post with its caption, owner and timestamps."""

    post = models.ImageField(upload_to='images/', blank=True)
    caption = HTMLField()
    posted_on = models.DateTimeField(auto_now_add=True)
    profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
    # Fixed: ``null`` expects the boolean True, not the string "True"
    # (the old value only worked because any non-empty string is truthy).
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return self.caption

    class Meta:
        ordering = ['posted_on']

    def save_img(self):
        """Persist this image."""
        self.save()

    def del_img(self):
        """Delete this image."""
        self.delete()

    @classmethod
    def get_images(cls):
        """Return every image, ordered per Meta (oldest first)."""
        images = Image.objects.all()
        return images

    @property
    def count_likes(self):
        """Number of likes attached to this image (via the related_name)."""
        likes = self.likes.count()
        return likes

    @classmethod
    def get_image_by_id(cls, id):
        # NOTE(review): despite the name, this filters by the *user's* id
        # and returns a queryset of that user's images — confirm callers.
        image = Image.objects.filter(user_id=id).all()
        return image
class Comments(models.Model):
    """A comment left by a user on an image."""

    comment = HTMLField()
    posted_on = models.DateTimeField(auto_now=True)
    image = models.ForeignKey(Image, on_delete=models.CASCADE, related_name='comments')
    # Fixed: ``null`` expects the boolean True, not the string "True"
    # (the old value only worked because any non-empty string is truthy).
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return self.comment

    class Meta:
        ordering = ['posted_on']

    def save_comm(self):
        """Persist this comment."""
        self.save()

    def del_comm(self):
        """Delete this comment."""
        self.delete()

    @classmethod
    def get_comments_by_image_id(cls, image):
        # NOTE(review): ``get`` raises MultipleObjectsReturned when an
        # image has more than one comment — ``filter`` may be intended.
        comments = Comments.objects.get(image_id=image)
        return comments
class Likes(models.Model):
    """A 'like' by a user on an image."""

    user_like = models.ForeignKey( User, on_delete=models.CASCADE, related_name='likes')
    liked_post = models.ForeignKey(Image, on_delete=models.CASCADE, related_name='likes')

    def save_like(self):
        """Persist this like."""
        self.save()

    def __str__(self):
        # Fixed: __str__ must return a str; returning the User instance
        # itself raised TypeError whenever the object was rendered.
        return str(self.user_like)
3db7eadc50270fb62a61df300a2d6571c1cd79bd | 718 | py | Python | ppo/Config.py | leonjovanovic/drl-ml-agents-3dball | d9f8241209bfeba210ac03baa37fbcea77536690 | [
"MIT"
] | null | null | null | ppo/Config.py | leonjovanovic/drl-ml-agents-3dball | d9f8241209bfeba210ac03baa37fbcea77536690 | [
"MIT"
] | null | null | null | ppo/Config.py | leonjovanovic/drl-ml-agents-3dball | d9f8241209bfeba210ac03baa37fbcea77536690 | [
"MIT"
] | null | null | null | import datetime
# PPO training hyper-parameters for the 3DBall environment.
# NOTE: descriptions are inferred from the standard PPO parameter names —
# confirm against the training loop that consumes this module.

test_episodes = 100      # episodes used when evaluating a policy
total_steps = 350        # number of training iterations
update_steps = 5         # optimization epochs per collected batch
batch_size = 2048        # environment steps collected per iteration
minibatch_size = 32      # SGD minibatch size drawn from each batch

gae = True               # use Generalized Advantage Estimation
gae_lambda = 0.85        # GAE lambda (bias/variance trade-off)

seed = 0

policy_lr = 0.0003       # actor (policy) learning rate
critic_lr = 0.0004       # critic (value) learning rate
max_grad_norm = 0.5      # gradient clipping threshold
adam_eps = 1e-5          # Adam epsilon
gamma = 0.99             # discount factor

write = True             # enable logging/run naming below
now = datetime.datetime.now()
date_time = "{}.{}.{}.{}".format(now.day, now.hour, now.minute, now.second)
# Run name encodes the hyper-parameters plus a timestamp; the learning
# rates and eps contribute only their last two characters as a short tag.
writer_name = 'PPO_3dBall' + '_' + str(seed) + "_" + str(total_steps) + "_" + str(batch_size) + "_" + \
              str(minibatch_size) + "_" + str(update_steps) + "_" + "gae" + "_" + str(gamma) + "_" + \
              str(policy_lr)[-2:] + "_" + str(critic_lr)[-2:] + "_" + \
              str(adam_eps)[-2:] + "_" + date_time
6f782efd59e984d8844734980e002d93f2e7acea | 36,697 | py | Python | apps/oozie/src/oozie/tests2.py | dvopsway/hue | 3c0dbca37a21ec92cd0b987f80fba7e3fc3589a7 | [
"Apache-2.0"
] | 1 | 2020-04-10T07:54:39.000Z | 2020-04-10T07:54:39.000Z | apps/oozie/src/oozie/tests2.py | dvopsway/hue | 3c0dbca37a21ec92cd0b987f80fba7e3fc3589a7 | [
"Apache-2.0"
] | null | null | null | apps/oozie/src/oozie/tests2.py | dvopsway/hue | 3c0dbca37a21ec92cd0b987f80fba7e3fc3589a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_permission, add_to_group, reformat_json, reformat_xml
from oozie.models2 import Job, Workflow, find_dollar_variables, find_dollar_braced_variables
LOG = logging.getLogger(__name__)
class TestEditor():
    def setUp(self):
        # Fresh, empty workflow instance for every test case.
        self.wf = Workflow()
    def test_parsing(self):
        """find_dollar_variables extracts $name parameters from scripts."""
        # Pig script: ${nah} uses brace syntax and must NOT be picked up.
        assert_equal(['input', 'LIMIT', 'out'], find_dollar_variables("""
data = '$input';
$out = LIMIT data $LIMIT; -- ${nah}
$output = STORE "$out";
"""))

        # SQL script with two parameters.
        assert_equal(['max_salary', 'limit'], find_dollar_variables("""
SELECT sample_07.description, sample_07.salary
FROM
sample_07
WHERE
( sample_07.salary > $max_salary)
ORDER BY sample_07.salary DESC
LIMIT $limit"""))
    def test_hive_script_parsing(self):
        """find_dollar_braced_variables extracts ${...} parameters,
        including the hivevar:/hiveconf: namespaced forms."""
        assert_equal(['field', 'tablename', 'LIMIT'], find_dollar_braced_variables("""
SELECT ${field}
FROM ${hivevar:tablename}
LIMIT ${hiveconf:LIMIT}
"""))
    def test_workflow_gen_xml(self):
        """An empty workflow renders the minimal Oozie 0.5 XML skeleton
        (start -> End plus the standard Kill node)."""
        # Token-wise comparison (split()) makes the check whitespace-insensitive.
        assert_equal([
            u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">', u'<start', u'to="End"/>', u'<kill', u'name="Kill">', u'<message>Action', u'failed,',
            u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>', u'<end', u'name="End"/>', u'</workflow-app>'],
            self.wf.to_xml({'output': '/path'}).split()
        )
def test_workflow_map_reduce_gen_xml(self):
wf = Workflow(data="{\"layout\": [{\"oozieRows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}], \"rows\": [{\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"MapReduce job\", \"widgetType\": \"mapreduce-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"size\": 12}], \"id\": \"e2caca14-8afc-d7e0-287c-88accd0b4253\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, 
\"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": \"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}], \"oozieEndRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"End\", \"widgetType\": \"end-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"size\": 12}], \"id\": \"6a13d869-d04c-8431-6c5c-dbe67ea33889\", \"columns\": []}, \"oozieKillRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Kill\", \"widgetType\": \"kill-widget\", \"oozieMovable\": true, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"size\": 12}], \"id\": 
\"e3b56553-7a4f-43d2-b1e2-4dc433280095\", \"columns\": []}, \"enableOozieDropOnAfter\": true, \"oozieStartRow\": {\"enableOozieDropOnBefore\": true, \"enableOozieDropOnSide\": true, \"enableOozieDrop\": false, \"widgets\": [{\"status\": \"\", \"logsURL\": \"\", \"name\": \"Start\", \"widgetType\": \"start-widget\", \"oozieMovable\": false, \"ooziePropertiesExpanded\": false, \"properties\": {}, \"isLoading\": true, \"offset\": 0, \"actionURL\": \"\", \"progress\": 0, \"klass\": \"card card-widget span12\", \"oozieExpanded\": false, \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"size\": 12}], \"id\": \"ff63ee3f-df54-2fa3-477b-65f5e0f0632c\", \"columns\": []}, \"klass\": \"card card-home card-column span12\", \"enableOozieDropOnBefore\": true, \"drops\": [\"temp\"], \"id\": \"0c1908e7-0096-46e7-a16b-b17b1142a730\", \"size\": 12}], \"workflow\": {\"properties\": {\"job_xml\": \"\", \"description\": \"\", \"wf1_id\": null, \"sla_enabled\": false, \"deployment_dir\": \"/user/hue/oozie/workspaces/hue-oozie-1430228904.58\", \"schema_version\": \"uri:oozie:workflow:0.5\", \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}], \"show_arrows\": true, \"parameters\": [{\"name\": \"oozie.use.system.libpath\", \"value\": true}], \"properties\": []}, \"name\": \"My Workflow\", \"versions\": [\"uri:oozie:workflow:0.4\", \"uri:oozie:workflow:0.4.5\", \"uri:oozie:workflow:0.5\"], \"isDirty\": true, \"movedNode\": null, \"linkMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": [\"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"], \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": [], 
\"3f107997-04cc-8733-60a9-a4bb62cebffc\": [\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"17c9c895-5a16-7443-bb81-f34b30b21548\": []}, \"nodeIds\": [\"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"], \"nodes\": [{\"properties\": {}, \"name\": \"Start\", \"children\": [{\"to\": \"0cf2d5d5-2315-0bda-bd53-0eec257e943f\"}], \"actionParametersFetched\": false, \"type\": \"start-widget\", \"id\": \"3f107997-04cc-8733-60a9-a4bb62cebffc\", \"actionParameters\": []}, {\"properties\": {}, \"name\": \"End\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"end-widget\", \"id\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\", \"actionParameters\": []}, {\"properties\": {\"message\": \"Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]\"}, \"name\": \"Kill\", \"children\": [], \"actionParametersFetched\": false, \"type\": \"kill-widget\", \"id\": \"17c9c895-5a16-7443-bb81-f34b30b21548\", \"actionParameters\": []}, {\"properties\": {\"retry_max\": [{\"value\": \"5\"}], \"files\": [], \"job_xml\": \"\", \"jar_path\": \"my_jar\", \"job_properties\": [{\"name\": \"prop_1_name\", \"value\": \"prop_1_value\"}], \"archives\": [], \"prepares\": [], \"credentials\": [], \"sla\": [{\"key\": \"enabled\", \"value\": false}, {\"key\": \"nominal-time\", \"value\": \"${nominal_time}\"}, {\"key\": \"should-start\", \"value\": \"\"}, {\"key\": \"should-end\", \"value\": \"${30 * MINUTES}\"}, {\"key\": \"max-duration\", \"value\": \"\"}, {\"key\": \"alert-events\", \"value\": \"\"}, {\"key\": \"alert-contact\", \"value\": \"\"}, {\"key\": \"notification-msg\", \"value\": \"\"}, {\"key\": \"upstream-apps\", \"value\": \"\"}]}, \"name\": \"mapreduce-0cf2\", \"children\": [{\"to\": \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\"}, {\"error\": \"17c9c895-5a16-7443-bb81-f34b30b21548\"}], \"actionParametersFetched\": false, \"type\": \"mapreduce-widget\", \"id\": 
\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\", \"actionParameters\": []}], \"id\": 50019, \"nodeNamesMapping\": {\"0cf2d5d5-2315-0bda-bd53-0eec257e943f\": \"mapreduce-0cf2\", \"33430f0f-ebfa-c3ec-f237-3e77efa03d0a\": \"End\", \"3f107997-04cc-8733-60a9-a4bb62cebffc\": \"Start\", \"17c9c895-5a16-7443-bb81-f34b30b21548\": \"Kill\"}, \"uuid\": \"084f4d4c-00f1-62d2-e27e-e153c1f9acfb\"}}")
assert_equal([
u'<workflow-app', u'name="My_Workflow"', u'xmlns="uri:oozie:workflow:0.5">',
u'<start', u'to="mapreduce-0cf2"/>',
u'<kill', u'name="Kill">', u'<message>Action', u'failed,', u'error', u'message[${wf:errorMessage(wf:lastErrorNode())}]</message>', u'</kill>',
u'<action', u'name="mapreduce-0cf2"', 'retry-max="5">',
u'<map-reduce>',
u'<job-tracker>${jobTracker}</job-tracker>',
u'<name-node>${nameNode}</name-node>',
u'<configuration>',
u'<property>',
u'<name>prop_1_name</name>',
u'<value>prop_1_value</value>',
u'</property>',
u'</configuration>',
u'</map-reduce>',
u'<ok', u'to="End"/>',
u'<error', u'to="Kill"/>',
u'</action>',
u'<end', u'name="End"/>',
u'</workflow-app>'
],
wf.to_xml({'output': '/path'}).split()
)
def test_job_validate_xml_name(self):
job = Workflow()
job.update_name('a')
assert_equal('a', job.validated_name)
job.update_name('aa')
assert_equal('aa', job.validated_name)
job.update_name('%a')
assert_equal('_a', job.validated_name)
job.update_name('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaz')
assert_equal(len('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), len(job.validated_name))
job.update_name('My <...> 1st W$rkflow [With] (Bad) letter$')
assert_equal('My_______1st_W$rkflow__With___Bad__lette', job.validated_name)
class TestUtils():
  """Tests for workflow XML import utilities (Workflow.gen_workflow_data_from_xml)."""

  def setUp(self):
    # Fresh default Workflow instance for each test.
    self.wf = Workflow()

  def test_gen_workflow_data_from_xml(self):
    """Importing a workflow definition XML should produce the expected node list."""
    # Fix: use a context manager so the fixture file handle is always closed
    # (the previous code opened the file and never closed it).
    with open('apps/oozie/src/oozie/test_data/xslt2/test-workflow.xml') as f:
      self.wf.definition = f.read()

    # Expected parse result: start -> fork(subworkflow | shell) -> join -> end,
    # compared via its string representation.
    node_list = "[{u'node_type': u'start', u'ok_to': u'fork-68d4', u'name': u''}, {u'node_type': u'kill', u'ok_to': u'', u'name': u'Kill'}, {u'path2': u'shell-0f44', u'node_type': u'fork', u'ok_to': u'', u'name': u'fork-68d4', u'path1': u'subworkflow-a13f'}, {u'node_type': u'join', u'ok_to': u'End', u'name': u'join-775e'}, {u'node_type': u'end', u'ok_to': u'', u'name': u'End'}, {u'node_type': u'sub-workflow', u'ok_to': u'join-775e', u'name': u'subworkflow-a13f', u'job_properties': [{u'name': u'hue-id-w', u'value': u'50001'}], u'error_to': u'Kill'}, {u'shell': {u'command': u'ls'}, u'node_type': u'shell', u'ok_to': u'join-775e', u'name': u'shell-0f44', u'error_to': u'Kill'}, {}]"
    assert_equal(node_list, str(Workflow.gen_workflow_data_from_xml('admin', self.wf)))
# def test_workflow_name(self):
# try:
# workflow_dict = WORKFLOW_DICT.copy()
# workflow_count = Document.objects.available_docs(Workflow, self.user).count()
#
# workflow_dict['name'][0] = 'bad workflow name'
# response = self.c.post(reverse('oozie:create_workflow'), workflow_dict, follow=True)
# assert_equal(200, response.status_code)
# assert_equal(workflow_count, Document.objects.available_docs(Workflow, self.user).count(), response)
#
# workflow_dict['name'][0] = 'good-workflow-name'
# response = self.c.post(reverse('oozie:create_workflow'), workflow_dict, follow=True)
# assert_equal(200, response.status_code)
# assert_equal(workflow_count + 1, Document.objects.available_docs(Workflow, self.user).count(), response)
# finally:
# name = 'bad workflow name'
# if Workflow.objects.filter(name=name).exists():
# Node.objects.filter(workflow__name=name).delete()
# Workflow.objects.filter(name=name).delete()
# name = 'good-workflow-name'
# if Workflow.objects.filter(name=name).exists():
# Node.objects.filter(workflow__name=name).delete()
# Workflow.objects.filter(name=name).delete()
#
#
# def test_find_parameters(self):
# data = json.dumps({'sla': [
# {'key': 'enabled', 'value': True},
# {'key': 'nominal-time', 'value': '${time}'},]}
# )
# jobs = [Job(name="$a"),
# Job(name="foo ${b} $$"),
# Job(name="${foo}", description="xxx ${food}", data=data)]
#
# result = [find_parameters(job, ['name', 'description', 'sla']) for job in jobs]
# assert_equal(set(["b", "foo", "food", "time"]), reduce(lambda x, y: x | set(y), result, set()))
#
#
# def test_find_all_parameters(self):
# self.wf.data = json.dumps({'sla': [
# {'key': 'enabled', 'value': False},
# {'key': 'nominal-time', 'value': '${time}'},]}
# )
# assert_equal([{'name': u'output', 'value': u''}, {'name': u'SLEEP', 'value': ''}, {'name': u'market', 'value': u'US'}],
# self.wf.find_all_parameters())
#
# self.wf.data = json.dumps({'sla': [
# {'key': 'enabled', 'value': True},
# {'key': 'nominal-time', 'value': '${time}'},]}
# )
# assert_equal([{'name': u'output', 'value': u''}, {'name': u'SLEEP', 'value': ''}, {'name': u'market', 'value': u'US'}, {'name': u'time', 'value': u''}],
# self.wf.find_all_parameters())
#
#
# def test_workflow_has_cycle(self):
# action1 = Node.objects.get(workflow=self.wf, name='action-name-1')
# action3 = Node.objects.get(workflow=self.wf, name='action-name-3')
#
# assert_false(self.wf.has_cycle())
#
# ok = action3.get_link('ok')
# ok.child = action1
# ok.save()
#
# assert_true(self.wf.has_cycle())
#
#
# def test_workflow_gen_xml(self):
# assert_equal(
# '<workflow-app name="wf-name-1" xmlns="uri:oozie:workflow:0.4">\n'
# ' <global>\n'
# ' <job-xml>jobconf.xml</job-xml>\n'
# ' <configuration>\n'
# ' <property>\n'
# ' <name>sleep-all</name>\n'
# ' <value>${SLEEP}</value>\n'
# ' </property>\n'
# ' </configuration>\n'
# ' </global>\n'
# ' <start to="action-name-1"/>\n'
# ' <action name="action-name-1">\n'
# ' <map-reduce>\n'
# ' <job-tracker>${jobTracker}</job-tracker>\n'
# ' <name-node>${nameNode}</name-node>\n'
# ' <prepare>\n'
# ' <delete path="${nameNode}${output}"/>\n'
# ' <mkdir path="${nameNode}/test"/>\n'
# ' </prepare>\n'
# ' <configuration>\n'
# ' <property>\n'
# ' <name>sleep</name>\n'
# ' <value>${SLEEP}</value>\n'
# ' </property>\n'
# ' </configuration>\n'
# ' </map-reduce>\n'
# ' <ok to="action-name-2"/>\n'
# ' <error to="kill"/>\n'
# ' </action>\n'
# ' <action name="action-name-2">\n'
# ' <map-reduce>\n'
# ' <job-tracker>${jobTracker}</job-tracker>\n'
# ' <name-node>${nameNode}</name-node>\n'
# ' <prepare>\n'
# ' <delete path="${nameNode}${output}"/>\n'
# ' <mkdir path="${nameNode}/test"/>\n'
# ' </prepare>\n'
# ' <configuration>\n'
# ' <property>\n'
# ' <name>sleep</name>\n'
# ' <value>${SLEEP}</value>\n'
# ' </property>\n'
# ' </configuration>\n'
# ' </map-reduce>\n'
# ' <ok to="action-name-3"/>\n'
# ' <error to="kill"/>\n'
# ' </action>\n'
# ' <action name="action-name-3">\n'
# ' <map-reduce>\n'
# ' <job-tracker>${jobTracker}</job-tracker>\n'
# ' <name-node>${nameNode}</name-node>\n'
# ' <prepare>\n'
# ' <delete path="${nameNode}${output}"/>\n'
# ' <mkdir path="${nameNode}/test"/>\n'
# ' </prepare>\n'
# ' <configuration>\n'
# ' <property>\n'
# ' <name>sleep</name>\n'
# ' <value>${SLEEP}</value>\n'
# ' </property>\n'
# ' </configuration>\n'
# ' </map-reduce>\n'
# ' <ok to="end"/>\n'
# ' <error to="kill"/>\n'
# ' </action>\n'
# ' <kill name="kill">\n'
# ' <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>\n'
# ' </kill>\n'
# ' <end name="end"/>\n'
# '</workflow-app>'.split(), self.wf.to_xml({'output': '/path'}).split())
#
#
# def test_workflow_java_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'java', [self.wf.start], {
# u'name': 'MyTeragen',
# "description":"Generate N number of records",
# "main_class":"org.apache.hadoop.examples.terasort.TeraGen",
# "args":"1000 ${output_dir}/teragen",
# "files":'["my_file","my_file2"]',
# "job_xml":"",
# "java_opts":"-Dexample-property=natty",
# "jar_path":"/user/hue/oozie/workspaces/lib/hadoop-examples.jar",
# "prepares":'[{"value":"/test","type":"mkdir"}]',
# "archives":'[{"dummy":"","name":"my_archive"},{"dummy":"","name":"my_archive2"}]',
# "capture_output": "on",
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml({'output_dir': '/path'})
#
# assert_true("""
# <action name="MyTeragen">
# <java>
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <prepare>
# <mkdir path="${nameNode}/test"/>
# </prepare>
# <main-class>org.apache.hadoop.examples.terasort.TeraGen</main-class>
# <java-opts>-Dexample-property=natty</java-opts>
# <arg>1000</arg>
# <arg>${output_dir}/teragen</arg>
# <file>my_file#my_file</file>
# <file>my_file2#my_file2</file>
# <archive>my_archive#my_archive</archive>
# <archive>my_archive2#my_archive2</archive>
# <capture-output/>
# </java>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
#
# def test_workflow_streaming_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'streaming', [self.wf.start], {
# u'name': 'MyStreaming',
# "description": "Generate N number of records",
# "main_class": "org.apache.hadoop.examples.terasort.TeraGen",
# "mapper": "MyMapper",
# "reducer": "MyReducer",
# "files": '["my_file"]',
# "archives":'[{"dummy":"","name":"my_archive"}]',
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <action name="MyStreaming">
# <map-reduce>
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <streaming>
# <mapper>MyMapper</mapper>
# <reducer>MyReducer</reducer>
# </streaming>
# <file>my_file#my_file</file>
# <archive>my_archive#my_archive</archive>
# </map-reduce>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
#
# def test_workflow_shell_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'shell', [self.wf.start], {
# u'job_xml': 'my-job.xml',
# u'files': '["hello.py"]',
# u'name': 'Shell',
# u'job_properties': '[]',
# u'capture_output': 'on',
# u'command': 'hello.py',
# u'archives': '[]',
# u'prepares': '[]',
# u'params': '[{"value":"World!","type":"argument"}]',
# u'description': 'Execute a Python script printing its arguments'
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <shell xmlns="uri:oozie:shell-action:0.1">
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <job-xml>my-job.xml</job-xml>
# <exec>hello.py</exec>
# <argument>World!</argument>
# <file>hello.py#hello.py</file>
# <capture-output/>
# </shell>""" in xml, xml)
#
# action1.capture_output = False
# action1.save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <shell xmlns="uri:oozie:shell-action:0.1">
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <job-xml>my-job.xml</job-xml>
# <exec>hello.py</exec>
# <argument>World!</argument>
# <file>hello.py#hello.py</file>
# </shell>""" in xml, xml)
#
#
# def test_workflow_fs_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'fs', [self.wf.start], {
# u'name': 'MyFs',
# u'description': 'Execute a Fs action that manage files',
# u'deletes': '[{"name":"/to/delete"},{"name":"to/delete2"}]',
# u'mkdirs': '[{"name":"/to/mkdir"},{"name":"${mkdir2}"}]',
# u'moves': '[{"source":"/to/move/source","destination":"/to/move/destination"},{"source":"/to/move/source2","destination":"/to/move/destination2"}]',
# u'chmods': '[{"path":"/to/chmod","recursive":true,"permissions":"-rwxrw-rw-"},{"path":"/to/chmod2","recursive":false,"permissions":"755"}]',
# u'touchzs': '[{"name":"/to/touchz"},{"name":"/to/touchz2"}]'
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml({'mkdir2': '/path'})
#
# assert_true("""
# <action name="MyFs">
# <fs>
# <delete path='${nameNode}/to/delete'/>
# <delete path='${nameNode}/user/${wf:user()}/to/delete2'/>
# <mkdir path='${nameNode}/to/mkdir'/>
# <mkdir path='${nameNode}${mkdir2}'/>
# <move source='${nameNode}/to/move/source' target='${nameNode}/to/move/destination'/>
# <move source='${nameNode}/to/move/source2' target='${nameNode}/to/move/destination2'/>
# <chmod path='${nameNode}/to/chmod' permissions='-rwxrw-rw-' dir-files='true'/>
# <chmod path='${nameNode}/to/chmod2' permissions='755' dir-files='false'/>
# <touchz path='${nameNode}/to/touchz'/>
# <touchz path='${nameNode}/to/touchz2'/>
# </fs>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
#
# def test_workflow_email_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'email', [self.wf.start], {
# u'name': 'MyEmail',
# u'description': 'Execute an Email action',
# u'to': 'hue@hue.org,django@python.org',
# u'cc': '',
# u'subject': 'My subject',
# u'body': 'My body'
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <action name="MyEmail">
# <email xmlns="uri:oozie:email-action:0.1">
# <to>hue@hue.org,django@python.org</to>
# <subject>My subject</subject>
# <body>My body</body>
# </email>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
# action1.cc = 'lambda@python.org'
# action1.save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <action name="MyEmail">
# <email xmlns="uri:oozie:email-action:0.1">
# <to>hue@hue.org,django@python.org</to>
# <cc>lambda@python.org</cc>
# <subject>My subject</subject>
# <body>My body</body>
# </email>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
#
# def test_workflow_subworkflow_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# wf_dict = WORKFLOW_DICT.copy()
# wf_dict['name'] = [u'wf-name-2']
# wf2 = create_workflow(self.c, self.user, wf_dict)
#
# action1 = add_node(self.wf, 'action-name-1', 'subworkflow', [self.wf.start], {
# u'name': 'MySubworkflow',
# u'description': 'Execute a subworkflow action',
# u'sub_workflow': wf2,
# u'propagate_configuration': True,
# u'job_properties': '[{"value":"World!","name":"argument"}]'
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true(re.search(
# '<sub-workflow>\W+'
# '<app-path>\${nameNode}/user/hue/oozie/workspaces/_test_-oozie-(.+?)</app-path>\W+'
# '<propagate-configuration/>\W+'
# '<configuration>\W+'
# '<property>\W+'
# '<name>argument</name>\W+'
# '<value>World!</value>\W+'
# '</property>\W+'
# '</configuration>\W+'
# '</sub-workflow>', xml, re.MULTILINE), xml)
#
# wf2.delete(skip_trash=True)
#
# def test_workflow_flatten_list(self):
# assert_equal('[<Start: start>, <Mapreduce: action-name-1>, <Mapreduce: action-name-2>, <Mapreduce: action-name-3>, '
# '<Kill: kill>, <End: end>]',
# str(self.wf.node_list))
#
# # 1 2
# # 3
# self.setup_forking_workflow()
#
# assert_equal('[<Start: start>, <Fork: fork-name-1>, <Mapreduce: action-name-1>, <Mapreduce: action-name-2>, '
# '<Join: join-name-1>, <Mapreduce: action-name-3>, <Kill: kill>, <End: end>]',
# str(self.wf.node_list))
#
#
# def test_workflow_generic_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'generic', [self.wf.start], {
# u'name': 'Generic',
# u'description': 'Execute a Generic email action',
# u'xml': """
# <email xmlns="uri:oozie:email-action:0.1">
# <to>hue@hue.org,django@python.org</to>
# <subject>My subject</subject>
# <body>My body</body>
# </email>""",
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
# <action name="Generic">
# <email xmlns="uri:oozie:email-action:0.1">
# <to>hue@hue.org,django@python.org</to>
# <subject>My subject</subject>
# <body>My body</body>
# </email>
# <ok to="end"/>
# <error to="kill"/>
# </action>""" in xml, xml)
#
#
# def test_workflow_hive_gen_xml(self):
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'hive', [self.wf.start], {
# u'job_xml': 'my-job.xml',
# u'files': '["hello.py"]',
# u'name': 'MyHive',
# u'job_properties': '[]',
# u'script_path': 'hello.sql',
# u'archives': '[]',
# u'prepares': '[]',
# u'params': '[{"value":"World!","type":"argument"}]',
# u'description': ''
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# assert_true("""
#<workflow-app name="wf-name-1" xmlns="uri:oozie:workflow:0.4">
# <global>
# <job-xml>jobconf.xml</job-xml>
# <configuration>
# <property>
# <name>sleep-all</name>
# <value>${SLEEP}</value>
# </property>
# </configuration>
# </global>
# <start to="MyHive"/>
# <action name="MyHive">
# <hive xmlns="uri:oozie:hive-action:0.2">
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <job-xml>my-job.xml</job-xml>
# <script>hello.sql</script>
# <argument>World!</argument>
# <file>hello.py#hello.py</file>
# </hive>
# <ok to="end"/>
# <error to="kill"/>
# </action>
# <kill name="kill">
# <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
# </kill>
# <end name="end"/>
#</workflow-app>""" in xml, xml)
#
# import beeswax
# from beeswax.tests import hive_site_xml
#
# tmpdir = tempfile.mkdtemp()
# saved = None
# try:
# # We just replace the Beeswax conf variable
# class Getter(object):
# def get(self):
# return tmpdir
#
# xml = hive_site_xml(is_local=False, use_sasl=True, kerberos_principal='hive/_HOST@test.com')
# file(os.path.join(tmpdir, 'hive-site.xml'), 'w').write(xml)
#
# beeswax.hive_site.reset()
# saved = beeswax.conf.HIVE_CONF_DIR
# beeswax.conf.HIVE_CONF_DIR = Getter()
#
# action1 = Node.objects.get(workflow=self.wf, name='MyHive')
# action1.credentials = [{'name': 'hcat', 'value': True}, {'name': 'hbase', 'value': False}, {'name': 'hive2', 'value': True}]
# action1.save()
#
# xml = self.wf.to_xml(mapping={
# 'credentials': {
# 'hcat': {
# 'xml_name': 'hcat',
# 'properties': [
# ('hcat.metastore.uri', 'thrift://hue-koh-chang:9999'),
# ('hcat.metastore.principal', 'hive')
# ]
# },
# 'hive2': {
# 'xml_name': 'hive2',
# 'properties': [
# ('hive2.jdbc.url', 'jdbc:hive2://hue-koh-chang:8888'),
# ('hive2.server.principal', 'hive')
# ]
# }
# }
# }
# )
#
# assert_true("""
#<workflow-app name="wf-name-1" xmlns="uri:oozie:workflow:0.4">
# <global>
# <job-xml>jobconf.xml</job-xml>
# <configuration>
# <property>
# <name>sleep-all</name>
# <value>${SLEEP}</value>
# </property>
# </configuration>
# </global>
# <credentials>
# <credential name="hcat" type="hcat">
# <property>
# <name>hcat.metastore.uri</name>
# <value>thrift://hue-koh-chang:9999</value>
# </property>
# <property>
# <name>hcat.metastore.principal</name>
# <value>hive</value>
# </property>
# </credential>
# <credential name="hive2" type="hive2">
# <property>
# <name>hive2.jdbc.url</name>
# <value>jdbc:hive2://hue-koh-chang:8888</value>
# </property>
# <property>
# <name>hive2.server.principal</name>
# <value>hive</value>
# </property>
# </credential>
# </credentials>
# <start to="MyHive"/>
# <action name="MyHive" cred="hcat,hive2">
# <hive xmlns="uri:oozie:hive-action:0.2">
# <job-tracker>${jobTracker}</job-tracker>
# <name-node>${nameNode}</name-node>
# <job-xml>my-job.xml</job-xml>
# <script>hello.sql</script>
# <argument>World!</argument>
# <file>hello.py#hello.py</file>
# </hive>
# <ok to="end"/>
# <error to="kill"/>
# </action>
# <kill name="kill">
# <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
# </kill>
# <end name="end"/>
#</workflow-app>""" in xml, xml)
#
# finally:
# beeswax.hive_site.reset()
# if saved is not None:
# beeswax.conf.HIVE_CONF_DIR = saved
# shutil.rmtree(tmpdir)
#
# self.wf.node_set.filter(name='action-name-1').delete()
#
#
# def test_workflow_gen_workflow_sla(self):
# xml = self.wf.to_xml({'output': '/path'})
# assert_false('<sla' in xml, xml)
# assert_false('xmlns="uri:oozie:workflow:0.5"' in xml, xml)
# assert_false('xmlns:sla="uri:oozie:sla:0.2"' in xml, xml)
#
# sla = self.wf.sla
# sla[0]['value'] = True
# sla[1]['value'] = 'now' # nominal-time
# sla[3]['value'] = '${ 10 * MINUTES}' # should-end
# self.wf.sla = sla
# self.wf.save()
#
# xml = self.wf.to_xml({'output': '/path'})
# assert_true('xmlns="uri:oozie:workflow:0.5"' in xml, xml)
# assert_true('xmlns:sla="uri:oozie:sla:0.2"' in xml, xml)
# assert_true("""<end name="end"/>
# <sla:info>
# <sla:nominal-time>now</sla:nominal-time>
# <sla:should-end>${ 10 * MINUTES}</sla:should-end>
# </sla:info>
#</workflow-app>""" in xml, xml)
#
#
# def test_workflow_gen_action_sla(self):
# xml = self.wf.to_xml({'output': '/path'})
# assert_false('<sla' in xml, xml)
# assert_false('xmlns="uri:oozie:workflow:0.5"' in xml, xml)
# assert_false('xmlns:sla="uri:oozie:sla:0.2"' in xml, xml)
#
# self.wf.node_set.filter(name='action-name-1').delete()
#
# action1 = add_node(self.wf, 'action-name-1', 'hive', [self.wf.start], {
# u'job_xml': 'my-job.xml',
# u'files': '["hello.py"]',
# u'name': 'MyHive',
# u'job_properties': '[]',
# u'script_path': 'hello.sql',
# u'archives': '[]',
# u'prepares': '[]',
# u'params': '[{"value":"World!","type":"argument"}]',
# u'description': ''
# })
# Link(parent=action1, child=self.wf.end, name="ok").save()
#
# xml = self.wf.to_xml()
#
# sla = action1.sla
# sla[0]['value'] = True
# sla[1]['value'] = 'now' # nominal-time
# sla[3]['value'] = '${ 10 * MINUTES}' # should-end
# action1.sla = sla
# action1.save()
#
# xml = self.wf.to_xml({'output': '/path'})
# assert_true('xmlns="uri:oozie:workflow:0.5"' in xml, xml)
# assert_true('xmlns:sla="uri:oozie:sla:0.2"' in xml, xml)
# assert_true("""<error to="kill"/>
# <sla:info>
# <sla:nominal-time>now</sla:nominal-time>
# <sla:should-end>${ 10 * MINUTES}</sla:should-end>
# </sla:info>
# </action>""" in xml, xml)
| 48.095675 | 8,323 | 0.55198 |
2b80025269d014114bb0c0c22c74e1ef00685887 | 3,220 | py | Python | facebook_business/adobjects/shop.py | GDGSNF/facebook-python-business-sdk | 95e64a10d987d7a53963d17036b6730d07f84ab5 | [
"CNRI-Python"
] | 576 | 2018-05-01T19:09:32.000Z | 2022-03-31T11:45:11.000Z | facebook_business/adobjects/shop.py | GDGSNF/facebook-python-business-sdk | 95e64a10d987d7a53963d17036b6730d07f84ab5 | [
"CNRI-Python"
] | 217 | 2018-05-03T07:31:59.000Z | 2022-03-29T14:19:52.000Z | facebook_business/adobjects/shop.py | GDGSNF/facebook-python-business-sdk | 95e64a10d987d7a53963d17036b6730d07f84ab5 | [
"CNRI-Python"
] | 323 | 2018-05-01T20:32:26.000Z | 2022-03-29T07:05:12.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class Shop(AbstractCrudObject):
    """Graph API node wrapper for a Shop.

    Mirrors the auto-generated SDK shape: field-name constants, the field
    type table, and a single read (GET) call on the node.
    """

    def __init__(self, fbid=None, parent_id=None, api=None):
        # Marker attribute used by the SDK for lightweight runtime type checks.
        self._isShop = True
        super(Shop, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        # String constants naming the fields exposed by this node.
        fb_sales_channel = 'fb_sales_channel'
        id = 'id'
        ig_sales_channel = 'ig_sales_channel'

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        """Read this Shop node.

        Returns the FacebookRequest when batched (`batch`) or deferred
        (`pending`); otherwise executes the request and returns its result.
        """
        from facebook_business.utils import api_utils
        # Callbacks are only honoured by batch execution; warn if they are
        # supplied without a batch.
        if batch is None and not (success is None and failure is None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')

        # This endpoint accepts no extra parameters and defines no enums.
        allowed_params = {
        }
        allowed_enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(allowed_params, allowed_enums),
            target_class=Shop,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        # Dispatch: batch > pending > immediate execution (guard clauses).
        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        if pending:
            return request
        self.assure_call()
        return request.execute()

    # Field name -> SDK type-name mapping used by the request machinery.
    _field_types = {
        'fb_sales_channel': 'Object',
        'id': 'string',
        'ig_sales_channel': 'Object',
    }

    @classmethod
    def _get_field_enum_info(cls):
        # This node declares no enum-typed fields.
        return {}
| 36.179775 | 103 | 0.695963 |
0f50b2de440707e50fc0f3e64567409763c0748a | 2,048 | py | Python | templateReader.py | adamrees89/PythonExcelReader | f51dbf4ab9fd0c9e2cfd43a2ec25f8312f5ecf19 | [
"MIT"
] | 1 | 2019-11-13T05:36:58.000Z | 2019-11-13T05:36:58.000Z | templateReader.py | adamrees89/PythonExcelReader | f51dbf4ab9fd0c9e2cfd43a2ec25f8312f5ecf19 | [
"MIT"
] | 3 | 2017-08-22T08:06:57.000Z | 2021-07-13T08:34:07.000Z | templateReader.py | adamrees89/PythonExcelReader | f51dbf4ab9fd0c9e2cfd43a2ec25f8312f5ecf19 | [
"MIT"
] | null | null | null | import sqlite3
import openpyxl
import sys
import os
# Copy a single worksheet cell (value + styling) into the sqlite template db.
def templateCell(s, sn, col, r):
    """Insert the contents and styling of cell *col*+*r* of worksheet *s*
    into table *sn* of template.db.

    Stored columns: cell reference, value, font name, font size,
    bold flag (0/1), italic flag (0/1) and fill colour index.
    """
    cellRef = col + r
    cell = s[cellRef]
    # openpyxl exposes font.bold / font.italic as booleans (or None), so
    # coerce with bool(); the previous comparison against the string
    # 'true' was always False, recording every cell as non-bold/non-italic.
    data = (
        cellRef,
        cell.value,
        cell.font.name,
        cell.font.size,
        int(bool(cell.font.bold)),
        int(bool(cell.font.italic)),
        cell.fill.start_color.index,
    )
    # Table names cannot be bound as SQL parameters; *sn* is sanitised by
    # templateSheet before it reaches this point.
    conn = sqlite3.connect("template.db")
    try:
        conn.execute("INSERT INTO " + sn + " VALUES (?,?,?,?,?,?,?)", data)
        conn.commit()
    finally:
        conn.close()
def templateSheet(s):
    """Create a table in template.db for worksheet *s* and copy every cell
    into it via templateCell.

    The table is named after the sheet title with characters that are not
    valid in an unquoted sqlite identifier stripped out.  Exits with
    status 5 when the table cannot be created (e.g. it already exists or
    the database file is locked).
    """
    conn = sqlite3.connect("template.db")
    c = conn.cursor()
    # Strip characters that are not legal in an unquoted sqlite table name.
    sn = s.title
    for ch in ' +-/_&%':
        sn = sn.replace(ch, '')
    try:
        # NOTE: the previous format string omitted the space between column
        # name and type (producing columns literally named 'CellTEXT' etc.
        # with no declared type); the schema below is the intended one.
        c.execute('CREATE TABLE {tn}(Cell TEXT, Value TEXT, Font_Name TEXT,'
                  ' Font_Size TEXT, Font_Bold INTEGER, Font_Italic INTEGER,'
                  ' Cell_Colour INTEGER)'.format(tn=sn))
    except sqlite3.Error:
        print('There was a problem with the sql database, is the'
              ' database already open, or does the sheet already exist?')
        sys.exit(5)
    # openpyxl's max_row / max_column are inclusive 1-based extents, so
    # range() must run to max + 1; range(1, max) silently dropped the last
    # row and the last column of every sheet.
    for co in range(1, s.max_column + 1):
        coL = openpyxl.utils.get_column_letter(co)
        for ro in range(1, s.max_row + 1):
            templateCell(s, sn, coL, str(ro))
    conn.commit()
    conn.close()
| 33.032258 | 71 | 0.57959 |
25c01a732ea1c72fe6c1435e910e1e5705997771 | 2,414 | py | Python | src/cybersource/settings.py | thelabnyc/django-oscar-cybersource | 510ecdc045edcf93ff7a62a120cb1eeaa56f40a9 | [
"0BSD"
] | 3 | 2016-06-18T01:37:50.000Z | 2021-02-08T04:07:11.000Z | src/cybersource/settings.py | thelabnyc/django-oscar-cybersource | 510ecdc045edcf93ff7a62a120cb1eeaa56f40a9 | [
"0BSD"
] | 24 | 2019-12-04T21:37:21.000Z | 2022-03-11T23:15:43.000Z | src/cybersource/settings.py | thelabnyc/django-oscar-cybersource | 510ecdc045edcf93ff7a62a120cb1eeaa56f40a9 | [
"0BSD"
] | 3 | 2016-05-31T10:02:30.000Z | 2017-09-01T10:55:20.000Z | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import warnings
def overridable(name, default=None, required=False):
    """Look up *name* in the Django settings module.

    Falls back to *default* when the setting is absent.  When *required*
    is true, a missing or falsy value raises ImproperlyConfigured.
    """
    if required and not getattr(settings, name, None):
        raise ImproperlyConfigured("%s must be defined in Django settings" % name)
    return getattr(settings, name, default)
# Currency used for all Cybersource transactions (required).
DEFAULT_CURRENCY = overridable("OSCAR_DEFAULT_CURRENCY", required=True)
# Legacy Secure Acceptance credentials, superseded by the
# SecureAcceptanceProfile model (deprecation warnings emitted below).
PROFILE = overridable("CYBERSOURCE_PROFILE")
ACCESS = overridable("CYBERSOURCE_ACCESS")
SECRET = overridable("CYBERSOURCE_SECRET")
if PROFILE:
    warnings.warn(
        "CYBERSOURCE_PROFILE setting is deprecated. Use cybersource.SecureAcceptanceProfile model instead.",
        DeprecationWarning,
    )
if ACCESS:
    warnings.warn(
        "CYBERSOURCE_ACCESS setting is deprecated. Use cybersource.SecureAcceptanceProfile model instead.",
        DeprecationWarning,
    )
if SECRET:
    warnings.warn(
        "CYBERSOURCE_SECRET setting is deprecated. Use cybersource.SecureAcceptanceProfile model instead.",
        DeprecationWarning,
    )
# Merchant identity and SOAP API configuration.  The default WSDL points at
# Cybersource's *test* transaction processor.
ORG_ID = overridable("CYBERSOURCE_ORG_ID", required=True)
MERCHANT_ID = overridable("CYBERSOURCE_MERCHANT_ID")
CYBERSOURCE_SOAP_KEY = overridable("CYBERSOURCE_SOAP_KEY")
CYBERSOURCE_WSDL = overridable(
    "CYBERSOURCE_WSDL",
    "https://ics2wstesta.ic3.com/commerce/1.x/transactionProcessor/CyberSourceTransaction_1.155.wsdl",
)
# Post-payment redirect targets (all required).
REDIRECT_PENDING = overridable("CYBERSOURCE_REDIRECT_PENDING", required=True)
REDIRECT_SUCCESS = overridable("CYBERSOURCE_REDIRECT_SUCCESS", required=True)
REDIRECT_FAIL = overridable("CYBERSOURCE_REDIRECT_FAIL", required=True)
# Secure Acceptance endpoint; the default is Cybersource's *test* endpoint.
ENDPOINT_PAY = overridable(
    "CYBERSOURCE_ENDPOINT_PAY",
    "https://testsecureacceptance.cybersource.com/silent/pay",
)
DATE_FORMAT = overridable("CYBERSOURCE_DATE_FORMAT", "%Y-%m-%dT%H:%M:%SZ")
LOCALE = overridable("CYBERSOURCE_LOCALE", "en")
# Device-fingerprinting beacon configuration.
FINGERPRINT_PROTOCOL = overridable("CYBERSOURCE_FINGERPRINT_PROTOCOL", "https")
FINGERPRINT_HOST = overridable("CYBERSOURCE_FINGERPRINT_HOST", "h.online-metrix.net")
SOURCE_TYPE = overridable("CYBERSOURCE_SOURCE_TYPE", "Cybersource Secure Acceptance")
# Decision Manager / shipping-method mapping knobs.
DECISION_MANAGER_KEYS = overridable("CYBERSOURCE_DECISION_MANAGER_KEYS", [])
SHIPPING_METHOD_DEFAULT = overridable("CYBERSOURCE_SHIPPING_METHOD_DEFAULT", "none")
SHIPPING_METHOD_MAPPING = overridable("CYBERSOURCE_SHIPPING_METHOD_MAPPING", {})
f00756f9f48fc1af2dbe3b1998483a17f5014ff7 | 258 | py | Python | neurokit2/eeg/__init__.py | 1110sillabo/NeuroKit | b12315465db559ef0228470a4c31b85371775896 | [
"MIT"
] | 1 | 2021-11-14T21:18:43.000Z | 2021-11-14T21:18:43.000Z | neurokit2/eeg/__init__.py | 1110sillabo/NeuroKit | b12315465db559ef0228470a4c31b85371775896 | [
"MIT"
] | null | null | null | neurokit2/eeg/__init__.py | 1110sillabo/NeuroKit | b12315465db559ef0228470a4c31b85371775896 | [
"MIT"
] | 1 | 2021-11-14T21:18:48.000Z | 2021-11-14T21:18:48.000Z | """Submodule for NeuroKit."""
from .mne_channel_add import mne_channel_add
from .mne_channel_extract import mne_channel_extract
from .mne_to_df import mne_to_df, mne_to_dict
__all__ = ["mne_channel_add", "mne_channel_extract", "mne_to_df", "mne_to_dict"]
| 28.666667 | 80 | 0.806202 |
8e5d7523e5de4613b5226ef151ee10aa818b58ac | 434 | py | Python | demo.py | tomviner/parkrun-tools | 43d3b2937813e836ab40af2d1549dde2760388cd | [
"MIT"
] | 1 | 2019-09-17T07:10:35.000Z | 2019-09-17T07:10:35.000Z | demo.py | tomviner/parkrun-tools | 43d3b2937813e836ab40af2d1549dde2760388cd | [
"MIT"
] | null | null | null | demo.py | tomviner/parkrun-tools | 43d3b2937813e836ab40af2d1549dde2760388cd | [
"MIT"
] | null | null | null | import stackprinter
import parkrun
# Install stackprinter as the global excepthook for readable tracebacks.
stackprinter.set_excepthook()
# Map event code -> tab-separated parkrun results export to import.
inputs = {
    'hh': 'hh-2019-05-11-tabs.txt',
    'tw': 'tw-2019-06-01-tabs.txt',
}
for infile in inputs.values():
    # skiprows/skipfooter trim the export's header/footer boilerplate.
    results = parkrun.importResults(
        infile, skiprows=12, skipfooter=38, report=True
    )
    parkrun.print_stats(results)
    # parkrun.time_hist(results, style='ggplot').show()
    parkrun.ageGrade_hist(results, style='ggplot').show()
c87471ba657cc0f976b159996aefdf8dab5e6d99 | 22,405 | py | Python | references/classification/train.py | nlgranger/vision | eba853e1089ee6235e3717344e8ffde445058406 | [
"BSD-3-Clause"
] | 2 | 2022-01-28T08:27:25.000Z | 2022-01-28T08:27:38.000Z | references/classification/train.py | nlgranger/vision | eba853e1089ee6235e3717344e8ffde445058406 | [
"BSD-3-Clause"
] | null | null | null | references/classification/train.py | nlgranger/vision | eba853e1089ee6235e3717344e8ffde445058406 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import os
import time
import warnings
import presets
import torch
import torch.utils.data
import torchvision
import transforms
import utils
from sampler import RASampler
from torch import nn
from torch.utils.data.dataloader import default_collate
from torchvision.transforms.functional import InterpolationMode
try:
from torchvision import prototype
except ImportError:
prototype = None
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args, model_ema=None, scaler=None):
    """Train *model* for one epoch over *data_loader*.

    Logs loss, learning rate, top-1/top-5 accuracy and images/sec through a
    MetricLogger.  When *scaler* is provided the forward/backward pass runs
    under torch.cuda.amp mixed precision; when *model_ema* is provided its
    parameters are refreshed every ``args.model_ema_steps`` iterations.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
    metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}"))
    header = f"Epoch: [{epoch}]"
    for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        # autocast is a no-op when no GradScaler was requested.
        with torch.cuda.amp.autocast(enabled=scaler is not None):
            output = model(image)
            loss = criterion(output, target)
        optimizer.zero_grad()
        if scaler is not None:
            scaler.scale(loss).backward()
            if args.clip_grad_norm is not None:
                # we should unscale the gradients of optimizer's assigned params if do gradient clipping
                scaler.unscale_(optimizer)
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            if args.clip_grad_norm is not None:
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
            optimizer.step()
        if model_ema and i % args.model_ema_steps == 0:
            model_ema.update_parameters(model)
            if epoch < args.lr_warmup_epochs:
                # Reset ema buffer to keep copying weights during warmup period
                model_ema.n_averaged.fill_(0)
        acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
        metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
        metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
def evaluate(model, criterion, data_loader, device, print_freq=100, log_suffix=""):
    """Evaluate *model* on *data_loader* and return the global top-1 accuracy.

    Runs under torch.inference_mode().  In the distributed setting the
    per-process sample counts and meters are reduced so the reported
    averages cover every rank.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = f"Test: {log_suffix}"
    num_processed_samples = 0
    with torch.inference_mode():
        for image, target in metric_logger.log_every(data_loader, print_freq, header):
            image = image.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image)
            loss = criterion(output, target)
            acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
            metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
            num_processed_samples += batch_size
    # gather the stats from all processes
    num_processed_samples = utils.reduce_across_processes(num_processed_samples)
    if (
        hasattr(data_loader.dataset, "__len__")
        and len(data_loader.dataset) != num_processed_samples
        and torch.distributed.get_rank() == 0
    ):
        # See FIXME above
        warnings.warn(
            f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
            "samples were used for the validation, which might bias the results. "
            "Try adjusting the batch size and / or the world size. "
            "Setting the world size to 1 is always a safe bet."
        )
    metric_logger.synchronize_between_processes()
    print(f"{header} Acc@1 {metric_logger.acc1.global_avg:.3f} Acc@5 {metric_logger.acc5.global_avg:.3f}")
    return metric_logger.acc1.global_avg
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
cache_path = os.path.expanduser(cache_path)
return cache_path
def load_data(traindir, valdir, args):
    """Build the train/val ImageFolder datasets and their samplers.

    Returns ``(dataset, dataset_test, train_sampler, test_sampler)``.  When
    ``args.cache_dataset`` is set, serialized datasets (transforms included)
    are cached under ~/.torch keyed by a hash of the directory path.
    """
    # Data loading code
    print("Loading data")
    val_resize_size, val_crop_size, train_crop_size = args.val_resize_size, args.val_crop_size, args.train_crop_size
    interpolation = InterpolationMode(args.interpolation)
    print("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print(f"Loading dataset_train from {cache_path}")
        dataset, _ = torch.load(cache_path)
    else:
        auto_augment_policy = getattr(args, "auto_augment", None)
        random_erase_prob = getattr(args, "random_erase", 0.0)
        dataset = torchvision.datasets.ImageFolder(
            traindir,
            presets.ClassificationPresetTrain(
                crop_size=train_crop_size,
                interpolation=interpolation,
                auto_augment_policy=auto_augment_policy,
                random_erase_prob=random_erase_prob,
            ),
        )
        if args.cache_dataset:
            print(f"Saving dataset_train to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset, traindir), cache_path)
    print("Took", time.time() - st)
    print("Loading validation data")
    cache_path = _get_cache_path(valdir)
    if args.cache_dataset and os.path.exists(cache_path):
        # Attention, as the transforms are also cached!
        print(f"Loading dataset_test from {cache_path}")
        dataset_test, _ = torch.load(cache_path)
    else:
        if not args.prototype:
            preprocessing = presets.ClassificationPresetEval(
                crop_size=val_crop_size, resize_size=val_resize_size, interpolation=interpolation
            )
        else:
            # Prototype mode: prefer the eval transforms bundled with the
            # requested weights, when given.
            if args.weights:
                weights = prototype.models.get_weight(args.weights)
                preprocessing = weights.transforms()
            else:
                preprocessing = prototype.transforms.ImageNetEval(
                    crop_size=val_crop_size, resize_size=val_resize_size, interpolation=interpolation
                )
        dataset_test = torchvision.datasets.ImageFolder(
            valdir,
            preprocessing,
        )
        if args.cache_dataset:
            print(f"Saving dataset_test to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)
    print("Creating data loaders")
    if args.distributed:
        # RASampler implements Repeated Augmentation; otherwise a plain
        # DistributedSampler shards the dataset across ranks.
        if args.ra_sampler:
            train_sampler = RASampler(dataset, shuffle=True, repetitions=args.ra_reps)
        else:
            train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)
    return dataset, dataset_test, train_sampler, test_sampler
def main(args):
    """Entry point: set up data, model, optimizer and schedulers, then run
    the training loop (or a single evaluation when ``args.test_only``).

    Handles distributed initialization, mixed precision, EMA tracking,
    checkpoint resume/save, and mixup/cutmix collation.
    """
    if args.prototype and prototype is None:
        raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.")
    if not args.prototype and args.weights:
        raise ValueError("The weights parameter works only in prototype mode. Please pass the --prototype argument.")
    if args.output_dir:
        utils.mkdir(args.output_dir)
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    if args.use_deterministic_algorithms:
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True
    # --- Data ---
    train_dir = os.path.join(args.data_path, "train")
    val_dir = os.path.join(args.data_path, "val")
    dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args)
    collate_fn = None
    num_classes = len(dataset.classes)
    mixup_transforms = []
    if args.mixup_alpha > 0.0:
        mixup_transforms.append(transforms.RandomMixup(num_classes, p=1.0, alpha=args.mixup_alpha))
    if args.cutmix_alpha > 0.0:
        mixup_transforms.append(transforms.RandomCutmix(num_classes, p=1.0, alpha=args.cutmix_alpha))
    if mixup_transforms:
        # Mixup/cutmix operate on whole batches, so they are applied in the
        # DataLoader's collate function rather than per sample.
        mixupcutmix = torchvision.transforms.RandomChoice(mixup_transforms)
        collate_fn = lambda batch: mixupcutmix(*default_collate(batch))  # noqa: E731
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=args.batch_size, sampler=test_sampler, num_workers=args.workers, pin_memory=True
    )
    # --- Model ---
    print("Creating model")
    if not args.prototype:
        model = torchvision.models.__dict__[args.model](pretrained=args.pretrained, num_classes=num_classes)
    else:
        model = prototype.models.__dict__[args.model](weights=args.weights, num_classes=num_classes)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)
    # --- Optimizer (optionally with separate weight decay for norm layers) ---
    if args.norm_weight_decay is None:
        parameters = model.parameters()
    else:
        param_groups = torchvision.ops._utils.split_normalization_params(model)
        wd_groups = [args.norm_weight_decay, args.weight_decay]
        parameters = [{"params": p, "weight_decay": w} for p, w in zip(param_groups, wd_groups) if p]
    opt_name = args.opt.lower()
    if opt_name.startswith("sgd"):
        optimizer = torch.optim.SGD(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov="nesterov" in opt_name,
        )
    elif opt_name == "rmsprop":
        optimizer = torch.optim.RMSprop(
            parameters, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, eps=0.0316, alpha=0.9
        )
    elif opt_name == "adamw":
        optimizer = torch.optim.AdamW(parameters, lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD, RMSprop and AdamW are supported.")
    scaler = torch.cuda.amp.GradScaler() if args.amp else None
    # --- LR schedule (main scheduler, optionally preceded by a warmup) ---
    args.lr_scheduler = args.lr_scheduler.lower()
    if args.lr_scheduler == "steplr":
        main_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    elif args.lr_scheduler == "cosineannealinglr":
        main_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs - args.lr_warmup_epochs
        )
    elif args.lr_scheduler == "exponentiallr":
        main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.lr_gamma)
    else:
        raise RuntimeError(
            f"Invalid lr scheduler '{args.lr_scheduler}'. Only StepLR, CosineAnnealingLR and ExponentialLR "
            "are supported."
        )
    if args.lr_warmup_epochs > 0:
        if args.lr_warmup_method == "linear":
            warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer, start_factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
            )
        elif args.lr_warmup_method == "constant":
            warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                optimizer, factor=args.lr_warmup_decay, total_iters=args.lr_warmup_epochs
            )
        else:
            raise RuntimeError(
                f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
            )
        lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
            optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[args.lr_warmup_epochs]
        )
    else:
        lr_scheduler = main_lr_scheduler
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    model_ema = None
    if args.model_ema:
        # Decay adjustment that aims to keep the decay independent from other hyper-parameters originally proposed at:
        # https://github.com/facebookresearch/pycls/blob/f8cd9627/pycls/core/net.py#L123
        #
        # total_ema_updates = (Dataset_size / n_GPUs) * epochs / (batch_size_per_gpu * EMA_steps)
        # We consider constant = Dataset_size for a given dataset/setup and ommit it. Thus:
        # adjust = 1 / total_ema_updates ~= n_GPUs * batch_size_per_gpu * EMA_steps / epochs
        adjust = args.world_size * args.batch_size * args.model_ema_steps / args.epochs
        alpha = 1.0 - args.model_ema_decay
        alpha = min(1.0, alpha * adjust)
        model_ema = utils.ExponentialMovingAverage(model_without_ddp, device=device, decay=1.0 - alpha)
    # --- Resume from checkpoint ---
    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        if not args.test_only:
            optimizer.load_state_dict(checkpoint["optimizer"])
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1
        if model_ema:
            model_ema.load_state_dict(checkpoint["model_ema"])
        if scaler:
            scaler.load_state_dict(checkpoint["scaler"])
    if args.test_only:
        # We disable the cudnn benchmarking because it can noticeably affect the accuracy
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        if model_ema:
            evaluate(model_ema, criterion, data_loader_test, device=device, log_suffix="EMA")
        else:
            evaluate(model, criterion, data_loader_test, device=device)
        return
    # --- Training loop ---
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args, model_ema, scaler)
        lr_scheduler.step()
        evaluate(model, criterion, data_loader_test, device=device)
        if model_ema:
            evaluate(model_ema, criterion, data_loader_test, device=device, log_suffix="EMA")
        if args.output_dir:
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch,
                "args": args,
            }
            if model_ema:
                checkpoint["model_ema"] = model_ema.state_dict()
            if scaler:
                checkpoint["scaler"] = scaler.state_dict()
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")
def get_args_parser(add_help=True):
    """Build and return the argparse parser for this training script.

    All hyper-parameters (data paths, model, optimizer, LR schedule,
    augmentation, AMP, EMA, distributed settings, prototype weights) are
    exposed as CLI flags; the defaults reproduce the standard ImageNet
    ResNet-18 recipe.
    """
    import argparse

    parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help)
    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
    parser.add_argument("--model", default="resnet18", type=str, help="model name")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
    parser.add_argument(
        "-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
    )
    parser.add_argument("--opt", default="sgd", type=str, help="optimizer")
    parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    parser.add_argument(
        "--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay",
    )
    parser.add_argument(
        "--norm-weight-decay",
        default=None,
        type=float,
        help="weight decay for Normalization layers (default: None, same value as --wd)",
    )
    parser.add_argument(
        "--label-smoothing", default=0.0, type=float, help="label smoothing (default: 0.0)", dest="label_smoothing"
    )
    parser.add_argument("--mixup-alpha", default=0.0, type=float, help="mixup alpha (default: 0.0)")
    parser.add_argument("--cutmix-alpha", default=0.0, type=float, help="cutmix alpha (default: 0.0)")
    parser.add_argument("--lr-scheduler", default="steplr", type=str, help="the lr scheduler (default: steplr)")
    parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
    parser.add_argument(
        "--lr-warmup-method", default="constant", type=str, help="the warmup method (default: constant)"
    )
    parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
    parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
    parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
    parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
    parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
    parser.add_argument(
        "--cache-dataset",
        dest="cache_dataset",
        help="Cache the datasets for quicker initialization. It also serializes the transforms",
        action="store_true",
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )
    parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
    parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
    # Mixed precision training parameters
    parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")
    # distributed training parameters
    parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
    parser.add_argument(
        "--model-ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters"
    )
    parser.add_argument(
        "--model-ema-steps",
        type=int,
        default=32,
        help="the number of iterations that controls how often to update the EMA model (default: 32)",
    )
    parser.add_argument(
        "--model-ema-decay",
        type=float,
        default=0.99998,
        help="decay factor for Exponential Moving Average of model parameters (default: 0.99998)",
    )
    parser.add_argument(
        "--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
    )
    parser.add_argument(
        "--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)"
    )
    parser.add_argument(
        "--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)"
    )
    parser.add_argument(
        "--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)"
    )
    parser.add_argument(
        "--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)"
    )
    parser.add_argument("--clip-grad-norm", default=None, type=float, help="the maximum gradient norm (default None)")
    parser.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
    parser.add_argument(
        "--ra-reps", default=3, type=int, help="number of repetitions for Repeated Augmentation (default: 3)"
    )
    # Prototype models only
    parser.add_argument(
        "--prototype",
        dest="prototype",
        help="Use prototype model builders instead those from main area",
        action="store_true",
    )
    parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
    return parser
if __name__ == "__main__":
    # Script entry point: parse CLI flags and launch training/evaluation.
    args = get_args_parser().parse_args()
    main(args)
| 43.504854 | 120 | 0.666146 |
fa10c1fb129b1a11f1c52dd7b5195e576cce0d28 | 1,030 | py | Python | commandment/enroll/__init__.py | pythonModule/commandment | 32ca167e44753014857a3f0329c8662f0df97f02 | [
"MIT"
] | 138 | 2017-11-21T17:57:59.000Z | 2022-02-16T13:08:39.000Z | commandment/enroll/__init__.py | pythonModule/commandment | 32ca167e44753014857a3f0329c8662f0df97f02 | [
"MIT"
] | 24 | 2017-11-12T01:42:13.000Z | 2022-02-24T21:53:28.000Z | commandment/enroll/__init__.py | pythonModule/commandment | 32ca167e44753014857a3f0329c8662f0df97f02 | [
"MIT"
] | 35 | 2017-11-15T12:14:44.000Z | 2022-02-26T00:44:36.000Z | from enum import Enum
class DeviceAttributes(Enum):
    """This enumeration describes all of the device attributes available to OTA profile enrolment.
    """
    UDID = 'UDID'
    VERSION = 'VERSION'
    PRODUCT = 'PRODUCT'
    DEVICE_NAME = 'DEVICE_NAME'
    SERIAL = 'SERIAL'
    MODEL = 'MODEL'
    # Defined here but deliberately excluded from AllDeviceAttributes below.
    MAC_ADDRESS_EN0 = 'MAC_ADDRESS_EN0'
    # Cellular hardware identifiers.
    MEID = 'MEID'
    IMEI = 'IMEI'
    ICCID = 'ICCID'
    COMPROMISED = 'COMPROMISED'
    DeviceID = 'DeviceID'
    # SPIROM = 'SPIROM'
    # MLB = 'MLB'
# Default set of attribute *values* to request during OTA enrolment.
# MAC_ADDRESS_EN0, SPIROM and MLB are deliberately left out (commented below).
AllDeviceAttributes = {
    DeviceAttributes.UDID.value,
    DeviceAttributes.VERSION.value,
    DeviceAttributes.PRODUCT.value,
    DeviceAttributes.DEVICE_NAME.value,
    DeviceAttributes.SERIAL.value,
    DeviceAttributes.MODEL.value,
    # DeviceAttributes.MAC_ADDRESS_EN0.value,
    DeviceAttributes.MEID.value,
    DeviceAttributes.IMEI.value,
    DeviceAttributes.ICCID.value,
    DeviceAttributes.COMPROMISED.value,
    DeviceAttributes.DeviceID.value,
    # DeviceAttributes.SPIROM.value,
    # DeviceAttributes.MLB.value,
}
2ff1a9aacdf8482b9290491e78148a6f22869af8 | 211 | py | Python | train/cmpnn/data/__init__.py | prokia/DL-Anti-Osteoporosis | 351e3d15b3a86df95833ad8fb9e86485c41cd0e6 | [
"MIT"
] | 4 | 2020-01-18T04:39:07.000Z | 2021-09-11T15:45:14.000Z | train/cmpnn/data/__init__.py | prokia/DL-Anti-Osteoporosis | 351e3d15b3a86df95833ad8fb9e86485c41cd0e6 | [
"MIT"
] | 1 | 2021-05-10T06:10:08.000Z | 2021-05-10T06:10:08.000Z | train/cmpnn/data/__init__.py | prokia/DL-Anti-Osteoporosis | 351e3d15b3a86df95833ad8fb9e86485c41cd0e6 | [
"MIT"
] | 3 | 2020-02-24T01:46:59.000Z | 2021-05-11T06:18:32.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 19:12:55 2019
@author: SY
"""
from .data import MoleculeDatapoint, MoleculeDataset
from .scaffold import scaffold_to_smiles
from .scaler import StandardScaler
| 19.181818 | 52 | 0.744076 |
27e854c6bf62b720e214208a74a16423157a4f47 | 1,213 | py | Python | setup.py | repelista/aptlyapi | 59f58c0b8cb9036d5b25acae295848515ca0143d | [
"MIT"
] | 3 | 2016-09-15T22:18:03.000Z | 2016-09-16T21:23:27.000Z | setup.py | repelista/aptlyapi | 59f58c0b8cb9036d5b25acae295848515ca0143d | [
"MIT"
] | null | null | null | setup.py | repelista/aptlyapi | 59f58c0b8cb9036d5b25acae295848515ca0143d | [
"MIT"
] | 2 | 2016-12-07T20:25:29.000Z | 2019-10-21T20:29:23.000Z | #!/usr/bin/env python
import io

from setuptools import setup, find_packages

from pyptly import __version__


def _read_requirements(path='requirements.txt'):
    """Return the requirement specifiers listed in *path*.

    Replaces ``pip.req.parse_requirements``: that was an unsupported pip
    internal API and was removed entirely in pip 10, which broke this
    setup.py on modern environments.  Blank lines and comments are skipped.
    """
    with io.open(path, encoding='utf-8') as fh:
        return [line.strip() for line in fh
                if line.strip() and not line.lstrip().startswith('#')]


reqs = _read_requirements()

setup(name="pyptly",
      version=__version__,
      description="Python wrapper for the Aptly API",
      license="MIT",
      author="Nikolai Nozhenko",
      author_email="nik.nozhenko@gmail.com",
      url="http://github.com/repelista/pyaptly",
      packages=find_packages(),
      install_requires=reqs,
      keywords="aptly library",
      classifiers=[
          'Development Status :: 4 - Beta',
          'Topic :: Software Development :: Libraries',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5'
      ],
      zip_safe=True)
| 34.657143 | 75 | 0.629843 |
70875721911955ad3ace205e86bf085539429a08 | 23,162 | py | Python | var/spack/repos/builtin/packages/root/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/root/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2018-09-20T18:32:50.000Z | 2019-12-04T16:58:12.000Z | var/spack/repos/builtin/packages/root/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class Root(CMakePackage):
"""ROOT is a data analysis framework."""
homepage = "https://root.cern.ch"
url = "https://root.cern/download/root_v6.16.00.source.tar.gz"
# ###################### Versions ##########################
# Master branch
version('master', git="https://github.com/root-project/root.git",
branch='master')
# Development version (when more recent than production).
# Production version
version('6.16.00', sha256='2a45055c6091adaa72b977c512f84da8ef92723c30837c7e2643eecc9c5ce4d8', preferred=True)
# Old versions
version('6.14.08', sha256='1b63b51cfb4dc20f1f5749faac6bbd1098eccab777f8b49911257d77186c73c4')
version('6.14.06', sha256='0fb943b61396f282b289e35c455a9ab60126229be1bd3f04a8f00b37c13ab432')
version('6.14.04', sha256='463ec20692332a422cfb5f38c78bedab1c40ab4d81be18e99b50cf9f53f596cf')
version('6.14.02', sha256='93816519523e87ac75924178d87112d1573eaa108fc65691aea9a9dd5bc05b3e')
version('6.14.00', sha256='7946430373489310c2791ff7a3520e393dc059db1371272bcd9d9cf0df347a0b')
version('6.12.06', sha256='aedcfd2257806e425b9f61b483e25ba600eb0ea606e21262eafaa9dc745aa794')
version('6.10.08', sha256='2cd276d2ac365403c66f08edd1be62fe932a0334f76349b24d8c737c0d6dad8a')
version('6.08.06', sha256='ea31b047ba6fc04b0b312667349eaf1498a254ccacd212144f15ffcb3f5c0592')
version('6.06.08', sha256='7cb836282014cce822ef589cad27811eb7a86d7fad45a871fa6b0e6319ec201a')
version('6.06.06', sha256='0a7d702a130a260c72cb6ea754359eaee49a8c4531b31f23de0bfcafe3ce466b')
version('6.06.04', sha256='ab86dcc80cbd8e704099af0789e23f49469932ac4936d2291602301a7aa8795b')
version('6.06.02', sha256='18a4ce42ee19e1a810d5351f74ec9550e6e422b13b5c58e0c3db740cdbc569d1')
version('5.34.38', sha256='2c3bda69601d94836bdd88283a6585b4774eafc813deb6aa348df0af2922c4d2')
# ###################### Patches ##########################
# Widely used patch (CMS, FNAL) to increase the size of static
# buffers used to improve the operation of TString.
patch('format-stringbuf-size.patch', level=0)
# Support use of `mariadb-c-client` and `mariadb` to provide the
# MySQL API _cf_
# https://github.com/root-project/root/commit/9c0fa8c554a569c971185249f9acfff4418c0c13.
patch('find-mysql.patch', level=1, when='@:6.16.00')
# Some ROOT versions did not honor the option to avoid building an
# internal version of unuran, _cf_
# https://github.com/root-project/ROOT/commit/3e60764f133218b6938e5aa4986de760e8f058d9.
patch('honor-unuran-switch.patch', level=1, when='@:6.13.99')
# 6.16.00 fails to handle particular build option combinations, _cf_
# https://github.com/root-project/ROOT/commit/e0ae0483985d90a71a6cabd10d3622dfd1c15611.
patch('root7-webgui.patch', level=1, when='@6.16.00')
if sys.platform == 'darwin':
# Resolve non-standard use of uint, _cf_
# https://sft.its.cern.ch/jira/browse/ROOT-7886.
patch('math_uint.patch', when='@6.06.02')
# Resolve circular dependency, _cf_
# https://sft.its.cern.ch/jira/browse/ROOT-8226.
patch('root6-60606-mathmore.patch', when='@6.06.06')
# ###################### Variants ##########################
variant('avahi', default=False,
description='Compile with avahi')
variant('aqua', default=False,
description='Enable Aqua interface')
# No need for a specific variant: libafterimage is not provided by spack
# By default always true, we get the builtin included in the source
# variant('asimage', default=True,
# description='Enable image processing support')
variant('davix', default=True,
description='Compile with external Davix')
variant('emacs', default=False,
description='Enable Emacs support')
variant('examples', default=True,
description='Install examples')
variant('fftw', default=False,
description='Enable Fast Fourier Transform support')
variant('fits', default=False,
description='Enable support for images and data from FITS files')
variant('fortran', default=False,
description='Enable the Fortran components of ROOT')
variant('graphviz', default=False,
description='Enable graphviz support')
variant('gdml', default=True,
description='Enable GDML writer and reader')
variant('gminimal', default=True,
description='Ignore most of Root\'s feature defaults except for '
'basic graphic options')
variant('gsl', default=True,
description='Enable linking against shared libraries for GSL')
variant('http', default=False,
description='Enable HTTP server support')
variant('jemalloc', default=False,
description='Enable using the jemalloc allocator')
variant('kerberos', default=False,
description='Enable Kerberos support')
variant('ldap', default=False,
description='Enable LDAP support')
variant('libcxx', default=False,
description='Build using libc++')
variant('math', default=True,
description='Build the new libMathMore extended math library')
variant('memstat', default=False,
description='Enable a memory stats utility to detect memory leaks')
# Minuit must not be installed as a dependency of root
# otherwise it crashes with the internal minuit library
variant('minuit', default=True,
description='Automatically search for support libraries')
variant('mysql', default=False)
variant('odbc', default=False,
description='Enable ODBC support')
variant('opengl', default=True,
description='Enable OpenGL support')
# variant('oracle', default=False) - not supported by spack
variant('postgres', default=False,
description='Enable postgres support')
variant('pythia6', default=False,
description='Enable pythia6 support')
# variant('pythia8', default=False, - not suported by spack
# description='Enable pythia8 support')
variant('python', default=True,
description='Enable Python ROOT bindings')
variant('qt4', default=False,
description='Enable Qt graphics backend')
variant('r', default=False,
description='Enable R ROOT bindings')
variant('rpath', default=True,
description='Enable RPATH')
variant('rootfit', default=True,
description='Build the libRooFit advanced fitting package')
variant('root7', default=False,
description='Enable ROOT 7 support')
variant('shadow', default=False,
description='Enable shadow password support')
variant('sqlite', default=False,
description='Enable SQLite support')
variant('ssl', default=False,
description='Enable SSL encryption support')
variant('table', default=False,
description='Build libTable contrib library')
variant('tbb', default=True,
description='TBB multi-threading support')
variant('test', default=False,
description='Enable test suit of ROOT with CTest')
variant('threads', default=True,
description='Enable using thread library')
variant('tiff', default=True,
description='Include Tiff support in image processing')
variant('tmva', default=True,
description='Build TMVA multi variate analysis library')
variant('unuran', default=True,
description='Use UNURAN for random number generation')
variant('vc', default=False,
description='Enable Vc for adding new types for SIMD programming')
variant('vdt', default=True,
description='Enable set of fast and vectorisable math functions')
variant('x', default=True,
description='Enable set of graphical options')
# variant('xinetd', default=False, - not supported by spack
# description='Enable a daemon process manager')
variant('xml', default=True,
description='Enable XML parser interface')
variant('xrootd', default=False,
description='Build xrootd file server and its client')
# ###################### Compiler variants ########################
variant('cxxstd',
default='11',
values=('11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
# ###################### Dependencies ######################
depends_on('cmake@3.4.3:', type='build')
depends_on('pkgconfig', type='build')
depends_on('blas')
depends_on('freetype')
depends_on('jpeg')
depends_on('libice')
depends_on('libpng')
depends_on('lz4', when='@6.13.02:') # See cmake_args, below.
depends_on('ncurses')
depends_on('pcre')
depends_on('xxhash', when='@6.13.02:') # See cmake_args, below.
depends_on('xz')
depends_on('zlib')
# X-Graphics
depends_on('libx11', when="+x")
depends_on('libxext', when="+x")
depends_on('libxft', when="+x")
depends_on('libxpm', when="+x")
depends_on('libsm', when="+x")
# OpenGL
depends_on('ftgl@2.1.3-rc5', when="+x+opengl")
depends_on('glew', when="+x+opengl")
depends_on('gl', when="+x+opengl")
depends_on('glu', when="+x+opengl")
depends_on('gl2ps', when="+x+opengl")
# Qt4
depends_on('qt@:4.999', when='+qt4')
# TMVA
depends_on('py-numpy', when='+tmva')
# Asimage variant would need one of these two
# For the moment, we use the libafterimage provided by the root sources
# depends_on('libafterimage', when='+asimage') - not supported
# depends_on('afterstep@2.2.11', when='+asimage') - not supported
# Optional dependencies
depends_on('avahi', when='+avahi')
depends_on('davix', when='+davix')
depends_on('cfitsio', when='+fits')
depends_on('fftw', when='+fftw')
depends_on('graphviz', when='+graphviz')
depends_on('gsl', when='+gsl')
depends_on('http', when='+http')
depends_on('jemalloc', when='+jemalloc')
depends_on('kerberos', when='+kerberos')
depends_on('ldap', when='+ldap')
depends_on('libcxx', when='+libcxx')
depends_on('mysql-client', when='+mysql')
depends_on('odbc', when='+odbc')
# depends_on('oracle', when='+oracle')
depends_on('openssl', when='+ssl')
depends_on('openssl', when='+davix') # Also with davix
depends_on('postgresql', when='+postgres')
depends_on('pythia6+root', when='+pythia6')
# depends_on('pythia@8:8.999', when='+pythia8') - not supported on Spack
depends_on('python@2.7:', when='+python', type=('build', 'run'))
depends_on('r', when='+r', type=('build', 'run'))
depends_on('r-cpp', when='+r', type=('build', 'run'))
depends_on('r-inside', when='+r', type=('build', 'run'))
depends_on('shadow', when='+shadow')
depends_on('sqlite', when='+sqlite')
depends_on('tbb', when='+tbb')
depends_on('unuran', when='+unuran')
depends_on('vc', when='+vc')
depends_on('veccore', when='+veccore')
depends_on('vdt', when='+vdt')
depends_on('libxml2+python', when='+xml+python')
depends_on('libxml2~python', when='+xml~python')
depends_on('xrootd', when='+xrootd')
# depends_on('hdfs') - supported (TODO)
# Not supported
# depends_on('monalisa')
# Grid packages - not supported yet by Spack
# depends_on('castor')
# depends_on('chirp')
# depends_on('dcap')
# depends_on('gfal')
# depends_on('ldap')
# depends_on('rfio')
# ###################### Conflicts ######################
# I was unable to build root with any Intel compiler
# See https://sft.its.cern.ch/jira/browse/ROOT-7517
conflicts('%intel')
# Incompatible variants
conflicts('+tmva', when='~gsl', msg="TVMA requires GSL")
conflicts('cxxstd=11', when='+root7', msg="root7 requires at least C++14")
# Feature removed:
conflicts('+memstat', when='@6.18.00:',
msg="Obsolete option +memstat selected.")
conflicts('+memstat', when='@master',
msg="Obsolete option +memstat selected.")
def cmake_args(self):
spec = self.spec
options = []
# #################### Base Settings #######################
# ROOT should not download its own dependencies
options = [
'-Dexplicitlink=ON',
'-Dexceptions=ON',
'-Dfail-on-missing=ON',
'-Dshared=ON',
'-Dsoversion=ON',
'-Dbuiltin_llvm=ON',
'-Dbuiltin_afterimage=ON',
'-Dasimage:BOOL=ON', # if afterimage is taken from builtin
'-Dastiff:BOOL=ON', # asimage and astiff must be ON too
'-Dbuiltin_cfitsio:BOOL=OFF',
'-Dbuiltin_davix:BOOL=OFF',
'-Dbuiltin_fftw3:BOOL=OFF',
'-Dbuiltin_freetype:BOOL=OFF',
'-Dbuiltin_ftgl:BOOL=OFF',
'-Dbuiltin_gl2ps:BOOL=OFF',
'-Dbuiltin_glew:BOOL=OFF',
'-Dbuiltin_gsl:BOOL=OFF',
'-Dbuiltin_lzma:BOOL=OFF',
'-Dbuiltin_openssl:BOOL=OFF',
'-Dbuiltin_pcre:BOOL=OFF',
'-Dbuiltin_tbb:BOOL=OFF',
'-Dbuiltin_unuran:BOOL=OFF',
'-Dbuiltin_vc:BOOL=OFF',
'-Dbuiltin_vdt:BOOL=OFF',
'-Dbuiltin_veccore:BOOL=OFF',
'-Dbuiltin_xrootd:BOOL=OFF',
'-Dbuiltin_zlib:BOOL=OFF'
]
# LZ4 and xxhash do not work as external deps for older versions
options.extend([
'-Dbuiltin_lz4:BOOL=%s' % (
'ON' if self.spec.satisfies('@6.12.02:6.12.99') else 'OFF'),
'-Dbuiltin_xxhash:BOOL=%s' % (
'ON' if self.spec.satisfies('@6.12.02:6.12.99') else 'OFF'),
])
# #################### ROOT options #######################
options.extend([
'-Dx11:BOOL=%s' % (
'ON' if '+x' in spec else 'OFF'),
'-Dxft:BOOL=%s' % (
'ON' if '+x' in spec else 'OFF'),
'-Dbonjour:BOOL=%s' % (
'ON' if '+avahi' in spec else 'OFF'),
'-Dcocoa:BOOL=%s' % (
'ON' if '+aqua' in spec else 'OFF'),
# -Dcxxmodules=OFF # use clang C++ modules
'-Ddavix:BOOL=%s' % (
'ON' if '+davix' in spec else 'OFF'),
'-Dfftw3:BOOL=%s' % (
'ON' if '+fftw' in spec else 'OFF'),
'-Dfitsio:BOOL=%s' % (
'ON' if '+fits' in spec else 'OFF'),
'-Dfortran:BOOL=%s' % (
'ON' if '+fortran' in spec else 'OFF'),
'-Dftgl:BOOL=%s' % (
'ON' if '+opengl' in spec else 'OFF'),
'-Dgdml:BOOL=%s' % (
'ON' if '+gdml' in spec else 'OFF'),
'-Dgl2ps:BOOL=%s' % (
'ON' if '+opengl' in spec else 'OFF'),
'-Dgenvector:BOOL=%s' % (
'ON' if '+math' in spec else 'OFF'), # default ON
'-Dgminimal:BOOL=%s' % ( # Reduce unwanted surprises
'ON' if '+gminimal' in spec else 'OFF'), # Default ON
'-Dgsl_shared:BOOL=%s' % (
'ON' if '+gsl' in spec else 'OFF'),
'-Dgviz:BOOL=%s' % (
'ON' if '+graphviz' in spec else 'OFF'),
'-Dhttp:BOOL=%s' % (
'ON' if '+http' in spec else 'OFF'),
'-Dimt:BOOL=%s' % (
'ON' if '+tbb' in spec else 'OFF'),
'-Djemalloc:BOOL=%s' % (
'ON' if '+jemalloc' in spec else 'OFF'),
'-Dkrb5:BOOL=%s' % (
'ON' if '+kerberos' in spec else 'OFF'),
'-Dldap:BOOL=%s' % (
'ON' if '+ldap' in spec else 'OFF'),
'-Dlibcxx:BOOL=%s' % (
'ON' if '+libcxx' in spec else 'OFF'),
'-Dmathmore:BOOL=%s' % (
'ON' if '+math' in spec else 'OFF'),
'-Dmemstat:BOOL=%s' % (
'ON' if '+memstat' in spec else 'OFF'),
'-Dminimal:BOOL=%s' % (
'ON' if '+minimal' in spec else 'OFF'),
'-Dminuit:BOOL=%s' % (
'ON' if '+minuit' in spec else 'OFF'),
'-Dminuit2:BOOL=%s' % (
'ON' if '+minuit' in spec else 'OFF'),
'-Dmysql:BOOL=%s' % (
'ON' if '+mysql' in spec else 'OFF'),
'-Dodbc:BOOL=%s' % (
'ON' if '+odbc' in spec else 'OFF'),
'-Dopengl:BOOL=%s' % (
'ON' if '+opengl' in spec else 'OFF'),
'-Doracle:BOOL=%s' % (
'ON' if '+oracle' in spec else 'OFF'), # not supported
'-Dpch:BOOL=%s' % (
'ON' if '+pch' in spec else 'OFF'), # needs cling
'-Dpgsql:BOOL=%s' % (
'ON' if '+postgres' in spec else 'OFF'),
'-Dpythia6:BOOL=%s' % (
'ON' if '+pythia6' in spec else 'OFF'),
# Force not to build pythia8 (not supported yet by spack), to avoid
# wrong defaults from ROOT at build time
'-Dpythia8:BOOL=%s' % (
'ON' if '+pythia8' in spec else 'OFF'),
'-Dpython:BOOL=%s' % (
'ON' if self.spec.satisfies('+python ^python@2.7:2.99.99')
else 'OFF'),
'-Dpython3:BOOL=%s' % (
'ON' if self.spec.satisfies('+python ^python@3.0:')
else 'OFF'),
'-Dqt:BOOL=%s' % (
'ON' if '+qt4' in spec else 'OFF'),
'-Dqtgsi:BOOL=%s' % (
'ON' if '+qt4' in spec else 'OFF'),
'-Dr:BOOL=%s' % (
'ON' if '+R' in spec else 'OFF'),
'-Droofit:BOOL=%s' % (
'ON' if '+roofit' in spec else 'OFF'),
'-Droot7:BOOL=%s' % (
'ON' if '+root7' in spec else 'OFF'), # requires C++14
'-Dwebui:BOOL=%s' % (
'ON' if '+root7' in spec else 'OFF'), # requires root7
'-Drpath:BOOL=%s' % (
'ON' if '+rpath' in spec else 'OFF'),
'-Dshadowpw:BOOL=%s' % (
'ON' if '+shadow' in spec else 'OFF'),
'-Dsqlite:BOOL=%s' % (
'ON' if '+sqlite' in spec else 'OFF'),
'-Dssl:BOOL=%s' % (
'ON' if '+ssl' in spec else 'OFF'),
'-Dtable:BOOL=%s' % (
'ON' if '+table' in spec else 'OFF'),
'-Dtbb:BOOL=%s' % (
'ON' if '+tbb' in spec else 'OFF'),
'-Dtesting:BOOL=%s' % (
'ON' if '+test' in spec else 'OFF'),
'-Dthread:BOOL=%s' % (
'ON' if '+threads' in spec else 'OFF'),
'-Dtmva:BOOL=%s' % (
'ON' if '+tmva' in spec else 'OFF'),
'-Dunuran:BOOL=%s' % (
'ON' if '+unuran' in spec else 'OFF'),
'-Dvc:BOOL=%s' % (
'ON' if '+vc' in spec else 'OFF'),
'-Dveccore:BOOL=%s' % (
'ON' if '+veccore' in spec else 'OFF'), # not supported
'-Dvdt:BOOL=%s' % (
'ON' if '+vdt' in spec else 'OFF'),
'-Dxml:BOOL=%s' % (
'ON' if '+xml' in spec else 'OFF'), # default ON
'-Dxrootd:BOOL=%s' % (
'ON' if '+xrootd' in spec else 'OFF'), # default ON
# Fixed options
'-Dafdsmrgd:BOOL=OFF', # not supported
'-Dafs:BOOL=OFF', # not supported
'-Dalien:BOOL=OFF',
'-Dcastor:BOOL=OFF', # not supported
'-Dccache:BOOL=OFF', # not supported
'-Dchirp:BOOL=OFF',
'-Dcling:BOOL=ON',
'-Ddcache:BOOL=OFF', # not supported
'-Dgeocad:BOOL=OFF', # not supported
'-Dgfal:BOOL=OFF', # not supported
'-Dglite:BOOL=OFF', # not supported
'-Dglobus:BOOL=OFF',
'-Dgnuinstall:BOOL=OFF',
'-Dhdfs:BOOL=OFF', # TODO pending to add
'-Dmonalisa:BOOL=OFF', # not supported
'-Drfio:BOOL=OFF', # not supported
'-Droottest:BOOL=OFF', # requires network
'-Druby:BOOL=OFF', # unmantained upstream
# Use clang C++ modules, experimental
'-Druntime_cxxmodules:BOOL=OFF',
'-Dsapdb:BOOL=OFF', # option not implemented
'-Dsrp:BOOL=OFF', # option not implemented
'-Dtcmalloc:BOOL=OFF'
])
# #################### Compiler options ####################
if sys.platform == 'darwin':
if self.compiler.cc == 'gcc':
options.extend([
'-DCMAKE_C_FLAGS=-D__builtin_unreachable=__builtin_trap',
'-DCMAKE_CXX_FLAGS=-D__builtin_unreachable=__builtin_trap',
])
options.append(
'-Dcxx{0}=ON'.format(self.spec.variants['cxxstd'].value)
)
if 'mysql-client' in self.spec:
options.append('-DCMAKE_PROGRAM_PATH={0}'.format(
self.spec['mysql-client'].prefix.bin))
if '+x+opengl' in self.spec:
options.append('-DFTGL_ROOT_DIR={0}'.format(
self.spec['ftgl'].prefix))
options.append('-DFTGL_INCLUDE_DIR={0}'.format(
self.spec['ftgl'].prefix.include))
# see https://github.com/spack/spack/pull/11579
if '+python' in self.spec:
options.append('-DPYTHON_EXECUTABLE=%s' %
spec['python'].command.path)
return options
def setup_environment(self, spack_env, run_env):
run_env.set('ROOTSYS', self.prefix)
run_env.set('ROOT_VERSION', 'v{0}'.format(self.version.up_to(1)))
run_env.prepend_path('PYTHONPATH', self.prefix.lib)
if 'lz4' in self.spec:
spack_env.append_path('CMAKE_PREFIX_PATH',
self.spec['lz4'].prefix)
spack_env.set('SPACK_INCLUDE_DIRS', '', force=True)
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
spack_env.set('ROOTSYS', self.prefix)
spack_env.set('ROOT_VERSION', 'v{0}'.format(self.version.up_to(1)))
spack_env.prepend_path('PYTHONPATH', self.prefix.lib)
spack_env.prepend_path('PATH', self.prefix.bin)
spack_env.append_path('CMAKE_MODULE_PATH', '{0}/cmake'
.format(self.prefix))
run_env.set('ROOTSYS', self.prefix)
run_env.set('ROOT_VERSION', 'v{0}'.format(self.version.up_to(1)))
run_env.prepend_path('PYTHONPATH', self.prefix.lib)
run_env.prepend_path('PATH', self.prefix.bin)
| 43.701887 | 113 | 0.560228 |
17a24a98683e5da2f44cdfcee4e32fbc432577d3 | 5,988 | py | Python | napari/utils/_injection.py | Napari/napari | 2dc5aa659f875c353bfbde3b20d8f07a664ed8a8 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/utils/_injection.py | guiwitz/napari | 1546f18ecc13364d5415623a9c11ed760ff043e2 | [
"BSD-3-Clause"
] | 120 | 2018-09-04T22:05:13.000Z | 2019-03-02T01:13:57.000Z | napari/utils/_injection.py | guiwitz/napari | 1546f18ecc13364d5415623a9c11ed760ff043e2 | [
"BSD-3-Clause"
] | 8 | 2018-09-04T21:48:26.000Z | 2019-01-29T04:48:30.000Z | from functools import wraps
from inspect import isgeneratorfunction, signature
from typing import Any, Callable, Dict, Optional, Type, TypeVar
from typing_extensions import get_type_hints
from .. import components, layers, viewer
from ..viewer import current_viewer
T = TypeVar("T")
_NULL = object()
def _get_active_layer() -> Optional[layers.Layer]:
    """Return the active layer of the current viewer, or None without one."""
    active_viewer = current_viewer()
    if not active_viewer:
        return None
    return active_viewer.layers.selection.active
def _get_active_layer_list() -> Optional[components.LayerList]:
    """Return the layer list of the current viewer, or None without one."""
    active_viewer = current_viewer()
    if not active_viewer:
        return None
    return active_viewer.layers
# registry of Type -> "accessor function"
# where each value is a function that is capable
# of retrieving an instance of its corresponding key type.
_ACCESSORS: Dict[Type, Callable[..., Optional[object]]] = {
layers.Layer: _get_active_layer,
viewer.Viewer: current_viewer,
components.LayerList: _get_active_layer_list,
}
def get_accessor(type_: Type[T]) -> Optional[Callable[..., Optional[T]]]:
    """Return the accessor function registered for *type_*, if any.

    An accessor is a zero-argument function returning an instance of a
    particular type — e.g. for ``napari.Viewer`` we return a function that
    fetches the current viewer.  Exact matches in the registry win; failing
    that, the first registered base class of *type_* (in registration
    order) is used.  Returns None when nothing matches.

    This is a form of dependency injection, and, along with
    `inject_napari_dependencies`, allows us to inject current napari
    objects into functions based on type hints.
    """
    if type_ in _ACCESSORS:
        return _ACCESSORS[type_]
    if not isinstance(type_, type):
        return None
    # Fall back to the first registered superclass, in insertion order.
    return next(
        (accessor for registered, accessor in _ACCESSORS.items()
         if issubclass(type_, registered)),
        None,
    )
class set_accessor:
    """Register accessor function(s) for the given type(s).

    "Accessors" are functions that can retrieve an instance of a given
    type — e.g. ``napari.viewer.current_viewer`` retrieves an instance of
    ``napari.Viewer``.  This class can be used as a plain call or as a
    context manager; used as the latter, the previous registry state is
    restored on exit.

    Parameters
    ----------
    mapping : Dict[Type[T], Callable[..., Optional[T]]]
        Map of type -> accessor function, where each value is a function
        capable of retrieving an instance of the associated key/type.
    clobber : bool, optional
        Whether to override an already-registered accessor, by default
        False.

    Raises
    ------
    ValueError
        If clobber is False and one of the keys in *mapping* already has
        a registered accessor.
    """

    def __init__(
        self, mapping: Dict[Type[T], Callable[..., Optional[T]]], clobber=False
    ):
        saved = {}
        for key in mapping:
            if key in _ACCESSORS and not clobber:
                raise ValueError(
                    f"Class {key} already has an accessor and clobber is False"
                )
            # _NULL marks "no previous accessor" so __exit__ can delete it.
            saved[key] = _ACCESSORS.get(key, _NULL)
        self._before = saved
        _ACCESSORS.update(mapping)

    def __enter__(self):
        return None

    def __exit__(self, *_):
        # Restore (or remove) every accessor we replaced.
        for key, previous in self._before.items():
            if previous is _NULL:
                del _ACCESSORS[key]
            else:
                _ACCESSORS[key] = previous
def napari_type_hints(obj: Any) -> Dict[str, Any]:
    """variant of get_type_hints with napari namespace awareness."""
    import napari

    # Resolve forward references against the napari top-level module plus
    # the viewer/layers/components submodule namespaces.
    namespace = {'napari': napari}
    namespace.update(viewer.__dict__)
    namespace.update(layers.__dict__)
    namespace.update(components.__dict__)
    return get_type_hints(obj, namespace)
def inject_napari_dependencies(func: Callable) -> Callable:
    """Create callable that can access napari objects based on type hints.

    This is form of dependency injection. If a function includes a parameter
    that has a recognized napari type (e.g. `Viewer`, or `Layer`), then this
    function will return a new version of the input function that can be called
    *without* that particular parameter.

    Examples
    --------
    >>> def f(viewer: Viewer): ...
    >>> inspect.signature(f)
    <Signature (x: 'Viewer')>
    >>> f2 = inject_napari_dependencies(f)
    >>> inspect.signature(f2)
    <Signature (x: typing.Optional[napari.viewer.Viewer] = None)>

    # if f2 is called without x, the current_viewer will be provided for x

    Parameters
    ----------
    func : Callable
        A function with napari type hints.

    Returns
    -------
    Callable
        A function with napari dependencies injected
    """
    # Zero-argument callables have nothing to inject; return them untouched.
    if not func.__code__.co_argcount:
        return func
    sig = signature(func)
    # get type hints for the object, with forward refs of napari hints resolved
    hints = napari_type_hints(func)
    # get accessor functions for each required parameter
    # NOTE(review): `hints` can contain a 'return' entry when `func` has a
    # return annotation; `sig.parameters['return']` would then raise
    # KeyError -- confirm callers never annotate the return type.
    required = {}
    for name, hint in hints.items():
        if sig.parameters[name].default is sig.empty:
            required[name] = hint

    @wraps(func)
    def _exec(*args, **kwargs):
        # when we call the function, we call the accessor functions to get
        # the current napari objects
        _kwargs = {}
        for n, hint in required.items():
            if accessor := get_accessor(hint):
                _kwargs[n] = accessor()
        # but we use bind_partial to allow the caller to still provide
        # their own objects if desired.
        # (i.e. the injected deps are only used if needed)
        _kwargs.update(**sig.bind_partial(*args, **kwargs).arguments)
        return func(**_kwargs)

    out = _exec
    # if it came in as a generatorfunction, it needs to go out as one.
    if isgeneratorfunction(func):

        @wraps(func)
        def _gexec(*args, **kwargs):
            yield from _exec(*args, **kwargs)

        out = _gexec

    # update the signature: every injected parameter becomes Optional with a
    # default of None, so callers may omit it entirely.
    p = [
        p.replace(default=None, annotation=Optional[hints[p.name]])
        if p.name in required
        else p
        for p in sig.parameters.values()
    ]
    out.__signature__ = sig.replace(parameters=p)
    return out
215718f0a82ec16cbbae3bc9a18a25f77badd202 | 8,058 | py | Python | lib/rucio/tests/test_judge_injector.py | efajardo/rucio | 460f394715568b937584ef671382b2b93add1758 | [
"Apache-2.0"
] | 1 | 2019-03-04T09:09:42.000Z | 2019-03-04T09:09:42.000Z | lib/rucio/tests/test_judge_injector.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_judge_injector.py | pujanm/rucio | 355a997a5ea213c427a5d841ab151ceb01073eb4 | [
"Apache-2.0"
] | 1 | 2021-06-17T14:15:15.000Z | 2021-06-17T14:15:15.000Z | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Martin Barisits, <martin.barisits@cern.ch>, 2015-2016
from nose.tools import assert_raises
from rucio.common.exception import RuleNotFound
from rucio.common.utils import generate_uuid as uuid
from rucio.core.account_limit import set_account_limit
from rucio.core.did import add_did, attach_dids
from rucio.core.lock import get_replica_locks
from rucio.core.rse import add_rse_attribute, get_rse
from rucio.core.rule import add_rule, get_rule, approve_rule, deny_rule, list_rules
from rucio.daemons.judge.injector import rule_injector
from rucio.db.sqla.constants import DIDType, RuleState
from rucio.tests.test_rule import create_files, tag_generator
class TestJudgeEvaluator():
    @classmethod
    def setUpClass(cls):
        """Create the RSE/tag/quota fixture shared by all tests in the class.

        Tags four mock RSEs (T1 covers MOCK/MOCK3/MOCK5, T2 covers MOCK4),
        assigns fake weights, and grants unlimited quota (-1) to the 'jdoe'
        and 'root' accounts on all four RSEs.

        NOTE(review): the class is named TestJudgeEvaluator but every test
        exercises the judge *injector* daemon -- confirm the intended name.
        """
        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'
        cls.rse1_id = get_rse(cls.rse1).id
        cls.rse3_id = get_rse(cls.rse3).id
        cls.rse4_id = get_rse(cls.rse4).id
        cls.rse5_id = get_rse(cls.rse5).id
        # Add Tags: T1 groups MOCK/MOCK3/MOCK5, T2 holds only MOCK4.
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1, cls.T1, True)
        add_rse_attribute(cls.rse3, cls.T1, True)
        add_rse_attribute(cls.rse4, cls.T2, True)
        add_rse_attribute(cls.rse5, cls.T1, True)
        # Add fake weights
        add_rse_attribute(cls.rse1, "fakeweight", 10)
        add_rse_attribute(cls.rse3, "fakeweight", 0)
        add_rse_attribute(cls.rse4, "fakeweight", 0)
        add_rse_attribute(cls.rse5, "fakeweight", 0)
        # Add quota (-1 means unlimited).
        set_account_limit('jdoe', cls.rse1_id, -1)
        set_account_limit('jdoe', cls.rse3_id, -1)
        set_account_limit('jdoe', cls.rse4_id, -1)
        set_account_limit('jdoe', cls.rse5_id, -1)
        set_account_limit('root', cls.rse1_id, -1)
        set_account_limit('root', cls.rse3_id, -1)
        set_account_limit('root', cls.rse4_id, -1)
        set_account_limit('root', cls.rse5_id, -1)
    def test_judge_inject_rule(self):
        """ JUDGE INJECTOR: Test the judge when injecting a rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')
        # Add a first rule to the DS.  An asynchronous rule starts in the
        # INJECT state instead of being evaluated immediately.
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, asynchronous=True)[0]
        assert(get_rule(rule_id)['state'] == RuleState.INJECT)
        # One pass of the injector daemon should evaluate the rule.
        rule_injector(once=True)
        # Check if the Locks are created properly: two copies per file.
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
        assert(get_rule(rule_id)['state'] == RuleState.REPLICATING)
    def test_judge_ask_approval(self):
        """ JUDGE INJECTOR: Test the judge when asking approval for a rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')
        # Add a first rule to the DS; ask_approval parks it in
        # WAITING_APPROVAL until an approver acts on it.
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse4, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, ask_approval=True)[0]
        assert(get_rule(rule_id)['state'] == RuleState.WAITING_APPROVAL)
        # Approval moves the rule into INJECT; the injector then evaluates it.
        approve_rule(rule_id=rule_id, approver='root')
        assert(get_rule(rule_id)['state'] == RuleState.INJECT)
        rule_injector(once=True)
        # Check if the Locks are created properly: one copy per file.
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 1)
        assert(get_rule(rule_id)['state'] == RuleState.REPLICATING)
    def test_judge_deny_rule(self):
        """ JUDGE INJECTOR: Test the judge when asking approval for a rule and denying it"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')
        # Add a first rule to the DS, parked in WAITING_APPROVAL.
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse4, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, ask_approval=True)[0]
        assert(get_rule(rule_id)['state'] == RuleState.WAITING_APPROVAL)
        # Denying the rule must delete it entirely.
        deny_rule(rule_id=rule_id, approver='root')
        assert_raises(RuleNotFound, get_rule, rule_id)
def test_add_rule_with_r2d2_container_treating(self):
""" JUDGE INJECTOR (CORE): Add a replication rule with an r2d2 container treatment"""
scope = 'mock'
container = 'asdf.r2d2_request.2016-04-01-15-00-00.ads.' + str(uuid())
add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
datasets = []
for i in range(3):
files = create_files(3, scope, self.rse1)
dataset = 'dataset_' + str(uuid())
datasets.append(dataset)
add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
attach_dids(scope, dataset, files, 'jdoe')
attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')
rule_id = add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=100, locked=False, subscription_id=None, ask_approval=True)[0]
approve_rule(rule_id, approver='root')
assert(get_rule(rule_id)['state'] == RuleState.INJECT)
rule_injector(once=True)
# Check if there is a rule for each file
with assert_raises(RuleNotFound):
get_rule(rule_id)
for dataset in datasets:
assert(len([r for r in list_rules({'scope': scope, 'name': dataset})]) > 0)
def test_add_rule_with_r2d2_container_treating_and_duplicate_rule(self):
""" JUDGE INJECTOR (CORE): Add a replication rule with an r2d2 container treatment and duplicate rule"""
scope = 'mock'
container = 'asdf.r2d2_request.2016-04-01-15-00-00.ads.' + str(uuid())
add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
datasets = []
for i in range(3):
files = create_files(3, scope, self.rse1)
dataset = 'dataset_' + str(uuid())
datasets.append(dataset)
add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
attach_dids(scope, dataset, files, 'jdoe')
attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')
add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=100, locked=False, subscription_id=None, ask_approval=False)
rule_id = add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=100, locked=False, subscription_id=None, ask_approval=True)[0]
approve_rule(rule_id, approver='root')
assert(get_rule(rule_id)['state'] == RuleState.INJECT)
rule_injector(once=True)
# Check if there is a rule for each file
with assert_raises(RuleNotFound):
get_rule(rule_id)
for dataset in datasets:
assert(len([r for r in list_rules({'scope': scope, 'name': dataset})]) > 0)
| 46.848837 | 227 | 0.657111 |
ec37d0edf5d3d5e5456eeafde80463e6a20eb512 | 9,835 | py | Python | test/tst_slicing.py | sridish123/netcdf4-python | c9520f4e697d6ba1ba170748547bafa681bf5177 | [
"MIT"
] | null | null | null | test/tst_slicing.py | sridish123/netcdf4-python | c9520f4e697d6ba1ba170748547bafa681bf5177 | [
"MIT"
] | null | null | null | test/tst_slicing.py | sridish123/netcdf4-python | c9520f4e697d6ba1ba170748547bafa681bf5177 | [
"MIT"
] | 1 | 2021-05-17T07:15:47.000Z | 2021-05-17T07:15:47.000Z | from netCDF4 import Dataset
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal,\
assert_array_almost_equal
import tempfile, unittest, os, random
import numpy as np
# Scratch netCDF file shared by every test in this module (deleted in tearDown
# via os.remove; delete=False keeps it alive after the handle is discarded).
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
# Fixture dimensions and data: random unsigned bytes in [0, 10), plus a copy
# reversed along the y axis (tests write reversed and read back both ways).
xdim=9; ydim=10; zdim=11
#seed(9) # fix seed
data = randint(0,10,size=(xdim,ydim,zdim)).astype('u1')
datarev = data[:,::-1,:]
class VariablesTestCase(unittest.TestCase):
    """Regression tests for netCDF4 Variable slicing/indexing semantics.

    setUp writes the module-level random ``data`` array (reversed along y)
    into a scratch file; the tests read it back through every supported
    indexing form (slices, steps, Ellipsis, fancy/boolean indexing,
    broadcasting on assignment) and reproduce specific GitHub issues.
    """

    def setUp(self):
        # Create the scratch file with one fixed-size 3-D variable, one
        # variable with two unlimited dimensions, and one 1-D variable.
        self.file = file_name
        f = Dataset(file_name,'w')
        f.createDimension('x',xdim)
        f.createDimension('xu',None)
        f.createDimension('y',ydim)
        f.createDimension('z',zdim)
        f.createDimension('zu',None)
        v = f.createVariable('data','u1',('x','y','z'))
        vu = f.createVariable('datau','u1',('xu','y','zu'))
        v1 = f.createVariable('data1d', 'u1', ('x',))
        # variable with no unlimited dim.
        # write slice in reverse order
        v[:,::-1,:] = data
        # variable with an unlimited dimension.
        # write slice in reverse order
        #vu[0:xdim,::-1,0:zdim] = data
        vu[:,::-1,:] = data
        v1[:] = data[:, 0, 0]
        f.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def test_3d(self):
        """testing variable slicing"""
        f = Dataset(self.file, 'r')
        v = f.variables['data']
        vu = f.variables['datau']
        # test return of array scalar.
        assert_equal(v[0,0,0].shape,())
        assert_array_equal(v[:], datarev)
        # test reading of slices.
        # negative value means count back from end.
        assert_array_equal(v[:-1,:-2,:-3],datarev[:-1,:-2,:-3])
        # every other element (positive step)
        assert_array_equal(v[2:-1:2,2:-2:2,2:-3:2],datarev[2:-1:2,2:-2:2,2:-3:2])
        # every other element (negative step)
        assert_array_equal(v[-1:2:-2,-2:2:-2,-3:2:-2],datarev[-1:2:-2,-2:2:-2,-3:2:-2])
        # read elements in reverse order
        assert_array_equal(v[:,::-1,:],data)
        assert_array_equal(v[::-1,:,::-1],datarev[::-1,:,::-1])
        assert_array_equal(v[xdim-1::-3,:,zdim-1::-3],datarev[xdim-1::-3,:,zdim-1::-3])
        # ellipsis slice.
        assert_array_equal(v[...,2:],datarev[...,2:])
        # variable with an unlimited dimension.
        assert_array_equal(vu[:], data[:,::-1,:])
        # read data in reverse order
        assert_array_equal(vu[:,::-1,:],data)
        # index using an integer array scalar
        i = np.ones(1,'i4')[0]
        assert_array_equal(v[i],datarev[1])
        f.close()

    def test_1d(self):
        """1-D indexing: slices, integer arrays, boolean masks, scalars."""
        f = Dataset(self.file, 'r')
        v1 = f.variables['data1d']
        d = data[:,0,0]
        assert_equal(v1[:], d)
        assert_equal(v1[4:], d[4:])
        # test return of array scalar.
        assert_equal(v1[0].shape, ())
        i1 = np.array([2,3,4])
        assert_equal(v1[i1], d[i1])
        i2 = np.array([2,3,5])
        assert_equal(v1[i2], d[i2])
        assert_equal(v1[d<5], d[d<5])
        assert_equal(v1[5], d[5])
        f.close()

    def test_0d(self):
        """Scalar (dimensionless) variables indexed with Ellipsis."""
        f = Dataset(self.file, 'w')
        v = f.createVariable('data', float)
        v[...] = 10
        assert_array_equal(v[...], 10)
        assert_equal(v.shape, v[...].shape)
        # issue #785: always return masked array
        #assert(type(v[...]) == np.ndarray)
        assert(type(v[...]) == np.ma.core.MaskedArray)
        f.set_auto_mask(False)
        assert(type(v[...]) == np.ndarray)
        f.close()

    def test_issue259(self):
        """Assigning a scalar along an unlimited dim grows it consistently."""
        dset = Dataset(self.file, 'w', format='NETCDF4_CLASSIC')
        dset.createDimension('dim', None)
        a = dset.createVariable('a', 'i', ('dim',))
        b = dset.createVariable('b', 'i', ('dim',))
        c = dset.createVariable('c', 'i', ('dim',))
        c[:] = 1 # c initially is empty, new entry created
        assert_array_equal(c[...], np.array([1]))
        b[:] = np.array([1,1])
        a[:] = 1 # a should be same as b
        assert_array_equal(a[...], b[...])
        dset.close()

    def test_issue371(self):
        """Ellipsis combined with an explicit slice on assignment."""
        dataset = Dataset(self.file, 'w')
        dataset.createDimension('dim', 5)
        var = dataset.createVariable('bar', 'i8', ('dim', ))
        data = [1, 2, 3, 4, 5]
        var[..., :] = data
        assert_array_equal(var[..., :], np.array(data))
        dataset.close()

    def test_issue306(self):
        """Out-of-range and malformed indices must raise IndexError."""
        f = Dataset(self.file,'w')
        nlats = 7; lat = f.createDimension('lat',nlats)
        nlons = 12; lon = f.createDimension('lon',nlons)
        nlevs = 1; lev = f.createDimension('lev',nlevs)
        time = f.createDimension('time',None)
        var = f.createVariable('var',np.float64,('time','lev','lat','lon'))
        a = np.random.uniform(size=(10,nlevs,nlats,nlons))
        var[0:10] = a
        f.close()
        f = Dataset(self.file)
        aa = f.variables['var'][4,-1,:,:]
        assert_array_almost_equal(a[4,-1,:,:],aa)
        v = f.variables['var']
        try:
            aa = v[4,-2,:,:] # -2 when dimension is length 1
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[4,...,...,:] # more than one Ellipsis
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[:,[True,True],:,:] # boolean array too long.
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[:,[0,1],:,:] # integer index too large
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        f.close()

    def test_issue300(self):
        """Boolean-array and mixed-type indexing (also issue #312)."""
        f = Dataset(self.file,'w')
        nlats = 11; lat = f.createDimension('lat',nlats)
        nlons = 20; lon = f.createDimension('lon',nlons)
        time = f.createDimension('time',None)
        var = f.createVariable('var',np.float64,('time','lat','lon'))
        a = np.random.uniform(size=(3,nlats,nlons))
        var[[True,True,False,False,False,True]] = a
        var[0,2.0,"-1"] = 0 # issue 312
        a[0,2,-1]=0
        f.close()
        f = Dataset(self.file)
        var = f.variables['var']
        aa = var[[0,1,5]]
        bb = var[[True,True,False,False,False,True]]
        lats = np.arange(nlats); lons = np.arange(nlons)
        cc = var[-1,lats > 2,lons < 6]
        assert_array_almost_equal(a,aa)
        assert_array_almost_equal(bb,aa)
        assert_array_almost_equal(cc,a[-1,3:,:6])
        f.close()

    def test_retain_single_dims(self):
        """Sequence indices keep length-1 dims; scalar indices drop them."""
        f = Dataset(self.file, 'r')
        v = f.variables['data']
        keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), (5,), (4,))
        shape = (9, 1, 1)
        data = v[keys]
        assert_equal(data.shape, shape)
        keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), 5, 4,)
        shape = (9,)
        data = v[keys]
        assert_equal(data.shape, shape)
        f.close()

    def test_issue743(self):
        """Reading an empty unlimited-dim variable with Ellipsis."""
        nc = Dataset(self.file,'w',format='NETCDF3_CLASSIC')
        td = nc.createDimension('t',None)
        xd = nc.createDimension('x',33)
        yd = nc.createDimension('y',4)
        v = nc.createVariable('v',np.float64,('t','x','y'))
        nc.close()
        nc = Dataset(self.file)
        data = np.empty(nc['v'].shape, nc['v'].dtype)
        data2 = nc['v'][...]
        assert_array_equal(data,data2)
        nc.close()

    def test_issue906(self):
        """Assignment along an unlimited middle dimension."""
        f = Dataset(self.file,'w')
        f.createDimension('d1',3)
        f.createDimension('d2',None)
        f.createDimension('d3',5)
        f.createVariable('v2',np.float64,('d1','d2','d3'))
        f['v2'][:] = np.zeros((3,4,5))
        f['v2'][0,:,0] = np.arange(4)
        f['v2'][0,:,:] = np.ones((4,5))
        f.close()

    def test_issue919(self):
        """Broadcasting smaller arrays on full-slice assignment."""
        with Dataset(self.file,'w') as f:
            f.createDimension('time',2)
            f.createDimension('lat',10)
            f.createDimension('lon',9)
            f.createVariable('v1',np.int64,('time', 'lon','lat',))
            arr = np.arange(9*10).reshape((9, 10))
            f['v1'][:] = arr
            assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))
            arr = np.arange(10)
            f['v1'][:] = arr
            assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))

    def test_issue922(self):
        """Broadcasting when the unlimited dimension is first or last."""
        with Dataset(self.file,'w') as f:
            f.createDimension('d1',3)
            f.createDimension('d2',None)
            f.createVariable('v1',np.int64,('d2','d1',))
            f['v1'][0] = np.arange(3,dtype=np.int64)
            f['v1'][1:3] = np.arange(3,dtype=np.int64)
            assert_array_equal(f['v1'][:], np.broadcast_to(np.arange(3),(3,3)))
            f.createVariable('v2',np.int64,('d1','d2',))
            f['v2'][:,0] = np.arange(3,dtype=np.int64)
            f['v2'][:,1:3] = np.arange(6,dtype=np.int64).reshape(3,2)
            assert_array_equal(f['v2'][:,1:3],np.arange(6,dtype=np.int64).reshape(3,2))
            assert_array_equal(f['v2'][:,0],np.arange(3,dtype=np.int64))

    def test_issue1083(self):
        """Broadcasting shapes with interior length-1 axes (PR #1084)."""
        with Dataset(self.file, "w") as nc:
            nc.createDimension("test", 5)
            v = nc.createVariable("var", "f8", ("test", "test", "test"))
            v[:] = 1 # works
            v[:] = np.ones(()) # works
            v[:] = np.ones((1,)) # works
            v[:] = np.ones((5,)) # works
            v[:] = np.ones((5,5,5)) # works
            v[:] = np.ones((5,1,1)) # fails (before PR #1084)
            v[:] = np.ones((5,1,5)) # fails (before PR #1084)
            v[:] = np.ones((5,5,1)) # fails (before PR #1084)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 36.835206 | 87 | 0.525572 |
c7d0691bba174e5f070dad0b0b9d66c529352d0f | 2,082 | py | Python | aliyun-python-sdk-schedulerx2/aliyunsdkschedulerx2/request/v20190430/DisableWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-schedulerx2/aliyunsdkschedulerx2/request/v20190430/DisableWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-schedulerx2/aliyunsdkschedulerx2/request/v20190430/DisableWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkschedulerx2.endpoint import endpoint_data
class DisableWorkflowRequest(RpcRequest):
    """RPC request wrapper for the SchedulerX2 ``DisableWorkflow`` API
    (product ``schedulerx2``, version ``2019-04-30``), sent as an HTTPS GET.

    Generated Alibaba Cloud SDK code: each API parameter gets a paired
    getter/setter that reads from / writes to the request's query params.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'schedulerx2', '2019-04-30', 'DisableWorkflow')
        self.set_protocol_type('https')
        self.set_method('GET')
        # Attach per-region endpoint data only when the installed SDK core
        # exposes these attributes (older cores do not).
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_NamespaceSource(self): # String
        return self.get_query_params().get('NamespaceSource')

    def set_NamespaceSource(self, NamespaceSource): # String
        self.add_query_param('NamespaceSource', NamespaceSource)

    def get_GroupId(self): # String
        return self.get_query_params().get('GroupId')

    def set_GroupId(self, GroupId): # String
        self.add_query_param('GroupId', GroupId)

    def get_Namespace(self): # String
        return self.get_query_params().get('Namespace')

    def set_Namespace(self, Namespace): # String
        self.add_query_param('Namespace', Namespace)

    def get_WorkflowId(self): # Long
        return self.get_query_params().get('WorkflowId')

    def set_WorkflowId(self, WorkflowId): # Long
        self.add_query_param('WorkflowId', WorkflowId)
| 37.854545 | 76 | 0.760807 |
e71905cf7deea3e60dbe9e904ce429b2dcfdc088 | 4,028 | py | Python | Tests/fTestDependencies.py | SkyLined/mFileSystem2 | b0e70564f6dcd31f20d4d722e2a5aba6ccec9d12 | [
"CC-BY-4.0"
] | 2 | 2019-07-23T06:49:49.000Z | 2021-01-30T07:29:30.000Z | Tests/fTestDependencies.py | SkyLined/mFileSystem2 | b0e70564f6dcd31f20d4d722e2a5aba6ccec9d12 | [
"CC-BY-4.0"
] | null | null | null | Tests/fTestDependencies.py | SkyLined/mFileSystem2 | b0e70564f6dcd31f20d4d722e2a5aba6ccec9d12 | [
"CC-BY-4.0"
] | null | null | null | def fTestDependencies():
import sys;
# Save the list of names of default loaded modules:
def fasGetLoadedModulesNames():
return set([
sModuleName.split(".", 1)[0].lstrip("_")
for sModuleName in sys.modules.keys()
]);
asOriginalModuleNames = fasGetLoadedModulesNames();
import json, os;
# Augment the search path to make the test subject a package and have access to its modules folder.
sTestsFolderPath = os.path.dirname(os.path.abspath(__file__));
sMainFolderPath = os.path.dirname(sTestsFolderPath);
sParentFolderPath = os.path.dirname(sMainFolderPath);
sModulesFolderPath = os.path.join(sMainFolderPath, "modules");
asOriginalSysPath = sys.path[:];
sys.path = [sParentFolderPath, sModulesFolderPath] + asOriginalSysPath;
# Load product details
oProductDetailsFile = open(os.path.join(sMainFolderPath, "dxProductDetails.json"), "rb");
try:
dxProductDetails = json.load(oProductDetailsFile);
finally:
oProductDetailsFile.close();
# Load list of dependencies on python internal modules:
sInternalPythonModuleDepenciesListFilePath = os.path.join(sTestsFolderPath, "internal-python-module-dependencies.txt");
if os.path.isfile(sInternalPythonModuleDepenciesListFilePath):
oInternalPythonModuleDepenciesListFile = open(sInternalPythonModuleDepenciesListFilePath, "rb");
try:
sInternalPythonModuleDepenciesList = oInternalPythonModuleDepenciesListFile.read();
finally:
oInternalPythonModuleDepenciesListFile.close();
asInternalPythonModuleDepencies = [s.rstrip("\r") for s in sInternalPythonModuleDepenciesList.split("\n") if s.rstrip("\r")];
else:
asInternalPythonModuleDepencies = [];
# We loaded these ourselves, so they cannot be checked and do not need to be
# specified in the list:
asAlwaysLoadedPythonModules = ["os", "sys", "json"];
# Load the module and all its dependencies:
__import__(dxProductDetails["sProductName"], globals(), locals(), [], -1);
# Determine which modules were loaded as dependencies.
asAdditionalLoadedModuleNames = [
sModuleName
for sModuleName in fasGetLoadedModulesNames()
if (
sModuleName != dxProductDetails["sProductName"]
and sModuleName not in asOriginalModuleNames
)
];
# Make sure nothing is loaded that is not expected to be loaded to detect new dependencies.
asUnexpectedlyLoadedModules = list(set([
sModuleName
for sModuleName in asAdditionalLoadedModuleNames
if sModuleName not in (
dxProductDetails.get("asDependentOnProductNames", []) +
dxProductDetails.get("asOptionalProductNames", []) +
asAlwaysLoadedPythonModules +
asInternalPythonModuleDepencies
)
]));
assert len(asUnexpectedlyLoadedModules) == 0, \
"The following modules are NOT listed as a dependency but were loaded:\r\n%s" % \
"\r\n".join(sorted(asUnexpectedlyLoadedModules, key = lambda s: unicode(s).lower()));
# Make sure that all dependencies are in fact loaded to detect stale dependencies.
asSuperflousDependencies = [
sModuleName
for sModuleName in dxProductDetails.get("asDependentOnProductNames", [])
if sModuleName not in asAdditionalLoadedModuleNames
];
assert len(asSuperflousDependencies) == 0, \
"The following modules are listed as a dependency but not loaded:\r\n%s" % \
"\r\n".join(sorted(asSuperflousDependencies, key = lambda s: unicode(s).lower()));
# Make sure that all internal python modules dependencies are in fact loaded
# to detect stale dependencies.
asSuperflousInternalDependencies = [
sModuleName
for sModuleName in asInternalPythonModuleDepencies
if (
sModuleName not in asAdditionalLoadedModuleNames
and sModuleName not in asAlwaysLoadedPythonModules
)
];
assert len(asSuperflousInternalDependencies) == 0, \
"The following modules are listed as an internal python module dependency but not loaded:\r\n%s" % \
"\r\n".join(sorted(asSuperflousInternalDependencies, key = lambda s: unicode(s).lower()));
| 46.298851 | 129 | 0.741063 |
781c2379eae7e20edec9263003327da7af3dff41 | 1,567 | py | Python | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/models/dependency_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/models/dependency_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/models/dependency_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dependency(Model):
    """Describes a single deployment dependency edge.

    :param depends_on: The list of dependencies.
    :type depends_on:
     list[~azure.mgmt.resource.resources.v2018_05_01.models.BasicDependency]
    :param id: The ID of the dependency.
    :type id: str
    :param resource_type: The dependency resource type.
    :type resource_type: str
    :param resource_name: The dependency resource name.
    :type resource_name: str
    """

    # Wire-format map consumed by the msrest serializer: attribute name ->
    # JSON key and declared type. Must match the service contract exactly.
    _attribute_map = {
        'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
        'id': {'key': 'id', 'type': 'str'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'resource_name': {'key': 'resourceName', 'type': 'str'},
    }

    def __init__(self, *, depends_on=None, id: str=None, resource_type: str=None, resource_name: str=None, **kwargs) -> None:
        # Let the msrest base class process any extra keyword arguments first.
        super(Dependency, self).__init__(**kwargs)
        # Plain attribute assignments; order is irrelevant, all default None.
        self.id = id
        self.resource_name = resource_name
        self.resource_type = resource_type
        self.depends_on = depends_on
| 37.309524 | 125 | 0.610721 |
20e695391e240fb4e6e80c1a51444f690b3e82c9 | 5,564 | py | Python | docs/conf.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# lakesuperior documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 24 23:05:46 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
from os import path
import lakesuperior
import lakesuperior.env_setup
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.imgmath',
    'sphinx.ext.viewcode',
    'sphinxcontrib.autoyaml',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'lakesuperior'
copyright = '2019, Knowledge Transfer, LLC'
author = 'Stefano Cossu'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both values are taken from the package itself so the docs always track
# the installed code.
version = lakesuperior.version
release = lakesuperior.release

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'lakesuperiordoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'lakesuperior.tex', 'lakesuperior Documentation',
     'Stefano Cossu', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lakesuperior', 'lakesuperior Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'lakesuperior', 'lakesuperior Documentation',
     author, 'lakesuperior', 'One line description of project.',
     'Miscellaneous'),
]


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# Autoyaml extension
# Root directory scanned by sphinxcontrib-autoyaml for YAML config files.
autoyaml_root = path.join(lakesuperior.basedir, 'etc.defaults')
| 30.23913 | 79 | 0.691229 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.