hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acde35927db05962fdf0cd827acba46d15ccaf33 | 8,153 | py | Python | sdks/python/setup.py | leo-unc/beam | 6b43523e976b211f25449fa54d5c57bafe09f0a9 | [
"Apache-2.0"
] | null | null | null | sdks/python/setup.py | leo-unc/beam | 6b43523e976b211f25449fa54d5c57bafe09f0a9 | [
"Apache-2.0"
] | null | null | null | sdks/python/setup.py | leo-unc/beam | 6b43523e976b211f25449fa54d5c57bafe09f0a9 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
from __future__ import absolute_import
from __future__ import print_function
import os
import platform
import sys
import warnings
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
from setuptools.command.test import test
def get_version():
  """Return ``__version__`` from apache_beam/version.py next to this file.

  The version module is executed (rather than imported) so that setup.py
  does not require the apache_beam package to be importable, which it may
  not be at build time.
  """
  global_names = {}
  version_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), 'apache_beam/version.py')
  # Use a context manager so the file handle is closed promptly; the
  # original implementation leaked the handle to the garbage collector.
  with open(version_path) as version_file:
    exec(version_file.read(), global_names)  # pylint: disable=exec-used
  return global_names['__version__']
# ---------------------------------------------------------------------------
# Package metadata consumed by the setuptools.setup() call below.
# ---------------------------------------------------------------------------
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = 'dev@beam.apache.org'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
# Warn (but do not fail) when the environment's pip is older than the
# recommended version; installation may still succeed.
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
  warnings.warn(
      "You are using version {0} of pip. " \
      "However, version {1} is recommended.".format(
          _PIP_VERSION, REQUIRED_PIP_VERSION
      )
  )
# Same advisory check for Cython; Cython is entirely optional (see below).
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
  _CYTHON_VERSION = get_distribution('cython').version
  if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
    warnings.warn(
        "You are using version {0} of cython. " \
        "However, version {1} is recommended.".format(
            _CYTHON_VERSION, REQUIRED_CYTHON_VERSION
        )
    )
except DistributionNotFound:
  # do nothing if Cython is not installed
  pass
# Currently all compiled modules are optional (for performance only).
# When Cython is unavailable (or on Windows), ``cythonize`` is replaced with
# a stub returning an empty extension list, so the pure-Python build works.
if platform.system() == 'Windows':
  # Windows doesn't always provide int64_t.
  cythonize = lambda *args, **kwargs: []
else:
  try:
    # pylint: disable=wrong-import-position
    from Cython.Build import cythonize
  except ImportError:
    cythonize = lambda *args, **kwargs: []
# Runtime dependencies. Environment markers (``python_version`` /
# ``platform_system``) gate packages that only apply to Python 2 or 3.
REQUIRED_PACKAGES = [
    'avro>=1.8.1,<2.0.0; python_version < "3.0"',
    'avro-python3>=1.8.1,<2.0.0; python_version >= "3.0"',
    'crcmod>=1.7,<2.0',
    'dill>=0.2.9,<0.2.10',
    'fastavro>=0.21.4,<0.22',
    'future>=0.16.0,<1.0.0',
    'futures>=3.2.0,<4.0.0; python_version < "3.0"',
    'grpcio>=1.8,<2',
    'hdfs>=2.1.0,<3.0.0',
    'httplib2>=0.8,<=0.12.0',
    'mock>=1.0.1,<3.0.0',
    'oauth2client>=2.0.1,<4',
    # grpcio 1.8.1 and above requires protobuf 3.5.0.post1.
    'protobuf>=3.5.0.post1,<4',
    # [BEAM-6287] pyarrow is not supported on Windows for Python 2
    ('pyarrow>=0.11.1,<0.12.0; python_version >= "3.0" or '
     'platform_system != "Windows"'),
    'pydot>=1.2.0,<1.3',
    'pytz>=2018.3',
    # [BEAM-5628] Beam VCF IO is not supported in Python 3.
    'pyvcf>=0.6.8,<0.7.0; python_version < "3.0"',
    'pyyaml>=3.12,<4.0.0',
    'typing>=3.6.0,<3.7.0; python_version < "3.5.0"',
]
# Extra dependencies pulled in for the 'test' extra and tests_require.
REQUIRED_TEST_PACKAGES = [
    'nose>=1.3.7',
    'numpy>=1.14.3,<2',
    'pandas>=0.23.4,<0.24',
    'parameterized>=0.6.0,<0.7.0',
    'pyhamcrest>=1.9,<2.0',
    'tenacity>=5.0.2,<6.0',
]
# Dependencies for the optional 'gcp' extra (Google Cloud Platform IOs).
GCP_REQUIREMENTS = [
    'cachetools>=3.1.0,<4',
    # google-apitools 0.5.23 and above has important Python 3 supports.
    'google-apitools>=0.5.26,<0.5.27',
    # [BEAM-4543] googledatastore is not supported in Python 3.
    'proto-google-cloud-datastore-v1>=0.90.0,<=0.90.4; python_version < "3.0"',
    # [BEAM-4543] googledatastore is not supported in Python 3.
    'googledatastore>=7.0.1,<7.1; python_version < "3.0"',
    'google-cloud-datastore==1.7.1',
    'google-cloud-pubsub==0.39.0',
    # GCP packages required by tests
    'google-cloud-bigquery>=1.6.0,<1.7.0',
    'google-cloud-core==0.28.1',
    'google-cloud-bigtable==0.31.1',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
  """Wrap a setuptools command class so protos are generated before it runs.

  Returns a subclass of ``original_cmd`` whose ``run`` first regenerates the
  protobuf modules via the local ``gen_protos`` helper. When ``gen_protos``
  cannot be imported, a warning is emitted and ``original_cmd`` is returned
  unchanged.
  """
  try:
    # See https://issues.apache.org/jira/browse/BEAM-2366
    # pylint: disable=wrong-import-position
    import gen_protos
  except ImportError:
    warnings.warn("Could not import gen_protos, skipping proto generation.")
    return original_cmd

  class cmd(original_cmd, object):
    def run(self):
      # Regenerate the proto files, then hand off to the wrapped command.
      gen_protos.generate_proto_files()
      super(cmd, self).run()

  return cmd
# Supported interpreters: CPython 2.7 and 3.5+ (3.0-3.4 excluded).
python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
if sys.version_info[0] == 3:
  warnings.warn(
      'Python 3 support for the Apache Beam SDK is not yet fully supported. '
      'You may encounter buggy behavior or missing features.')
setuptools.setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description=PACKAGE_DESCRIPTION,
    long_description=PACKAGE_LONG_DESCRIPTION,
    url=PACKAGE_URL,
    download_url=PACKAGE_DOWNLOAD_URL,
    author=PACKAGE_AUTHOR,
    author_email=PACKAGE_EMAIL,
    packages=setuptools.find_packages(),
    # Ship Cython sources and test/portability data files with the package.
    package_data={'apache_beam': [
        '*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', 'testing/data/*.yaml',
        'portability/api/*.yaml']},
    # ``cythonize`` is the real Cython entry point when available, otherwise
    # a stub returning [] (see above), making compiled modules optional.
    ext_modules=cythonize([
        'apache_beam/**/*.pyx',
        'apache_beam/coders/coder_impl.py',
        'apache_beam/metrics/execution.py',
        'apache_beam/runners/common.py',
        'apache_beam/runners/worker/logger.py',
        'apache_beam/runners/worker/opcounters.py',
        'apache_beam/runners/worker/operations.py',
        'apache_beam/transforms/cy_combiners.py',
        'apache_beam/utils/counters.py',
        'apache_beam/utils/windowed_value.py',
    ]),
    install_requires=REQUIRED_PACKAGES,
    python_requires=python_requires,
    test_suite='nose.collector',
    tests_require=REQUIRED_TEST_PACKAGES,
    extras_require={
        'docs': ['Sphinx>=1.5.2,<2.0'],
        'test': REQUIRED_TEST_PACKAGES,
        'gcp': GCP_REQUIREMENTS,
    },
    zip_safe=False,
    # PyPI package information.
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache License, Version 2.0',
    keywords=PACKAGE_KEYWORDS,
    entry_points={
        'nose.plugins.0.10': [
            'beam_test_plugin = test_config:BeamTestPlugin',
        ]},
    # Every build-related command regenerates the protos first.
    cmdclass={
        'build_py': generate_protos_first(build_py),
        'develop': generate_protos_first(develop),
        'egg_info': generate_protos_first(egg_info),
        'sdist': generate_protos_first(sdist),
        'test': generate_protos_first(test),
    },
)
| 34.112971 | 79 | 0.679137 |
acde364f2542b681d954e717efb88b0e24024206 | 5,686 | py | Python | sl4atools/fullscreenwrapper2/examples/gyro_sl4a_test/gyro_sl4a_test.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 267 | 2015-03-22T15:23:48.000Z | 2022-03-05T21:57:34.000Z | sl4atools/fullscreenwrapper2/examples/gyro_sl4a_test/gyro_sl4a_test.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 133 | 2015-03-21T15:13:43.000Z | 2021-12-11T23:37:58.000Z | sl4atools/fullscreenwrapper2/examples/gyro_sl4a_test/gyro_sl4a_test.py | stormtheh4ck3r/python-for-android | b9ea9161392f60566b81482b1e25cd77004d5c45 | [
"Apache-2.0"
] | 119 | 2015-04-28T16:07:10.000Z | 2022-03-18T03:49:48.000Z | '''
@copyright: Hariharan Srinath, 2012
@license: This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/
'''
xmldata = """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:orientation="vertical"
xmlns:android="http://schemas.android.com/apk/res/android">
<TextView
android:layout_width="fill_parent"
android:layout_height="0px"
android:textSize="20dp"
android:id="@+id/txt_logo"
android:text="Gyro Test"
android:textColor="#ffffffff"
android:layout_weight="19"
android:gravity="center"
/>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="0px"
android:orientation="horizontal"
android:layout_weight="27">
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff66a3d2"
android:id="@+id/txt_top"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:layout_weight="1"
android:gravity="center"/>
</LinearLayout>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="0px"
android:orientation="horizontal"
android:layout_weight="27">
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff66a3d2"
android:id="@+id/txt_left"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff66a3d2"
android:layout_weight="1"
android:id="@+id/txt_right"
android:gravity="center"/>
</LinearLayout>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="0px"
android:orientation="horizontal"
android:layout_weight="27">
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff66a3d2"
android:id="@+id/txt_bottom"
android:layout_weight="1"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:layout_weight="1"
android:gravity="center"/>
</LinearLayout>
</LinearLayout>"""
import android
import math
droid = android.Android()
from fullscreenwrapper2 import *
class GyroTestLayout(Layout):
    """SL4A fullscreen layout that colors screen tiles from gyro pitch/roll."""
    def __init__(self):
        # Build the layout from the module-level XML definition above.
        super(GyroTestLayout,self).__init__(xmldata,"GyroTest")
    def on_show(self):
        # Any key press closes the app; "sensors" events drive the display.
        self.add_event(key_EventHandler(handler_function=self.close_app))
        self.add_event(EventHandler("sensors", None, None, None, self.gyro))
    def on_close(self):
        # No cleanup needed; sensing is stopped by the process exiting.
        pass
    def gyro(self,view,event):
        """Map pitch to the top/bottom tiles and roll to left/right tiles.

        The sign of the scaled reading selects which tile gets the intensity
        color; the opposite tile is reset to the base color.
        """
        # Pitch scaled to roughly 0-59 "intensity" units (see get_color).
        value = int(event["data"]["pitch"]*60.0/math.pi*2)
        color, basecolor = self.get_color(abs(value))
        if value > 0:
            self.views.txt_top.background = color
            self.views.txt_bottom.background = basecolor
        else:
            self.views.txt_top.background = basecolor
            self.views.txt_bottom.background = color
        # Roll uses a different scale factor (x4 instead of x2).
        value = int(event["data"]["roll"]*60.0/math.pi*4)
        color, basecolor = self.get_color(abs(value))
        if value > 0:
            self.views.txt_right.background = color
            self.views.txt_left.background = basecolor
        else:
            self.views.txt_right.background = basecolor
            self.views.txt_left.background = color
    def close_app(self,view,event):
        FullScreenWrapper2App.exit_FullScreenWrapper2App()
    # Heat-map palette: index 0 is the neutral base color, rising indices are
    # green-to-red intensity steps (one per 5 units, 12 entries for 0-59).
    colorvals = ["#ff66a3d2","#FF63BE7B", "#FF83C77D","#FFA2D07F", "#FFC1DA81", "#FFE0E383", "#FFFFEB84", "#FFFDD17F", "#FFFCB77A", "#FFFA9D75", "#FFF98370", "#FFF8696B"]
    def get_color(self,value):
        """Return (intensity color for |value| clamped to 0-59, base color)."""
        value = abs(value)
        if value >59:
            value = 59
        # NOTE(review): dead branch — value is non-negative after abs() above.
        if value < 0:
            value = 0
        return self.colorvals[int(value/5)],self.colorvals[0]
if __name__ == '__main__':
    # Wire the SL4A droid facade into the wrapper, show the layout, start
    # sensor sampling, then hand control to the framework's event loop.
    FullScreenWrapper2App.initialize(droid)
    FullScreenWrapper2App.show_layout(GyroTestLayout())
    # startSensingTimed(1, 200): presumably sensor category 1 sampled every
    # 200 ms — confirm against the SL4A SensorManager facade docs.
    droid.startSensingTimed(1,200)
    FullScreenWrapper2App.eventloop()
| 34.883436 | 171 | 0.607985 |
acde38250bd7c437428a3e309e37e8960293a134 | 2,669 | py | Python | py/dirbalak/server/spawngithubwebeventlistener.py | shlomimatichin/dirbalak | 218441fe55715c0602dd41142ae6a34ddfef6b38 | [
"Apache-2.0"
] | null | null | null | py/dirbalak/server/spawngithubwebeventlistener.py | shlomimatichin/dirbalak | 218441fe55715c0602dd41142ae6a34ddfef6b38 | [
"Apache-2.0"
] | null | null | null | py/dirbalak/server/spawngithubwebeventlistener.py | shlomimatichin/dirbalak | 218441fe55715c0602dd41142ae6a34ddfef6b38 | [
"Apache-2.0"
] | null | null | null | from dirbalak.server import githubwebeventlistener
import logging
import threading
import atexit
import signal
import os
import sys
import select
import subprocess
class SpawnGithubWebEventListener(threading.Thread):
    """Fork a privilege-dropped child running the GitHub web-event listener.

    The child's stdout is redirected into a pipe; this thread reads repo
    names from the pipe (one per line) and invokes ``callback`` for each.
    Liveness of the child's TCP server is checked via netstat every 30s of
    silence; on any failure both processes are torn down.
    """
    def __init__(self, callback, port=60004, downgradeUID=10000, downgradeGID=10000):
        self._callback = callback
        self._port = port
        self._downgradeUID = downgradeUID
        self._downgradeGID = downgradeGID
        threading.Thread.__init__(self)
        self._readPipe, self._writePipe = os.pipe()
        # NOTE(review): this fd is wrapped again in run() via a second
        # os.fdopen on the same descriptor — verify only one wrapper is kept,
        # otherwise closing both file objects double-closes the fd.
        self._read = os.fdopen(self._readPipe)
        self._childPid = os.fork()
        if self._childPid == 0:
            # Child branch: run the listener and never return to caller code.
            self._child()
            sys.exit()
        logging.info("forked github webevent listener at pid %(pid)s", dict(pid=self._childPid))
        # Ensure the child does not outlive the parent on normal exit.
        atexit.register(self._exit)
        self.daemon = True
        threading.Thread.start(self)
    def _child(self):
        """Child process body: drop privileges, redirect stdout, serve."""
        try:
            # Drop group before user — the reverse would lose the right
            # to change the gid.
            os.setgid(self._downgradeGID)
            os.setuid(self._downgradeUID)
            # Anything the listener prints becomes a message to the parent.
            sys.stdout = os.fdopen(self._writePipe, "w")
            githubwebeventlistener.main(self._port)
        except:
            # stderr may be unusable here; persist the traceback to a file.
            import traceback
            open("/tmp/stack", "w").write(traceback.format_exc())
            raise
    def run(self):
        """Parent-side reader loop; commits suicide on child failure."""
        read = os.fdopen(self._readPipe, "r")
        try:
            while True:
                # Wait up to 30s for a line from the child.
                ready, unused, unused = select.select([read], [], [], 30)
                if read in ready:
                    repo = read.readline().strip()
                    if repo == '':
                        # EOF: the child closed its end (it died).
                        raise Exception("EOF reading from github web event listener")
                    self._callback(repo.strip())
                else:
                    # Idle timeout: verify the child's TCP server still
                    # has a listening socket on the expected port.
                    output = subprocess.check_output(
                        ['netstat', '-n', '-t', '-l'], stderr=subprocess.STDOUT, close_fds=True)
                    if (":%d" % self._port) not in output:
                        logging.error("TCP server on port '%(port)d' was not found" % dict(port=self._port))
                        raise Exception("TCP server on port '%d' was not found" % self._port)
        except:
            # Kill the child, then terminate this whole process so the
            # supervisor can restart everything in a clean state.
            logging.exception("Child event listener died, commiting suicide")
            try:
                os.kill(self._childPid, signal.SIGKILL)
            except:
                logging.exception("Unable to kill child")
            os.kill(os.getpid(), signal.SIGTERM)
            raise
    def _exit(self):
        # atexit hook: make sure the forked listener is reaped with us.
        os.kill(self._childPid, signal.SIGKILL)
if __name__ == "__main__":
    # Manual smoke test: spawn the listener and print every repo name it
    # reports for ~17 minutes.
    import time

    def printCallback(repo):
        # print() call syntax works on both Python 2 (as an expression in
        # parentheses) and Python 3; the bare ``print repo`` statement was a
        # SyntaxError under Python 3.
        print(repo)

    SpawnGithubWebEventListener(printCallback)
    time.sleep(1000)
| 35.118421 | 108 | 0.576995 |
acde3ac07075e1cffdd4b489c77a34ad1c77e4b9 | 6,849 | py | Python | bindings/python/ensmallen_graph/datasets/string/clostridiumsaccharogumia.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/clostridiumsaccharogumia.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/clostridiumsaccharogumia.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Clostridium saccharogumia.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:34:33.075960
The undirected graph Clostridium saccharogumia has 2857 nodes and 261899
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.06419 and has 18 connected components, where the component
with most nodes has 2818 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 154, the mean node degree is 183.34,
and the node degree mode is 2. The top 5 most central nodes are 1121333.JMLH01000017_gene2806
(degree 1236), 1121333.JMLH01000043_gene2585 (degree 962), 1121333.JMLH01000034_gene2863
(degree 938), 1121333.JMLH01000002_gene166 (degree 924) and 1121333.JMLH01000126_gene1511
(degree 899).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ClostridiumSaccharogumia
# Then load the graph
graph = ClostridiumSaccharogumia()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ClostridiumSaccharogumia(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Clostridium saccharogumia graph.

    The graph is automatically retrieved from the STRING repository and
    cached locally under ``cache_path``. See the module docstring for the
    graph's statistics, a usage example, and the citation to use when
    publishing results based on this data (Szklarczyk et al., "STRING v11",
    Nucleic Acids Research, 2019).

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Clostridium saccharogumia graph.
    """
    # Build the retrieval helper first, then invoke it: calling the helper
    # performs the actual download/parsing and returns the EnsmallenGraph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="ClostridiumSaccharogumia",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.858639 | 223 | 0.710615 |
acde3b5f6539fe08ce90f119e2dc8d597753b8a7 | 97 | py | Python | project/blueprints/bp1/__init__.py | takwas/flask-layout-demo | 493a7fed52acaa893c986cb5aaddb708f8bd3508 | [
"MIT"
] | null | null | null | project/blueprints/bp1/__init__.py | takwas/flask-layout-demo | 493a7fed52acaa893c986cb5aaddb708f8bd3508 | [
"MIT"
] | null | null | null | project/blueprints/bp1/__init__.py | takwas/flask-layout-demo | 493a7fed52acaa893c986cb5aaddb708f8bd3508 | [
"MIT"
] | null | null | null | from flask import Blueprint
# Blueprint instance for this package; registered with the Flask app
# elsewhere (presumably by an application factory — confirm against caller).
bp_obj = Blueprint('bp_obj', __name__)
# Imported last and for side effects: the controllers module attaches its
# route handlers to bp_obj, so it must import after bp_obj exists.
from . import controllers
| 13.857143 | 38 | 0.762887 |
acde3bf6430720e62bbee249b5340e8f642f7fea | 16,867 | py | Python | acrilog/concepts/mplogger.py | Acrisel/acrilog | 87cf010880ad421abcbce9c7316eefdb7380bcd6 | [
"MIT"
] | 3 | 2017-09-04T03:26:13.000Z | 2019-12-15T09:18:30.000Z | acrilog/concepts/mplogger.py | Acrisel/acrilog | 87cf010880ad421abcbce9c7316eefdb7380bcd6 | [
"MIT"
] | null | null | null | acrilog/concepts/mplogger.py | Acrisel/acrilog | 87cf010880ad421abcbce9c7316eefdb7380bcd6 | [
"MIT"
] | null | null | null | '''
Created on Nov 30, 2017
@author: arnon
'''
# -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import logging
from logging.handlers import QueueListener, QueueHandler
import os
import multiprocessing as mp
from copy import copy
from acrilib import TimedSizedRotatingHandler
from datetime import datetime
import sys
import socket
class MpQueueListener(QueueListener):
    """QueueListener that routes records to per-process-key file handlers.

    Handlers come in three groups: ``global_handlers`` (applied to every
    record), ``console_handlers`` (added via addConsoleHandler), and
    lazily-created per-key file handlers keyed on record attributes listed
    in ``process_key`` (e.g. 'processName').
    """
    def __init__(self, queue, name=None, logging_level=logging.INFO,
                 logdir=None, formatter=None, process_key=['processName'],
                 global_handlers=[], **kwargs):
        # NOTE(review): mutable default arguments (process_key,
        # global_handlers) are shared across instances — confirm no caller
        # mutates them.
        super(MpQueueListener, self).__init__(queue, *global_handlers)
        """ Initialize an instance with the specified queue and
        handlers.
        Args:
            handlers: list of handlers to apply
            process_key: list of keys by which to bind handler to records.
                handlers that don't have any key are classified as global handlers.
                if record doesn't have any matching key, global handlers will be used.
                if records match, only matching handlers will be used.
        """
        self.process_key = process_key
        self.logdir = logdir
        self.formatter = formatter
        self.name = name
        self.kwargs = kwargs
        # Nested map: process_key -> {record value for that key -> handlers}.
        key_handlers = dict([(p, dict()) for p in process_key])
        self.key_handlers = key_handlers
        self.global_handlers = global_handlers
        self.console_handlers = list()
        self.logging_level = logging_level
    def handle(self, record):
        """ Override handle a record.
        This just loops through the handlers offering them the record
        to handle.
        Args:
            record: The record to handle.
        """
        # Find handlers that match process keys
        handlers = list()
        record_name = record.__dict__.get('name', None)
        for process_key in self.process_key:
            record_key = record.__dict__.get(process_key, None)
            if record_key:
                process_handlers = self.key_handlers[process_key]
                key_handlers = process_handlers.get(record_key, [])
                # avoid getting dedicated handler in special case when in
                # consolidated mode and record with
                # name equal to the global one (QueueListiner name)
                need_handler=len(key_handlers) == 0 and (record_key != self.name or len(self.global_handlers) ==0)
                if need_handler:
                    # Lazily build and cache a file handler named after the
                    # logger name plus the key value (when they differ).
                    name = record_name
                    if record_name != record_key:
                        name = "%s.%s" % (name, record_key)
                    key_handlers = get_file_handler(logging_level=self.logging_level, logdir=self.logdir, process_key=name, formatter=self.formatter, **self.kwargs)
                    process_handlers[record_key] = key_handlers
                handlers.extend(key_handlers)
        if len(self.global_handlers) > 0:
            handlers.extend(self.global_handlers)
        if len(self.console_handlers) > 0:
            handlers.extend(self.console_handlers)
        record = self.prepare(record)
        # set() dedupes handlers that appeared in more than one group.
        for handler in list(set(handlers)):
            if record.levelno >= handler.level: # This check is not in the parent class
                handler.handle(record)
    def addConsoleHandler(self, handler):
        # Console handlers are applied to all records in addition to any
        # key-matched or global handlers.
        self.console_handlers.append(handler)
    def addHandler(self, handler):
        """
        Add the specified handler to this logger.
        handler is expected to have process_key attribute.
        process_key attribute is expected to be a list of records attribute names that handler would bind to.
        if handler does not have process_key attribute or it is empty, handler will be associated with
        """
        key_bind = False
        if hasattr(handler, 'process_key'):
            handler_key = handler.process_key
            # Bind the handler under every key it shares with this listener.
            for key in list(set(self.process_key) & set(handler_key)):
                exist_handler = self.key_handlers.get(key, list())
                self.key_handlers[key] = exist_handler
                exist_handler.append(handler)
                key_bind = True
        if not key_bind:
            # No key match: treat it as a global handler.
            self.global_handlers.append(handler)
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        # self.handlers is the tuple/list managed by the QueueListener base.
        if hdlr in self.handlers:
            hdlr.close()
            self.handlers.remove(hdlr)
class MicrosecondsDatetimeFormatter(logging.Formatter):
    """logging.Formatter that renders record timestamps with milliseconds.

    With no ``datefmt`` the output is "YYYY-mm-dd HH:MM:SS.mmm"; when a
    ``datefmt`` is supplied it is passed to strftime unchanged.
    """
    def formatTime(self, record, datefmt=None):
        # datetime.fromtimestamp never returns None, so the original
        # fallback assigning datetime.now() was dead code and is removed.
        ct = datetime.fromtimestamp(record.created)
        if datefmt is not None:
            return ct.strftime(datefmt)
        t = ct.strftime("%Y-%m-%d %H:%M:%S")
        # record.msecs is a float; %03d truncates to whole milliseconds.
        return "%s.%03d" % (t, record.msecs)
class LevelBasedFormatter(logging.Formatter):
    """Formatter selecting a message layout based on the record's level.

    DEBUG records get an extended layout that includes module, function and
    line number; all other levels fall back to the 'default' layout. The
    mapping can be overridden or extended per level via ``level_formats``.
    """
    defaults = {
        logging.DEBUG : u"%(asctime)-15s: %(process)-7s: %(levelname)-7s: %(message)s: %(module)s.%(funcName)s(%(lineno)d)",
        'default' : u"%(asctime)-15s: %(process)-7s: %(levelname)-7s: %(message)s",
    }
    def __init__(self, level_formats={}, datefmt=None):
        fmt_map = LevelBasedFormatter.defaults
        if level_formats:
            # Copy so caller overrides never mutate the class-level table.
            fmt_map = copy(LevelBasedFormatter.defaults)
            fmt_map.update(level_formats)
        self.datefmt = datefmt
        # Pre-build one microsecond-aware formatter per level (plus the
        # 'default' entry used as the fallback).
        self.defaults = {
            level: MicrosecondsDatetimeFormatter(fmt=fmt, datefmt=self.datefmt)
            for level, fmt in fmt_map.items()
        }
        self.default_format = self.defaults['default']
        # NOTE(review): the base initializer receives a formatter *object*
        # as fmt; harmless here because format() below never delegates to
        # the base implementation, but worth confirming.
        logging.Formatter.__init__(self, fmt=self.default_format, datefmt=self.datefmt)
    def format(self, record):
        chosen = self.defaults.get(record.levelno, self.default_format,)
        return chosen.format(record)
def create_stream_handler(logging_level=logging.INFO, level_formats={}, datefmt=None):
    """Build the console (stdout) handler list for the logger.

    Args:
        logging_level: kept for interface compatibility; the handler's level
            is left at the default (NOTSET) so filtering is done upstream.
        level_formats: per-level format overrides for LevelBasedFormatter.
        datefmt: strftime-style date format passed to the formatter.

    Returns:
        A list containing a single stdout StreamHandler. (The original
        built this list but never returned it, and also constructed an
        unused stderr handler; both defects are fixed here.)
    """
    handlers = list()
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    formatter = LevelBasedFormatter(level_formats=level_formats, datefmt=datefmt)
    stdout_handler.setFormatter(formatter)
    handlers.append(stdout_handler)
    return handlers
def get_file_handler(logdir='', logging_level=logging.INFO, process_key=None, formatter=None, file_prefix=None, file_suffix=None, **kwargs):
    """Create a list with one timed/sized rotating file handler.

    The log file name is composed as [<file_prefix>.]<process_key>[.<file_suffix>].log,
    defaulting to mplogger.log when no process_key is given.

    Args:
        kwargs: passed through to TimedSizedRotatingHandler, e.g.
            file_mode='a', maxBytes=0, backupCount=0, encoding='ascii',
            delay=False, when='h', interval=1, utc=False, atTime=None.
    """
    if logdir is None:
        logdir = ''
    key = process_key
    if file_suffix:
        key = "%s.%s" % (key, file_suffix)
    basename = "%s.log" % key if key else 'mplogger.log'
    if file_prefix:
        basename = "%s.%s" % (file_prefix, basename)
    handler = TimedSizedRotatingHandler(
        filename=os.path.join(logdir, basename), delay="true", **kwargs)
    handler.setFormatter(formatter)
    return [handler]
class MpLogger(object):
    ''' Builds Multiprocessing logger such all process share the same logging mechanism
    (one queue drained by a single listener; workers only enqueue records).
    '''
    def __init__(self, name='mplogger', logdir=None, logging_level=logging.INFO, level_formats={}, datefmt=None, process_key=['name'], console=True, consolidate=False, local_log=True, handlers=[], **kwargs):
        '''Initiates MpLogger service

        Args:
            name: base name to use for file logs.
            logdir: folder to which log files will be written; if not provided, log files will not be created
            logging_level: level from which logging will be done
            level_formats: mapping of logging levels to formats to use for constructing message
            datefmt: date format to use
            process_key: list of record names that would be used to create files
            console: when set, records are also routed to stdout handlers.
            consolidate: when set, a consolidated ("global") log file is kept.
            local_log: when set, remote worker hosts also write local files.
            handlers: list of global handlers
            kwargs: pass-through to handler defining its policy
                file_mode='a',
                file_prefix='',
                file_suffix='',
                maxBytes=0,
                backupCount=0,
                encoding='ascii',
                delay=False,
                when='h',
                interval=1,
                utc=False,
                atTime=None

        NOTE(review): level_formats, process_key and handlers are mutable
        default arguments; callers must not mutate them in place.
        NOTE(review): the default logdir=None reaches os.path.isdir(None)
        below, which raises TypeError on Python 3 — confirm callers always
        pass a real path.
        '''
        self.logdir=logdir
        if not os.path.isdir(logdir):
            try:
                # 0o744: rwx for owner, read-only for group and others.
                os.makedirs(logdir, mode=0o744, exist_ok=True)
            except:
                # Bare re-raise kept as-is; it adds nothing but changes nothing.
                raise
        self.logging_level=logging_level
        self.level_formats=level_formats
        self.datefmt=datefmt
        # Single formatter instance shared by every file handler we create.
        self.record_formatter=LevelBasedFormatter(level_formats=level_formats, datefmt=datefmt)
        #self.logging_root=logging_root if logging_root is not None else name
        self.logger_initialized=False
        self.queue_listener=None
        self.handlers=handlers
        self.process_key=process_key
        self.consolidate=consolidate
        self.console=console
        self.name=name
        # Remaining kwargs are forwarded verbatim to the file-handler factory
        # (rotation policy, encoding, etc. — see docstring above).
        self.kwargs=kwargs
        #self.encoding=encoding
        #self.file_mode=file_mode
        self.local_log=local_log

    def _global_file_handlers(self,):
        '''Create the consolidated ("global") file handler(s) for this logger.

        Side effect: records the first handler's filename in
        ``self.global_filename``.
        '''
        #if not process_key: process_key=self.name
        handlers=get_file_handler(logdir=self.logdir, logging_level=self.logging_level, process_key=self.name, formatter=self.record_formatter, **self.kwargs)
        self.global_filename=handlers[0].filename
        return handlers
        #for handler in handlers:
        #    self.queue_listener.addHandler(handler)

    @classmethod
    def add_file_handlers(cls, name, logger, logdir, logging_level, record_formatter, process_key='', **kwargs):
        '''Attach per-process file handlers to *logger*.

        Falls back to *name* when *process_key* is empty.

        Args:
            kwargs:
                file_mode='a',
                maxBytes=0,
                backupCount=0,
                encoding='ascii',
                delay=False,
                when='h',
                interval=1,
                utc=False,
                atTime=None
        '''
        if not process_key: process_key=name
        global_handlers=get_file_handler(logdir=logdir, logging_level=logging_level, process_key=process_key, formatter=record_formatter, **kwargs)
        for handler in global_handlers:
            logger.addHandler(handler)

    def logger_info(self):
        '''Return a picklable description of this logger, consumed by
        MpLogger.get_logger() in worker processes.

        NOTE(review): must be called after start(), which creates
        ``self.loggerq``; calling it earlier raises AttributeError.
        '''
        return {'process_key': self.process_key,
                'logdir': self.logdir,
                'logging_level': self.logging_level,
                'record_formatter': self.record_formatter,
                # NOTE(review): duplicate dict key below — the second entry
                # silently overwrites the first with the same value; redundant.
                'record_formatter': self.record_formatter,
                'loggerq': self.loggerq,
                'handler_kwargs': self.kwargs,
                'local_log': self.local_log,
                'server_host': socket.gethostbyname(socket.gethostname()),
                }

    @classmethod
    def get_logger(cls, logger_info, name):
        '''Build a worker-side logger from the dict produced by logger_info().

        Always attaches a QueueHandler feeding the server's queue; optionally
        also attaches local file handlers on non-server hosts.
        '''
        # create the logger to use.
        logger = logging.getLogger(name)
        # The only handler desired is the SubProcessLogHandler. If any others
        # exist, remove them. In this case, on Unix and Linux the StreamHandler
        # will be inherited.
        #for handler in logger.handlers:
        #    # just a check for my sanity
        #    assert not isinstance(handler, TimedSizedRotatingHandler)
        #    logger.removeHandler(handler)
        server_host=socket.gethostbyname(socket.gethostname())
        # server may already started logger
        # if logger_info['server_host'] == server_host: return logger
        logging_level=logger_info['logging_level']
        loggerq=logger_info['loggerq']
        queue_handler = QueueHandler(loggerq)
        logger.addHandler(queue_handler)
        # add the handler only if processing locally and this host is not server host.
        if logger_info['local_log'] and logger_info['server_host'] != server_host:
            cls.add_file_handlers(name=name, process_key=logger_info['process_key'],
                                  logger=logger,
                                  logdir=logger_info['logdir'],
                                  logging_level=logging_level,
                                  record_formatter=logger_info['record_formatter'],
                                  **logger_info['handler_kwargs'],
                                  )
        # On Windows, the level will not be inherited. Also, we could just
        # set the level to log everything here and filter it in the main
        # process handlers. For now, just set it from the global default.
        logger.setLevel(logging_level)
        return logger

    def start(self, name=None):
        ''' starts logger for multiprocessing using queue.

        Idempotent: a second call returns None without re-initializing.

        Args:
            name: identify starting process to allow it log into its own logger

        Returns:
            logger: set with correct Q handler
        '''
        # create console handler and set level to info
        #if MpLogger.logger_initialized:
        if self.logger_initialized:
            return
        self.logger_initialized=True
        #logger = logging.getLogger(name=self.logging_root)
        logger = logging.getLogger(name=self.name)
        logger.setLevel(self.logging_level)
        # Manager-backed queue so it can be shared with spawned processes.
        manager=mp.Manager()
        self.loggerq=manager.Queue()
        queue_handler = QueueHandler(self.loggerq)
        logger.addHandler(queue_handler)
        ghandlers=[]
        if self.logdir and self.consolidate:  # and self.force_global:
            ghandlers=self._global_file_handlers()
        # The listener drains loggerq and fans records out to file/console
        # handlers; MpQueueListener is defined elsewhere in this module.
        self.queue_listener = MpQueueListener(self.loggerq, name=self.name, logging_level=self.logging_level, logdir=self.logdir, formatter=self.record_formatter, process_key=self.process_key, global_handlers=ghandlers, **self.kwargs)
        if len(self.handlers) == 0:
            if self.console:
                handlers=create_stream_handler(logging_level=self.logging_level, level_formats=self.level_formats, datefmt=self.datefmt)
                for handler in handlers:
                    self.queue_listener.addConsoleHandler(handler)
        else:  # len(self.handlers) > 0:
            # Caller-supplied handlers replace the default console handlers.
            for handler in self.handlers:
                self.queue_listener.addHandler(handler)
        self.queue_listener.start()
        logger_name=name if name is not None else self.name
        return logging.getLogger(name=logger_name)

    def stop(self,):
        '''Stop the queue listener, if one was started.'''
        if self.queue_listener:
            self.queue_listener.stop()

    def quite(self,):
        '''Ask the listener to finish by enqueueing its sentinel.

        NOTE(review): name looks like a typo for "quiet"/"quit"; kept for
        backward compatibility with existing callers.
        '''
        if self.queue_listener:
            self.queue_listener.enqueue_sentinel()
if __name__ == '__main__':
    # Required before other multiprocessing calls in frozen Windows
    # executables; a no-op everywhere else.
    mp.freeze_support()
    # 'spawn' starts children with a fresh interpreter (the only start
    # method available on Windows).
    mp.set_start_method('spawn')
acde3d0e0065ac7181e3afcb9d61888ef9e42eb2 | 1,120 | py | Python | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-biobase/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiobase(RPackage):
    """Biobase: Base functions for Bioconductor.

    Functions that are needed by many other packages or which replace R
    functions."""

    # Bioconductor package name used by Spack's bioc fetch strategy.
    bioc = "Biobase"

    # Each release is pinned to the exact git commit of its Bioconductor tag.
    version('2.54.0', commit='8215d76ce44899e6d10fe8a2f503821a94ef6b40')
    version('2.50.0', commit='9927f90d0676382f2f99e099d8d2c8e2e6f1b4de')
    version('2.44.0', commit='bde2077f66047986297ec35a688751cdce150dd3')
    version('2.42.0', commit='3e5bd466b99e3cc4af1b0c3b32687fa56d6f8e4d')
    version('2.40.0', commit='6555edbbcb8a04185ef402bfdea7ed8ac72513a5')
    version('2.38.0', commit='83f89829e0278ac014b0bc6664e621ac147ba424')
    version('2.36.2', commit='15f50912f3fa08ccb15c33b7baebe6b8a59ce075')

    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.3.2:', type=('build', 'run'))
    # Releases 2.42.0 and newer need a newer BiocGenerics.
    depends_on('r-biocgenerics@0.27.1:', type=('build', 'run'), when='@2.42.0:')
| 40 | 80 | 0.724107 |
acde3d3440d6c9ba8da7de7ece0266e56b075f50 | 12,683 | py | Python | setup.py | egerber/fast-counter | bf36c09d6f6122302e04419388c1360ad4e4e59d | [
"MIT"
] | null | null | null | setup.py | egerber/fast-counter | bf36c09d6f6122302e04419388c1360ad4e4e59d | [
"MIT"
] | null | null | null | setup.py | egerber/fast-counter | bf36c09d6f6122302e04419388c1360ad4e4e59d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
"""setuptools-based setup.py template for Cython projects.
Main setup for the library.
Supports Python 2.7 and 3.4.
Usage as usual with setuptools:
python setup.py build_ext
python setup.py build
python setup.py install
python setup.py sdist
For details, see
http://setuptools.readthedocs.io/en/latest/setuptools.html#command-reference
or
python setup.py --help
python setup.py --help-commands
python setup.py --help bdist_wheel # or any command
"""
from __future__ import division, print_function, absolute_import
# Portability shim: FileNotFoundError exists only on Python 3; on 2.7 the
# equivalent failures surface as IOError/OSError.
try:
    # Python 3
    MyFileNotFoundError = FileNotFoundError
except NameError:  # FileNotFoundError does not exist in Python 2.7
    # BUGFIX: was a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit; only NameError can occur here.
    # Python 2.7
    # - open() raises IOError
    # - remove() (not currently used here) raises OSError
    MyFileNotFoundError = (IOError, OSError)
#########################################################
# General config
#########################################################
# Name of the top-level package of your library.
#
# This is also the top level of its source tree, relative to the top-level project directory setup.py resides in.
#
libname = "fastcounter"

# Choose build type: selects the compiler/linker flag sets defined below.
#
build_type = "optimized"
# build_type="debug"

# Short description for package list on PyPI
#
SHORTDESC = "setuptools template for Cython projects"

# Long description for package homepage on PyPI
#
DESC = """setuptools-based setup.py template for Cython projects.
The focus of this template is on numerical scientific projects,
where a custom Cython extension (containing all-new code) can bring a large speedup.
For completeness, a minimal Cython module is included.
Supports Python 2.7 and 3.4.
"""

# Set up data files for packaging.
#
# Directories (relative to the top-level directory where setup.py resides) in which to look for data files.
datadirs = ("test",)

# File extensions to be considered as data files. (Literal, no wildcards.)
dataexts = (".py", ".pyx", ".pxd", ".c", ".cpp", ".h", ".sh", ".lyx", ".tex", ".txt", ".pdf")

# Standard documentation to detect (and package if it exists).
#
standard_docs = ["README", "LICENSE", "TODO", "CHANGELOG", "AUTHORS"]  # just the basename without file extension
standard_doc_exts = [".md", ".rst", ".txt",
                     ""]  # commonly .md for GitHub projects, but other projects may use .rst or .txt (or even blank).
#########################################################
# Init
#########################################################
# check for Python 2.7 or later
# http://stackoverflow.com/questions/19534896/enforcing-python-version-in-setup-py
import sys

# Fail fast with a clear message instead of a confusing SyntaxError later.
if sys.version_info < (2, 7):
    sys.exit('Sorry, Python < 2.7 is not supported')

import os
from setuptools import setup
from setuptools.extension import Extension

try:
    from Cython.Build import cythonize
except ImportError:
    # Cython is a hard build-time requirement for the .pyx modules below.
    sys.exit("Cython not found. Cython is needed to build the extension modules.")
#########################################################
# Definitions
#########################################################
# Define our base set of compiler and linker flags.
#
# This is geared toward x86_64, see
# https://gcc.gnu.org/onlinedocs/gcc-4.6.4/gcc/i386-and-x86_002d64-Options.html
#
# Customize these as needed.
#
# Note that -O3 may sometimes cause mysterious problems, so we limit ourselves to -O2.
# Modules involving numerical computations
#
extra_compile_args_math_optimized = ['-march=native', '-O2', '-msse', '-msse2', '-mfma', '-mfpmath=sse']
extra_compile_args_math_debug = ['-march=native', '-O0', '-g']
extra_link_args_math_optimized = []
extra_link_args_math_debug = []

# Modules that do not involve numerical computations
#
extra_compile_args_nonmath_optimized = ['-O2']
extra_compile_args_nonmath_debug = ['-O0', '-g']
extra_link_args_nonmath_optimized = []
extra_link_args_nonmath_debug = []

# Additional flags to compile/link with OpenMP
# (GCC/Clang syntax; MSVC would need /openmp instead.)
#
openmp_compile_args = ['-fopenmp']
openmp_link_args = ['-fopenmp']
#########################################################
# Helpers
#########################################################
# Make absolute cimports work.
#
# See
# https://github.com/cython/cython/wiki/PackageHierarchy
#
# For example: my_include_dirs = [np.get_include()]
my_include_dirs = ["."]

# Choose the base set of compiler and linker flags.
# my_debug also controls gdb_debug in the cythonize() call further below.
#
if build_type == 'optimized':
    my_extra_compile_args_math = extra_compile_args_math_optimized
    my_extra_compile_args_nonmath = extra_compile_args_nonmath_optimized
    my_extra_link_args_math = extra_link_args_math_optimized
    my_extra_link_args_nonmath = extra_link_args_nonmath_optimized
    my_debug = False
    print("build configuration selected: optimized")
elif build_type == 'debug':
    my_extra_compile_args_math = extra_compile_args_math_debug
    my_extra_compile_args_nonmath = extra_compile_args_nonmath_debug
    my_extra_link_args_math = extra_link_args_math_debug
    my_extra_link_args_nonmath = extra_link_args_nonmath_debug
    my_debug = True
    print("build configuration selected: debug")
else:
    raise ValueError("Unknown build configuration '%s'; valid: 'optimized', 'debug'" % (build_type))
def declare_cython_extension(extName, use_math=False, use_openmp=False, include_dirs=None):
    """Declare a Cython extension module for setuptools.

    Parameters:
        extName : str
            Absolute module name, e.g. use `fastcounter.mypackage.mymodule`
            for the Cython source file `fastcounter/mypackage/mymodule.pyx`.
        use_math : bool
            If True, set math flags and link with ``libm``.
        use_openmp : bool
            If True, compile and link with OpenMP.
        include_dirs : list of str or None
            Extra include directories forwarded to the Extension.

    Return value:
        Extension object
        that can be passed to ``setuptools.setup``.
    """
    extPath = extName.replace(".", os.path.sep) + ".pyx"

    if use_math:
        compile_args = list(my_extra_compile_args_math)  # copy
        link_args = list(my_extra_link_args_math)
        libraries = ["m"]  # link libm; this is a list of library names without the "lib" prefix
    else:
        compile_args = list(my_extra_compile_args_nonmath)
        link_args = list(my_extra_link_args_nonmath)
        libraries = None  # value if no libraries, see setuptools.extension._Extension

    # OpenMP
    if use_openmp:
        # BUGFIX: the original did compile_args.insert(0, openmp_compile_args),
        # which inserted the flag *list* itself as a single nested element.
        # Extension expects a flat list of strings, so prepend element-wise.
        compile_args = openmp_compile_args + compile_args
        link_args = openmp_link_args + link_args

    # See
    # http://docs.cython.org/src/tutorial/external.html
    #
    # on linking libraries to your Cython extensions.
    #
    return Extension(extName,
                     [extPath],
                     extra_compile_args=compile_args,
                     extra_link_args=link_args,
                     include_dirs=include_dirs,
                     libraries=libraries
                     )
# Gather user-defined data files
#
# http://stackoverflow.com/questions/13628979/setuptools-how-to-make-package-contain-extra-data-folder-and-all-folders-inside
#
datafiles = []
getext = lambda filename: os.path.splitext(filename)[1]  # extension incl. leading dot
for datadir in datadirs:
    # Pair each directory in the tree with its files that match dataexts.
    datafiles.extend([(root, [os.path.join(root, f) for f in files if getext(f) in dataexts])
                      for root, dirs, files in os.walk(datadir)])

# Add standard documentation (README et al.), if any, to data files
#
detected_docs = []
for docname in standard_docs:
    for ext in standard_doc_exts:
        filename = "".join((docname, ext))  # relative to the directory in which setup.py resides
        if os.path.isfile(filename):
            detected_docs.append(filename)
datafiles.append(('.', detected_docs))

# Extract __version__ from the package __init__.py
# (since it's not a good idea to actually run __init__.py during the build process).
#
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
#
import ast

init_py_path = os.path.join(libname, '__init__.py')
version = '0.0.1'
try:
    with open(init_py_path) as f:
        for line in f:
            if line.startswith('__version__'):
                # Parse the assignment instead of exec'ing the module.
                version = ast.parse(line).body[0].value.s
                break
        else:
            # for/else: loop finished without hitting the break above.
            print("WARNING: Version information not found in '%s', using placeholder '%s'" % (init_py_path, version),
                  file=sys.stderr)
except MyFileNotFoundError:
    print("WARNING: Could not find file '%s', using placeholder version information '%s'" % (init_py_path, version),
          file=sys.stderr)
#########################################################
# Set up modules
#########################################################
# declare Cython extension modules here
#
ext_module_fastcounter = declare_cython_extension("fastcounter.FastCounter", use_math=False, use_openmp=False,
                                                 include_dirs=my_include_dirs)
ext_module_memory = declare_cython_extension("fastcounter.ShortTermMemory", use_math=True, use_openmp=False,
                                             include_dirs=my_include_dirs)
# ext_module_helloworld = declare_cython_extension("fastcounter.subpackage.helloworld", use_math=False, use_openmp=False,
#                                                  include_dirs=my_include_dirs)

# this is mainly to allow a manual logical ordering of the declared modules
#
cython_ext_modules = [ext_module_fastcounter,
                      ext_module_memory]
# ext_module_helloworld]

# Call cythonize() explicitly, as recommended in the Cython documentation. See
# http://cython.readthedocs.io/en/latest/src/reference/compilation.html#compiling-with-distutils
#
# This will favor Cython's own handling of '.pyx' sources over that provided by setuptools.
#
# Note that my_ext_modules is just a list of Extension objects. We could add any C sources (not coming from Cython modules) here if needed.
# cythonize() just performs the Cython-level processing, and returns a list of Extension objects.
#
my_ext_modules = cythonize(cython_ext_modules, include_path=my_include_dirs, gdb_debug=my_debug)
#########################################################
# Call setup()
#########################################################
setup(
    name="fast-counter",
    version=version,
    author="Emanuel Gerber",
    author_email="emanuel.gerber@tum.de",
    url="https://github.com/egerber/fast-counter",

    description=SHORTDESC,
    long_description=DESC,

    # See
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    #
    # for the standard classifiers.
    #
    # Remember to configure these appropriately for your project, especially license!
    #
    classifiers=["Environment :: Console",
                 "Intended Audience :: Developers",
                 "Intended Audience :: Science/Research",
                 "License :: OSI Approved :: MIT License",
                 "Operating System :: POSIX :: Linux",
                 "Programming Language :: Cython",
                 "Programming Language :: Python",
                 "Programming Language :: Python :: 2",
                 "Programming Language :: Python :: 2.7",
                 "Programming Language :: Python :: 3",
                 "Programming Language :: Python :: 3.4"
                 ],

    # See
    # http://setuptools.readthedocs.io/en/latest/setuptools.html
    #
    license="MIT",

    setup_requires=["cython", "numpy"],
    install_requires=["numpy"],
    provides=["fastcounter"],

    # keywords for PyPI (in case you upload your project)
    #
    # e.g. the keywords your project uses as topics on GitHub, minus "python" (if there)
    #
    keywords=["setuptools template example cython"],

    # All extension modules (list of Extension objects)
    #
    ext_modules=my_ext_modules,

    # Declare packages so that python -m setup build will copy .py files (especially __init__.py).
    #
    # This **does not** automatically recurse into subpackages, so they must also be declared.
    #
    packages=["fastcounter"],

    # Install also Cython headers so that other Cython modules can cimport ours
    #
    # Fileglobs relative to each package, **does not** automatically recurse into subpackages.
    #
    # FIXME: force sdist, but sdist only, to keep the .pyx files (this puts them also in the bdist)
    package_data={'fastcounter': ['*.pxd', '*.pyx']},

    # Disable zip_safe, because:
    #   - Cython won't find .pxd files inside installed .egg, hard to compile libs depending on this one
    #   - dynamic loader may need to have the library unzipped to a temporary directory anyway (at import time)
    #
    zip_safe=False,

    # Custom data files not inside a Python package
    data_files=datafiles
)
| 35.526611 | 139 | 0.652133 |
acde3d8efb8ff0bce2ebe00a36b20af4c7e8c28b | 10,137 | py | Python | tests/integration/sync/v1/service/test_sync_list.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | 1 | 2019-12-30T21:46:55.000Z | 2019-12-30T21:46:55.000Z | tests/integration/sync/v1/service/test_sync_list.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | null | null | null | tests/integration/sync/v1/service/test_sync_list.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | null | null | null | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SyncListTestCase(IntegrationTestCase):
    """Integration tests for the Sync v1 Service SyncList resource.

    The holodeck test double records outgoing HTTP requests and serves
    canned responses: *_request tests mock a 500 so only the emitted
    request can be asserted; *_response tests mock a recorded payload
    and verify the SDK parses it without error.
    """

    def test_fetch_request(self):
        # 500 forces a TwilioException after the request is recorded.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "links": {
                    "items": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                },
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.assertIsNotNone(actual)

    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.holodeck.assert_has_request(Request(
            'delete',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_delete_response(self):
        # 204 No Content is the expected success response for delete.
        self.holodeck.mock(Response(
            204,
            None,
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.assertTrue(actual)

    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .sync_lists.create()

        self.holodeck.assert_has_request(Request(
            'post',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
        ))

    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "links": {
                    "items": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                },
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists.create()

        self.assertIsNotNone(actual)

    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        self.holodeck.assert_has_request(Request(
            'post',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "links": {
                    "items": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                },
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists(sid="ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        self.assertIsNotNone(actual)

    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .sync_lists.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists',
        ))

    def test_read_empty_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "lists": [],
                "meta": {
                    "first_page_url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
                    "key": "lists",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
                }
            }
            '''
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists.list()

        self.assertIsNotNone(actual)

    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "lists": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "created_by": "created_by",
                        "date_expires": "2015-07-30T21:00:00Z",
                        "date_created": "2015-07-30T20:00:00Z",
                        "date_updated": "2015-07-30T20:00:00Z",
                        "links": {
                            "items": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items",
                            "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                        },
                        "revision": "revision",
                        "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "unique_name": "unique_name",
                        "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ],
                "meta": {
                    "first_page_url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0",
                    "key": "lists",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists?PageSize=50&Page=0"
                }
            }
            '''
        ))

        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists.list()

        self.assertIsNotNone(actual)
| 42.95339 | 168 | 0.57591 |
acde3e83e3a9bce35d36a018aad99129cc530704 | 1,754 | py | Python | Practice_problems/pyramid_all.py | riyabhatia26/Python-Programming | 2882728982c15c3b6380033eb2e90761b538dd93 | [
"MIT"
] | 3 | 2020-08-07T04:33:19.000Z | 2021-10-06T08:58:01.000Z | Practice_problems/pyramid_all.py | riyabhatia26/Python-Programming | 2882728982c15c3b6380033eb2e90761b538dd93 | [
"MIT"
] | null | null | null | Practice_problems/pyramid_all.py | riyabhatia26/Python-Programming | 2882728982c15c3b6380033eb2e90761b538dd93 | [
"MIT"
] | 2 | 2021-10-06T08:58:05.000Z | 2021-10-06T09:46:42.000Z | stop = '-'*10
# NOTE: list comprehensions were previously used purely for their print()
# side effects (building and discarding a list of Nones). Plain loops are
# the idiomatic form; the printed output is identical.

# Normal Pyramid
for i in range(1, 6):
    print(i * '*')
print(stop)

# Reversed Pyramid
for i in range(5, 0, -1):
    print('*' * i)
print(stop)

# Print pyramid with numbers (row i repeats digit i, i times)
for i in range(1, 6):
    print(i * f'{i}')
print(stop)

# Print reverse pyramid with numbers
for i in range(5, 0, -1):
    print(i * f'{i}')
print(stop)

# Print pyramid alternating '#' (odd rows) and '*' (even rows)
for i in range(1, 6):
    print(('*' if i % 2 == 0 else '#') * i)
print(stop)

# Print reversed pyramid with one # and one *
for i in range(5, 0, -1):
    print(('*' if i % 2 == 0 else '#') * i)
print(stop)

# Rows of growing digit runs: '', '1', '12', ..., '12345' (first row empty)
print('\n'.join(''.join(str(j) for j in range(1, i)) for i in range(1, 7)))
print(stop)

# Rows of shrinking digit runs: '123456', '23456', ..., '6'
print('\n'.join(''.join(str(j) for j in range(i, 7)) for i in range(1, 7)))
print(stop)

# Same two shapes, with '#' for odd j and '*' for even j
print('\n'.join(''.join('*' if j % 2 == 0 else '#' for j in range(1, i)) for i in range(1, 7)))
print(stop)

print('\n'.join(''.join('*' if j % 2 == 0 else '#' for j in range(i, 7)) for i in range(1, 7)))
print(stop)
# Output:
# *
# **
# ***
# ****
# *****
# ----------
# *****
# ****
# ***
# **
# *
# ----------
# 1
# 22
# 333
# 4444
# 55555
# ----------
# 55555
# 4444
# 333
# 22
# 1
# ----------
# #
# **
# ###
# ****
# #####
# ----------
# #####
# ****
# ###
# **
# #
# ----------
# 1
# 12
# 123
# 1234
# 12345
# ----------
# 123456
# 23456
# 3456
# 456
# 56
# 6
# ----------
# #
# #*
# #*#
# #*#*
# #*#*#
# ----------
# #*#*#*
# *#*#*
# #*#*
# *#*
# #*
# *
# ----------
| 15.385965 | 96 | 0.415051 |
acde3e88ec625607b7dbdbe864c3cdd79ab67bcb | 7,354 | py | Python | Tools/mavlink_shell.py | a1846342933/poscontrol | 3ed043570f2da1a4926710c939544c6d972b821c | [
"BSD-3-Clause"
] | null | null | null | Tools/mavlink_shell.py | a1846342933/poscontrol | 3ed043570f2da1a4926710c939544c6d972b821c | [
"BSD-3-Clause"
] | null | null | null | Tools/mavlink_shell.py | a1846342933/poscontrol | 3ed043570f2da1a4926710c939544c6d972b821c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Open a shell over MAVLink.
@author: Beat Kueng (beat-kueng@gmx.net)
"""
from __future__ import print_function
import sys, select
import termios
try:
    from pymavlink import mavutil
    import serial
except ImportError:
    # BUGFIX: was a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit; only a failed import belongs here.
    print("Failed to import pymavlink.")
    print("You may need to install it with 'pip install pymavlink pyserial'")
    exit(-1)
from argparse import ArgumentParser
class MavlinkSerialPort():
    '''an object that looks like a serial port, but
    transmits using mavlink SERIAL_CONTROL packets'''

    def __init__(self, portname, baudrate, devnum=0, debug=0):
        # Exposed for serial-port API compatibility; always 0 here because
        # the actual baud rate is handled by the MAVLink connection.
        self.baudrate = 0
        self._debug = debug
        # Receive buffer accumulated from SERIAL_CONTROL messages.
        self.buf = ''
        # SERIAL_CONTROL device number on the remote side.
        self.port = devnum
        self.debug("Connecting with MAVLink to %s ..." % portname)
        self.mav = mavutil.mavlink_connection(portname, autoreconnect=True, baud=baudrate)
        # Block until the vehicle is seen before claiming the device.
        self.mav.wait_heartbeat()
        self.debug("HEARTBEAT OK\n")
        self.debug("Locked serial device\n")

    def debug(self, s, level=1):
        '''write some debug text (printed only when the instance debug
        level is at least *level*)'''
        if self._debug >= level:
            print(s)

    def write(self, b):
        '''write the string *b*, split into SERIAL_CONTROL chunks of at
        most 70 bytes (the payload size of that MAVLink message)'''
        self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
        while len(b) > 0:
            n = len(b)
            if n > 70:
                n = 70
            # Convert to a byte-value list and zero-pad to the fixed
            # 70-byte payload.
            buf = [ord(x) for x in b[:n]]
            buf.extend([0]*(70-len(buf)))
            self.mav.mav.serial_control_send(self.port,
                                             mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
                                             mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
                                             0,
                                             0,
                                             n,
                                             buf)
            b = b[n:]

    def close(self):
        # Zero flags release the exclusive claim on the remote device.
        self.mav.mav.serial_control_send(self.port, 0, 0, 0, 0, [0]*70)

    def _recv(self):
        '''read some bytes into self.buf (waits up to 30 ms for a
        non-empty SERIAL_CONTROL message)'''
        m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',
                                type='SERIAL_CONTROL', blocking=True,
                                timeout=0.03)
        if m is not None:
            if self._debug > 2:
                print(m)
            # Only the first m.count entries of the payload are valid.
            data = m.data[:m.count]
            self.buf += ''.join(str(chr(x)) for x in data)

    def read(self, n):
        '''read up to *n* buffered characters; returns '' when nothing
        arrived within the receive timeout'''
        if len(self.buf) == 0:
            self._recv()
        if len(self.buf) > 0:
            if n > len(self.buf):
                n = len(self.buf)
            ret = self.buf[:n]
            self.buf = self.buf[n:]
            if self._debug >= 2:
                for b in ret:
                    self.debug("read 0x%x" % ord(b), 2)
            return ret
        return ''
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('port', metavar='PORT', nargs='?', default = None,
help='Mavlink port name: serial: DEVICE[,BAUD], udp: IP:PORT, tcp: tcp:IP:PORT. Eg: \
/dev/ttyUSB0 or 0.0.0.0:14550. Auto-detect serial if not given.')
parser.add_argument("--baudrate", "-b", dest="baudrate", type=int,
help="Mavlink port baud rate (default=57600)", default=57600)
args = parser.parse_args()
if args.port == None:
if sys.platform == "darwin":
args.port = "/dev/tty.usbmodem1"
else:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',
"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
if len(serial_list) == 0:
print("Error: no serial connection found")
return
if len(serial_list) > 1:
print('Auto-detected serial ports are:')
for port in serial_list:
print(" {:}".format(port))
print('Using port {:}'.format(serial_list[0]))
args.port = serial_list[0].device
print("Connecting to MAVLINK...")
mav_serialport = MavlinkSerialPort(args.port, args.baudrate, devnum=10)
mav_serialport.write('\n') # make sure the shell is started
# setup the console, so we can read one char at a time
fd_in = sys.stdin.fileno()
old_attr = termios.tcgetattr(fd_in)
new_attr = termios.tcgetattr(fd_in)
new_attr[3] = new_attr[3] & ~termios.ECHO # lflags
new_attr[3] = new_attr[3] & ~termios.ICANON
try:
termios.tcsetattr(fd_in, termios.TCSANOW, new_attr)
cur_line = ''
command_history = []
cur_history_index = 0
def erase_last_n_chars(N):
if N == 0: return
CURSOR_BACK_N = '\x1b['+str(N)+'D'
ERASE_END_LINE = '\x1b[K'
sys.stdout.write(CURSOR_BACK_N + ERASE_END_LINE)
while True:
while True:
i, o, e = select.select([sys.stdin], [], [], 0)
if not i: break
ch = sys.stdin.read(1)
# provide a simple shell with command history
if ch == '\n':
if len(cur_line) > 0:
# erase current text (mavlink shell will echo it as well)
erase_last_n_chars(len(cur_line))
# add to history
if len(command_history) == 0 or command_history[-1] != cur_line:
command_history.append(cur_line)
if len(command_history) > 50:
del command_history[0]
cur_history_index = len(command_history)
mav_serialport.write(cur_line+'\n')
cur_line = ''
elif ord(ch) == 127: # backslash
if len(cur_line) > 0:
erase_last_n_chars(1)
cur_line = cur_line[:-1]
sys.stdout.write(ch)
elif ord(ch) == 27:
ch = sys.stdin.read(1) # skip one
ch = sys.stdin.read(1)
if ch == 'A': # arrow up
if cur_history_index > 0:
cur_history_index -= 1
elif ch == 'B': # arrow down
if cur_history_index < len(command_history):
cur_history_index += 1
# TODO: else: support line editing
erase_last_n_chars(len(cur_line))
if cur_history_index == len(command_history):
cur_line = ''
else:
cur_line = command_history[cur_history_index]
sys.stdout.write(cur_line)
elif ord(ch) > 3:
cur_line += ch
sys.stdout.write(ch)
sys.stdout.flush()
data = mav_serialport.read(4096)
if data and len(data) > 0:
sys.stdout.write(data)
sys.stdout.flush()
except serial.serialutil.SerialException as e:
print(e)
except KeyboardInterrupt:
mav_serialport.close()
finally:
termios.tcsetattr(fd_in, termios.TCSADRAIN, old_attr)
if __name__ == '__main__':
main()
| 35.019048 | 97 | 0.50136 |
acde3f27736df60e9bccf0d0f07f1da3d5344c7f | 13,141 | py | Python | tests/test_config.py | gitter-badger/conda | e082781cad83e0bc6a41a2870b605f4ee08bbd4d | [
"BSD-3-Clause"
] | null | null | null | tests/test_config.py | gitter-badger/conda | e082781cad83e0bc6a41a2870b605f4ee08bbd4d | [
"BSD-3-Clause"
] | null | null | null | tests/test_config.py | gitter-badger/conda | e082781cad83e0bc6a41a2870b605f4ee08bbd4d | [
"BSD-3-Clause"
] | null | null | null | # (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
import unittest
from os.path import dirname, join
import yaml
import conda.config as config
from tests.helpers import run_conda_command
# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')
def _get_default_urls():
return ['http://repo.continuum.io/pkgs/free',
'http://repo.continuum.io/pkgs/pro']
config.get_default_urls = _get_default_urls
# unset CIO_TEST
try:
del os.environ['CIO_TEST']
except KeyError:
pass
class TestConfig(unittest.TestCase):
# These tests are mostly to ensure API stability
def __init__(self, *args, **kwargs):
config.rc = config.load_condarc(config.rc_path)
super(TestConfig, self).__init__(*args, **kwargs)
def test_globals(self):
self.assertTrue(config.root_dir)
self.assertTrue(config.pkgs_dirs)
self.assertTrue(config.envs_dirs)
self.assertTrue(config.default_prefix)
self.assertTrue(config.platform)
self.assertTrue(config.subdir)
self.assertTrue(config.arch_name)
self.assertTrue(config.bits in (32, 64))
def test_pkgs_dir_from_envs_dir(self):
root_dir = config.root_dir
root_pkgs = join(root_dir, 'pkgs')
for pi, po in [
(join(root_dir, 'envs'), root_pkgs),
('/usr/local/foo/envs' if config.platform != 'win' else 'C:\envs',
'/usr/local/foo/envs/.pkgs' if config.platform != 'win' else 'C:\envs\.pkgs'),
]:
self.assertEqual(config.pkgs_dir_from_envs_dir(pi), po)
def test_proxy_settings(self):
self.assertEqual(config.get_proxy_servers(),
{'http': 'http://user:pass@corp.com:8080',
'https': 'https://user:pass@corp.com:8080'})
def test_normalize_urls(self):
current_platform = config.subdir
assert config.DEFAULT_CHANNEL_ALIAS == 'https://conda.binstar.org/'
assert config.rc.get('channel_alias') == 'https://your.repo/'
for channel in config.normalize_urls(['defaults', 'system',
'https://binstar.org/username', 'file:///Users/username/repo',
'username']):
assert channel.endswith('/%s/' % current_platform)
self.assertEqual(config.normalize_urls([
'defaults', 'system', 'https://conda.binstar.org/username',
'file:///Users/username/repo', 'username'
], 'osx-64'),
[
'http://repo.continuum.io/pkgs/free/osx-64/',
'http://repo.continuum.io/pkgs/pro/osx-64/',
'https://your.repo/binstar_username/osx-64/',
'http://some.custom/channel/osx-64/',
'http://repo.continuum.io/pkgs/free/osx-64/',
'http://repo.continuum.io/pkgs/pro/osx-64/',
'https://conda.binstar.org/username/osx-64/',
'file:///Users/username/repo/osx-64/',
'https://your.repo/username/osx-64/',
])
test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')
def _read_test_condarc():
with open(test_condarc) as f:
return f.read()
# Tests for the conda config command
def test_config_command_basics():
try:
# Test that creating the file adds the defaults channel
assert not os.path.exists('test_condarc')
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# When defaults is explicitly given, it should not be added
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test', '--add', 'channels', 'defaults')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels:
- defaults
- test
"""
os.unlink(test_condarc)
# Duplicate keys should not be added twice
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == stderr == ''
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
assert stdout == ''
assert stderr == "Skipping channels: test, item already exists\n"
assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
os.unlink(test_condarc)
# Test creating a new file with --set
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'always_yes', 'yes')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
always_yes: yes
"""
os.unlink(test_condarc)
finally:
try:
pass
os.unlink(test_condarc)
except OSError:
pass
def test_config_command_get():
try:
# Test --get
with open(test_condarc, 'w') as f:
f.write("""\
channels:
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: no
always_yes: yes
invalid_key: yes
channel_alias: http://alpha.conda.binstar.org
""")
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes True
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'
"""
assert stderr == "unknown key invalid_key\n"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'channels')
assert stdout == """\
--add channels 'defaults'
--add channels 'test'
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1')
assert stdout == """\
--set changeps1 False
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'changeps1', 'channels')
assert stdout == """\
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
"""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'allow_softlinks')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'track_features')
assert stdout == ""
assert stderr == ""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'invalid_key')
assert stdout == ""
assert "invalid choice: 'invalid_key'" in stderr
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--get', 'not_valid_key')
assert stdout == ""
assert "invalid choice: 'not_valid_key'" in stderr
os.unlink(test_condarc)
finally:
try:
pass
os.unlink(test_condarc)
except OSError:
pass
def test_config_command_parser():
try:
# Now test the YAML "parser"
condarc = """\
channels : \n\
- test
- defaults \n\
create_default_packages:
- ipython
- numpy
changeps1 : no
# Here is a comment
always_yes: yes \n\
"""
# First verify that this itself is valid YAML
assert yaml.load(condarc) == {'channels': ['test', 'defaults'],
'create_default_packages': ['ipython', 'numpy'], 'changeps1':
False, 'always_yes': True}
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
assert stdout == """\
--set always_yes True
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'
"""
assert stderr == ''
# List keys with nonstandard whitespace are not yet supported. For
# now, just test that it doesn't muck up the file.
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'create_default_packages', 'sympy')
assert stdout == ''
assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: modified yaml doesn't match what it should be
"""
assert _read_test_condarc() == condarc
# assert _read_test_condarc() == """\
# channels : \n\
# - test
# - defaults \n\
#
# create_default_packages:
# - sympy
# - ipython
# - numpy
#
# changeps1 : no
#
# # Here is a comment
# always_yes: yes \n\
# """
# New keys when the keys are indented are not yet supported either.
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'disallow', 'perl')
assert stdout == ''
assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: couldn't parse modified yaml
"""
assert _read_test_condarc() == condarc
# assert _read_test_condarc() == """\
# channels : \n\
# - test
# - defaults \n\
#
# create_default_packages:
# - sympy
# - ipython
# - numpy
#
# changeps1 : no
#
# # Here is a comment
# always_yes: yes \n\
# disallow:
# - perl
# """
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'mychannel')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels : \n\
- mychannel
- test
- defaults \n\
create_default_packages:
- ipython
- numpy
changeps1 : no
# Here is a comment
always_yes: yes \n\
"""
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--set', 'changeps1', 'yes')
assert stdout == stderr == ''
assert _read_test_condarc() == """\
channels : \n\
- mychannel
- test
- defaults \n\
create_default_packages:
- ipython
- numpy
changeps1 : yes
# Here is a comment
always_yes: yes \n\
"""
os.unlink(test_condarc)
# Test adding a new list key. We couldn't test this above because it
# doesn't work yet with odd whitespace
condarc = """\
channels:
- test
- defaults
always_yes: yes
"""
with open(test_condarc, 'w') as f:
f.write(condarc)
stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
'disallow', 'perl')
assert stdout == stderr == ''
assert _read_test_condarc() == condarc + """\
disallow:
- perl
"""
os.unlink(test_condarc)
finally:
try:
pass
os.unlink(test_condarc)
except OSError:
pass
def test_config_command_remove_force():
try:
# Finally, test --remove, --remove-key, and --force (right now
# --remove and --remove-key require --force)
run_conda_command('config', '--file', test_condarc, '--add',
'channels', 'test')
run_conda_command('config', '--file', test_condarc, '--set',
'always_yes', 'yes')
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test', '--force')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc()) == {'channels': ['defaults'],
'always_yes': True}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'channels', 'test', '--force')
assert stdout == ''
assert stderr == "Error: 'test' is not in the 'channels' key of the config file\n"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove', 'disallow', 'python', '--force')
assert stdout == ''
assert stderr == "Error: key 'disallow' is not in the config file\n"
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == stderr == ''
assert yaml.load(_read_test_condarc()) == {'channels': ['defaults']}
stdout, stderr = run_conda_command('config', '--file', test_condarc,
'--remove-key', 'always_yes', '--force')
assert stdout == ''
assert stderr == "Error: key 'always_yes' is not in the config file\n"
os.unlink(test_condarc)
finally:
try:
pass
os.unlink(test_condarc)
except OSError:
pass
| 28.62963 | 94 | 0.59067 |
acde4058dee24fc43d006e2a4feaef0594b20c8b | 1,618 | py | Python | ctpn/train_net.py | alwc/faster-CTPN | a5836aaa1766c962a4494f3dd186a7d66a05bee2 | [
"Apache-2.0"
] | 187 | 2018-12-13T03:08:51.000Z | 2021-06-27T03:50:56.000Z | ctpn/train_net.py | alwc/faster-CTPN | a5836aaa1766c962a4494f3dd186a7d66a05bee2 | [
"Apache-2.0"
] | 9 | 2018-12-13T04:09:44.000Z | 2019-08-15T15:46:45.000Z | ctpn/train_net.py | hsddlz/faster-CTPN | e8484e6694ec35088f1da90dceffe14d173741bf | [
"Apache-2.0"
] | 47 | 2018-12-13T03:08:36.000Z | 2021-03-30T08:23:57.000Z | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import pprint
import sys
import os.path
import sys, logging
sys.path.append(os.getcwd())
this_dir = os.path.dirname(__file__)
from lib.fast_rcnn.train import get_training_roidb, train_net
from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir
from lib.datasets.factory import get_imdb
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
cfg_from_file('ctpn/text.yml')
print('Using config:')
pprint.pprint(cfg)
imdb = get_imdb('voc_2007_trainval')
print('Loaded dataset `{:s}` for training'.format(imdb.name))
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
log_dir = get_log_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
print('Logs will be saved to `{:s}`'.format(log_dir))
device_name = '/gpu:0'
print(device_name)
network = get_network('VGGnet_train')
train_net(network, imdb, roidb,
output_dir=output_dir,
log_dir=log_dir,
pretrained_model='data/pretrain/VGG_imagenet.npy',
max_iters=int(cfg.TRAIN.max_steps),
restore=bool(int(cfg.TRAIN.restore)))
| 33.020408 | 107 | 0.703337 |
acde40cf526716ba1c729f6afd60f614c30f7ad4 | 520 | py | Python | tests/conftest.py | liningtonlab/sample_git_project | 824a0688329e13c29d47d694c19f9e7ef2a79045 | [
"MIT"
] | null | null | null | tests/conftest.py | liningtonlab/sample_git_project | 824a0688329e13c29d47d694c19f9e7ef2a79045 | [
"MIT"
] | null | null | null | tests/conftest.py | liningtonlab/sample_git_project | 824a0688329e13c29d47d694c19f9e7ef2a79045 | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture
def TEST_SMI():
return ["c1ccccc1", "CC[C@@H](C)c1cccc2ccccc12"]
@pytest.fixture
def TEST_INCHI():
return [
"InChI=1S/C6H6/c1-2-4-6-5-3-1/h1-6H",
"InChI=1S/C14H16/c1-3-11(2)13-10-6-8-12-7-4-5-9-14(12)13/h4-11H,3H2,1-2H3/t11-/m1/s1",
]
@pytest.fixture
def TEST_INCHIKEY():
return [
"UHOVQNZJYSORNB-UHFFFAOYSA-N",
"NDLQGVXBUFZFIX-LLVKDONJSA-N",
]
@pytest.fixture
def TEST_FLAT_SMI():
return ["c1ccccc1", "CCC(C)c1cccc2ccccc12"]
| 18.571429 | 94 | 0.626923 |
acde41d57e1c53bf754557820e8fb2717261c189 | 1,608 | py | Python | module2/webapp/app.py | apexcz/devopsTraining | a19c1bbddcfeb6bb06c32cb9d7e713150817ed26 | [
"Apache-2.0"
] | null | null | null | module2/webapp/app.py | apexcz/devopsTraining | a19c1bbddcfeb6bb06c32cb9d7e713150817ed26 | [
"Apache-2.0"
] | 1 | 2021-04-08T12:03:39.000Z | 2021-04-08T12:03:39.000Z | module2/webapp/app.py | apexcz/devopsTraining | a19c1bbddcfeb6bb06c32cb9d7e713150817ed26 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, render_template, redirect, url_for
import requests
API_KEY = 'YOUR TRANSPORT API API_KEY HERE'
APP_ID = 'YOUR TRANSPORT API APP_ID HERE'
app = Flask(__name__)
@app.route("/hello",methods=["GET"])
def hello():
return "hello from us"
@app.route("/",methods=["GET"])
def home():
post_code = request.args.get('postcode', 'NW5 1TL')
postcode_result = requests.get(f'http://api.postcodes.io/postcodes/{post_code}').json()
latitude = postcode_result['result']['latitude']
longitude = postcode_result['result']['longitude']
transport_params = {'lat': latitude, 'lon': longitude, 'type': 'bus_stop', 'api_key': API_KEY, 'app_id': APP_ID}
transport_result = requests.get('http://transportapi.com/v3/uk/places.json', params=transport_params)
first_two_busstops = transport_result.json()['member'][0:2]
busstops = []
for busstop in first_two_busstops:
bsustop_time_url = f'http://transportapi.com/v3/uk/bus/stop/{busstop["atcocode"]}/live.json?api_key={API_KEY}&app_id={APP_ID}'
busstop_times_result = requests.get(bsustop_time_url).json()
buses = []
bus_departures = busstop_times_result['departures']
for departure in bus_departures.keys():
busstop_times = {
'busstop_name': busstop['name'],
'bus_name': departure,
'aimed_departure_time': bus_departures[departure][0]['aimed_departure_time']
}
busstops.append(busstop_times)
return {'result': busstops}
if __name__ == "__main__":
app.run(debug=True) | 38.285714 | 134 | 0.66791 |
acde42cddf3ebbc4d735faf90817836e35b79dc6 | 3,019 | py | Python | src/sentry/rules/conditions/tagged_event.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 1 | 2021-08-10T06:07:13.000Z | 2021-08-10T06:07:13.000Z | src/sentry/rules/conditions/tagged_event.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | src/sentry/rules/conditions/tagged_event.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 1 | 2017-04-08T04:09:18.000Z | 2017-04-08T04:09:18.000Z | """
sentry.rules.conditions.tagged_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from collections import OrderedDict
from django import forms
from sentry.models import TagKey
from sentry.rules.conditions.base import EventCondition
class MatchType(object):
EQUAL = 'eq'
NOT_EQUAL = 'ne'
STARTS_WITH = 'sw'
ENDS_WITH = 'ew'
CONTAINS = 'co'
NOT_CONTAINS = 'nc'
MATCH_CHOICES = OrderedDict([
(MatchType.EQUAL, 'equals'),
(MatchType.NOT_EQUAL, 'does not equal'),
(MatchType.STARTS_WITH, 'starts with'),
(MatchType.ENDS_WITH, 'ends with'),
(MatchType.CONTAINS, 'contains'),
(MatchType.NOT_CONTAINS, 'does not contain'),
])
class TaggedEventForm(forms.Form):
key = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'key'}))
match = forms.ChoiceField(MATCH_CHOICES.items(), widget=forms.Select(
attrs={'style': 'width:150px'},
))
value = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'value'}))
class TaggedEventCondition(EventCondition):
form_cls = TaggedEventForm
label = u'An event\'s tags match {key} {match} {value}'
def passes(self, event, state, **kwargs):
key = self.get_option('key')
match = self.get_option('match')
value = self.get_option('value')
if not (key and match and value):
return False
value = value.lower()
key = key.lower()
tags = (
v.lower()
for k, v in event.get_tags()
if k.lower() == key or TagKey.get_standardized_key(k) == key
)
if match == MatchType.EQUAL:
for t_value in tags:
if t_value == value:
return True
return False
elif match == MatchType.NOT_EQUAL:
for t_value in tags:
if t_value == value:
return False
return True
elif match == MatchType.STARTS_WITH:
for t_value in tags:
if t_value.startswith(value):
return True
return False
elif match == MatchType.ENDS_WITH:
for t_value in tags:
if t_value.endswith(value):
return True
return False
elif match == MatchType.CONTAINS:
for t_value in tags:
if value in t_value:
return True
return False
elif match == MatchType.NOT_CONTAINS:
for t_value in tags:
if value in t_value:
return False
return True
def render_label(self):
data = {
'key': self.data['key'],
'value': self.data['value'],
'match': MATCH_CHOICES[self.data['match']],
}
return self.label.format(**data)
| 27.697248 | 83 | 0.567738 |
acde431eef42797e2a0dc51267e02aa68346d067 | 2,437 | py | Python | huaweicloud-sdk-scm/huaweicloudsdkscm/v3/model/push_certificate_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-scm/huaweicloudsdkscm/v3/model/push_certificate_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-scm/huaweicloudsdkscm/v3/model/push_certificate_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PushCertificateResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""PushCertificateResponse - a model defined in huaweicloud sdk"""
super(PushCertificateResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PushCertificateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.337209 | 79 | 0.551498 |
acde43613504fc0e825c51e6db64eb7d4f0d0e35 | 7,516 | py | Python | draw_dist.py | xuyu92327/waveform-analysis | 8216cc8d7a75fc38d3fbc236d8b6b6cba963f78c | [
"MIT"
] | null | null | null | draw_dist.py | xuyu92327/waveform-analysis | 8216cc8d7a75fc38d3fbc236d8b6b6cba963f78c | [
"MIT"
] | null | null | null | draw_dist.py | xuyu92327/waveform-analysis | 8216cc8d7a75fc38d3fbc236d8b6b6cba963f78c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import h5py
import argparse
psr = argparse.ArgumentParser()
psr.add_argument('-o', dest='opt', help='output file')
psr.add_argument('ipt', help='input file')
psr.add_argument('--mod', type=str, help='mode of weight', choices=['PEnum', 'Charge'])
psr.add_argument('-p', dest='pri', action='store_false', help='print bool', default=True)
args = psr.parse_args()
mode = args.mod
if mode == 'PEnum':
extradist = 'pdist'
pecount = 'TotalPEnum'
extradistlabel = ['P-dist', r'$P-dist/\mathrm{1}$']
elif mode == 'Charge':
extradist = 'chargediff'
pecount = 'TotalPEpos'
extradistlabel = ['Charge-diff', r'$Charge-diff/\mathrm{mV}\cdot\mathrm{ns}$']
if args.pri:
sys.stdout = None
import csv
import numpy as np
from scipy import stats
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
import matplotlib.gridspec as gridspec
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
plt.rcParams['font.size'] = 10
plt.rcParams['lines.markersize'] = 2
plt.rcParams['lines.linewidth'] = 1.0
plt.rcParams['mathtext.fontset'] = 'cm'
def my_cmap():
plasma = cm.get_cmap('plasma', 65536)
newcolors = plasma(np.linspace(0, 1, 65536))
white = np.array([65535/65536, 65535/65536, 65535/65536, 1])
newcolors[:1, :] = white
newcmp = ListedColormap(newcolors)
return newcmp
mycmp = my_cmap()
with h5py.File(args.ipt, 'r', libver='latest', swmr=True) as distfile:
dt = distfile['Record'][:]
method = distfile['Record'].attrs['Method']
pdf = PdfPages(args.opt)
N = np.percentile(dt['wdist'], 95)
M = 500
fig = plt.figure()
ax = fig.add_subplot(111)
c, t = np.unique(dt[pecount], return_counts=True)
ax.bar(c, t)
ax.set_xlabel(pecount)
ax.set_ylabel('Count')
ax.set_yscale('log')
fig.suptitle(pecount + ' summary')
pdf.savefig(fig)
plt.close(fig)
penum = np.unique(dt[pecount])
l = min(50, penum.max())
wdist_stats = np.zeros((l, 4))
edist_stats = np.zeros((l, 4))
for i in tqdm(range(l), disable=args.pri):
if i+1 in penum:
dtwpi = dt['wdist'][dt[pecount] == i+1]
dtepi = dt[extradist][dt[pecount] == i+1]
wdist_stats[i, 0] = np.median(dtwpi)
wdist_stats[i, 1] = np.median(np.absolute(dtwpi - np.median(dtwpi)))
wdist_stats[i, 2] = np.mean(dtwpi)
wdist_stats[i, 3] = np.std(dtwpi)
edist_stats[i, 0] = np.median(dtepi)
edist_stats[i, 1] = np.median(np.absolute(dtepi - np.median(dtepi)))
edist_stats[i, 2] = np.mean(dtepi)
edist_stats[i, 3] = np.std(dtepi)
rss_recon = dt['RSS_recon'][dt[pecount] == i+1]
rss_truth = dt['RSS_truth'][dt[pecount] == i+1]
plt.rcParams['figure.figsize'] = (12, 6)
fig = plt.figure()
gs = gridspec.GridSpec(2, 2, figure=fig, left=0.05, right=0.95, top=0.9, bottom=0.1, wspace=0.15, hspace=0.2)
ax1 = fig.add_subplot(gs[0, 0])
n = max(np.percentile(dtwpi, 95), N)
ax1.hist(dtwpi[dtwpi < n], bins=200)
a = (dtwpi < n).sum()
b = len(dtwpi)
ax1.set_title('count {}(<{:.2f}ns)/{}={:.2f}'.format(a, n, b, a/b))
ax1.set_xlabel('$W-dist/\mathrm{ns}$')
ax2 = fig.add_subplot(gs[1, 0])
ax2.hist(dtepi, bins=100)
ax2.set_xlabel(extradistlabel[1])
ax3 = fig.add_subplot(gs[1, 1])
deltarss = rss_recon - rss_truth
r1 = np.percentile(deltarss, 0)
r2 = np.percentile(deltarss, 98)
ax3.hist(deltarss[(deltarss > r1) & (deltarss < r2)], bins=200, density=1)
ax3.set_xlabel('$\mathrm{RSS}_{recon} - \mathrm{RSS}_{truth}/\mathrm{mV}^{2}$' + ', within ({:.2f}, {:.2f})'.format(r1, r2))
fig.suptitle(args.ipt.split('/')[-1] + ' ' + pecount + '={:.0f}'.format(i+1))
pdf.savefig(fig)
plt.close(fig)
else:
wdist_stats[i, :] = np.nan
edist_stats[i, :] = np.nan
a = (dt['wdist'] < N).sum()
b = (np.abs(dt[extradist]) < M).sum()
l = len(dt['wdist'])
if mode == 'PEnum':
extradisttitle = None
sumtitle = 'W&'+extradistlabel[0]+' hist,Wd<{:.2f}ns,'.format(N)+'flawed'
elif mode == 'Charge':
extradisttitle = 'count {}(|Cd|<{}mV*ns)/{}={:.2f}'.format(b, M, l, b/l)
sumtitle = 'W-dist&'+extradistlabel[0]+' hist,Wd<{:.2f}ns,'.format(N)+'|Cd|<{}mV*ns,'.format(M)+'flawed'
plt.rcParams['figure.figsize'] = (12, 6)
fig = plt.figure()
gs = gridspec.GridSpec(2, 2, figure=fig, left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.3)
ax1 = fig.add_subplot(gs[0, 0])
ax1.hist(dt['wdist'][dt['wdist']<N], bins=100, density=1)
ax1.set_title('count {}(Wd<{:.2f}ns)/{}={:.2f}'.format(a, N, l, a/l))
ax1.set_xlabel('$W-dist/\mathrm{ns}$')
ax2 = fig.add_subplot(gs[1, 0])
ax2.hist(dt[extradist][np.abs(dt[extradist]) < M], bins=100, density=1)
ax2.set_title(extradisttitle)
ax2.set_xlabel(extradistlabel[1])
ax3 = fig.add_subplot(gs[:, 1])
vali = np.logical_and(np.logical_and(np.abs(dt[extradist])<M, dt['wdist']<N),np.logical_and(dt[extradist]!=0, dt['wdist']!=0))
h2 = ax3.hist2d(dt['wdist'][vali], dt[extradist][vali], bins=(100, 100), cmap=mycmp)
fig.colorbar(h2[3], ax=ax3, aspect=50)
ax3.set_xlabel('$W-dist/\mathrm{ns}$')
ax3.set_ylabel(extradistlabel[1])
ax3.set_title(sumtitle)
fig.suptitle(args.ipt.split('/')[-1] + ' Dist stats, method = ' + str(method))
pdf.savefig(fig)
plt.close(fig)
plt.rcParams['figure.figsize'] = (12, 6)
fig = plt.figure()
gs = gridspec.GridSpec(1, 2, figure=fig, left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0.2, hspace=0.2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(wdist_stats[:, 0], c='C0', label='W median')
ax1.plot(wdist_stats[:, 0] + wdist_stats[:, 1], c='C1', label='W median + mad')
ax1.plot(wdist_stats[:, 0] - wdist_stats[:, 1], c='C1', label='W median - mad')
ax1.plot(wdist_stats[:, 2], c='C2', label='W mean')
ax1.plot(wdist_stats[:, 2] + wdist_stats[:, 3], c='C3', label='W mean + std')
ax1.plot(wdist_stats[:, 2] - wdist_stats[:, 3], c='C3', label='W mean - std')
ax1.set_xlabel(pecount)
ax1.set_ylabel('$W-dist/\mathrm{ns}$')
ax1.set_title('W-dist vs ' + pecount + ' stats')
ax1.legend()
ax2 = fig.add_subplot(gs[0, 1])
ax2.plot(edist_stats[:, 0], c='C0', label=extradistlabel[0][0] + ' median')
ax2.plot(edist_stats[:, 0] + edist_stats[:, 1], c='C1', label=extradistlabel[0][0] + ' median + mad')
ax2.plot(edist_stats[:, 0] - edist_stats[:, 1], c='C1', label=extradistlabel[0][0] + ' median - mad')
ax2.plot(edist_stats[:, 2], c='C2', label=extradistlabel[0][0] + ' mean')
ax2.plot(edist_stats[:, 2] + edist_stats[:, 3], c='C3', label=extradistlabel[0][0] + ' mean + std')
ax2.plot(edist_stats[:, 2] - edist_stats[:, 3], c='C3', label=extradistlabel[0][0] + ' mean - std')
ax2.set_xlabel(pecount)
ax2.set_ylabel(extradistlabel[1])
ax2.set_title(extradistlabel[0] + ' vs ' + pecount + ' stats')
ax2.legend()
fig.suptitle(args.ipt.split('/')[-1] + ' Dist stats, method = ' + str(method))
pdf.savefig(fig)
plt.close(fig)
pdf.close()
| 42.948571 | 136 | 0.594598 |
acde4429fbca774b8a891704d61f5b2cf0f00b95 | 788 | py | Python | profiles_api/migrations/0002_profilefeeditem.py | saadbintariq444/profiles-rest-api | 18016a725d72bffe230744ce3be451c8cddfb4ea | [
"MIT"
] | null | null | null | profiles_api/migrations/0002_profilefeeditem.py | saadbintariq444/profiles-rest-api | 18016a725d72bffe230744ce3be451c8cddfb4ea | [
"MIT"
] | 7 | 2020-06-06T01:36:03.000Z | 2022-02-10T14:20:24.000Z | profiles_api/migrations/0002_profilefeeditem.py | saadbintariq444/profiles-rest-api | 18016a725d72bffe230744ce3be451c8cddfb4ea | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-03-26 09:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the ProfileFeedItem table."""

    dependencies = [
        ('profiles_api', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProfileFeedItem',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status_text', models.CharField(max_length=255)),
                # Set once at row creation; never updated afterwards.
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # Deleting the user cascades to their feed items.
                ('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.52 | 126 | 0.633249 |
acde44839db52f493f2fb9e67d208dd192ad32cb | 2,852 | py | Python | locations/spiders/frys_electronics.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | 1 | 2019-08-19T10:00:55.000Z | 2019-08-19T10:00:55.000Z | locations/spiders/frys_electronics.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | locations/spiders/frys_electronics.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class FrysElectronisSpider(scrapy.Spider):
    """Scrape Fry's Electronics store locations into GeojsonPointItem records.

    NOTE(review): the class name misspells "Electronics"; renaming would be
    cosmetic only and is left alone here.
    """
    name = 'frys-electronics'
    allowed_domains = ['www.frys.com']
    start_urls = (
        'https://www.frys.com/ac/storeinfo/storelocator',
    )

    def parse_store(self, response):
        """Parse a single store page and yield one GeojsonPointItem."""
        print('\n', response.url)  # NOTE(review): debug print left in
        address_lines = response.xpath('//div[@id="rightside"]/div[@id="text3"]/div[@id="address"]//b/text()').extract()
        address = ', '.join([ a.strip() for a in address_lines ])
        # Text nodes containing the word 'Phone' carry the phone number.
        phone = [ t for t in response.xpath('//div[@id="rightside"]/div[@id="text3"]/div[@id="address"]//text()').extract() if 'Phone' in t ]
        # Text nodes containing a degree sign carry DMS coordinates.
        coordinates = [ c for c in response.xpath('//div[@id="rightside"]/div[@id="text3"]/div[@id="maps"]/text()').extract() if '°' in c ]
        properties = {
            'addr_full': address,
            'website': response.url,
            'ref': response.url.split('/')[-1],
        }
        if len(phone) == 1:
            properties['phone'] = phone[0].replace('Phone', '').strip().replace('(', '').replace(') ', '-')
        # Try to parse the degree, minutes, seconds coordinate pair
        if coordinates and len(coordinates) == 1:
            print('coordinates', coordinates)  # NOTE(review): debug print left in
            # Add a comma to separate lat and lon
            if '" -' in coordinates[0]:
                coordinates[0] = coordinates[0].replace('" -', '", -')
            latlon = coordinates[0].split(',')
            properties['lat'] = self.dms2dd(latlon[0])
            properties['lon'] = self.dms2dd(latlon[1])
        else:
            # Fall back to the ll URL param in the google maps URL
            mapsLink = response.xpath('//div[@id="rightside"]/div[@id="text3"]/div[@id="maps"]/a/@href').extract_first()
            # NOTE(review): extract_first() may return None, in which case the
            # membership test below raises TypeError -- confirm every store
            # page carries a maps link.
            if 'll=' in mapsLink:
                latlon = mapsLink.split('ll=')[1].split('&')[0].split(',')
                properties['lat'] = float(latlon[0])
                properties['lon'] = float(latlon[1])
        yield GeojsonPointItem(**properties)

    def dms2dd(self, dms):
        """Convert a degrees/minutes/seconds string into decimal degrees.

        The sign is taken from a leading '-' or a 'W' hemisphere marker.
        """
        sign = 1
        if '-' in dms or 'W' in dms:
            sign = -1
        degrees = [ d.strip() for d in dms.split('°') ]
        d = int(degrees[0].replace('+', '').replace('-', '').replace('N', '').replace('W', ''))
        minutes = degrees[1].split("'")
        if '.' in minutes[0]:
            # Decimal minutes: there is no separate seconds component.
            m = float(minutes[0]) / 60
            s = 0
        else:
            m = float(minutes[0]) / 60
            s = float(minutes[1].replace('"', '').strip()) / 3600
        dd = (d + m + s) * sign
        return dd

    def parse(self, response):
        """Follow every store link on the locator page to parse_store."""
        urls = response.xpath('//div[@id="main-stores"]//table//a/@href').extract()
        for path in urls:
            yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
| 41.941176 | 141 | 0.532959 |
acde4598b14ae831c6bf92f25b9f5fb9a6c8f62c | 358 | py | Python | smart_test.py | shinde-shantanu/Repeated_A_Star | 31254f1987998a24637f036f3bde18b61a5a1ba4 | [
"MIT"
] | null | null | null | smart_test.py | shinde-shantanu/Repeated_A_Star | 31254f1987998a24637f036f3bde18b61a5a1ba4 | [
"MIT"
] | null | null | null | smart_test.py | shinde-shantanu/Repeated_A_Star | 31254f1987998a24637f036f3bde18b61a5a1ba4 | [
"MIT"
] | null | null | null | from Smart_Repeated_A_Star import smart_repeated_a_star
from Grid_Generator import gen_grid
# Random-grid alternative, kept for convenience:
#grid = gen_grid(5, 0)
# Hard-coded 5x5 test grid; presumably 1 marks a blocked cell -- TODO confirm
# against Grid_Generator's convention.
grid = [[0, 0, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
print(grid)
# Pretty-print the grid one row per line.
for x in grid:
    print(x)
# NOTE(review): the meaning of the arguments (5, 0.3, 2) is defined by
# smart_repeated_a_star -- confirm before changing.
result, final, dis, cells, start, stop = smart_repeated_a_star(grid, 5, 0.3, 2)
print(result)
print(final)
| 25.571429 | 92 | 0.642458 |
acde45aaba56adf194c9c2de6fd15b9fd1ddc589 | 4,547 | py | Python | yateto/codegen/copyscaleadd/csa_gen.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 2 | 2021-07-01T14:23:01.000Z | 2022-01-12T01:06:24.000Z | yateto/codegen/copyscaleadd/csa_gen.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 14 | 2019-06-25T18:12:29.000Z | 2022-02-08T15:17:27.000Z | yateto/codegen/copyscaleadd/csa_gen.py | ZaubererHaft/yateto | 88a02d160da9bfa7f74a4280deaf465f15cae0fb | [
"BSD-3-Clause"
] | 3 | 2021-05-14T13:04:28.000Z | 2021-12-24T03:15:35.000Z | from ..common import *
from ..cache import RoutineGenerator, GpuRoutineGenerator
from ..common import BatchedOperationsAux
# Optional modules
import importlib.util

# gemmforge is an optional dependency; ``gf`` is only bound when present.
gf_spec = importlib.util.find_spec('gemmforge')
try:
  if gf_spec:
    gf = gf_spec.loader.load_module()
except Exception as err:
  # BUG FIX: the original did ``raise ('Cannot load gemmforge.')`` which
  # raises TypeError at runtime -- strings are not exceptions. Raise a real
  # ImportError and chain the underlying cause instead of a bare ``except:``.
  raise ImportError('Cannot load gemmforge.') from err
class CopyScaleAddGenerator(object):
  """Generates GPU code for copy-scale-add operations (B = beta * B + alpha * A)
  by delegating to the optional gemmforge backend."""

  def __init__(self, arch, descr):
    self._arch = arch
    self._descr = descr

  def _formatTerm(self, alpha, term):
    """Generate a sub-string of a term for a source code which is going to be used
    inside of the inner most for-loop

    Args:
      alpha (Union[Scalar, float]): scaling factor applied to the term
      term (IndexedTensorDescription): the tensor operand to format

    Returns:
      str: the (optionally scaled) indexed term, or '' when alpha is zero

    Examples:
      >>> from yateto.memory import DenseMemoryLayout
      >>> from yateto.ast.indices import Indices
      >>> from yateto.codegen.common import IndexedTensorDescription
      >>> from yateto.aspp import dense
      >>> from yateto.codegen.copyscaleadd.generic import Generic
      >>> tensor_shape = (5, 6)
      >>> layout = DenseMemoryLayout(shape=tensor_shape)
      >>> indices = Indices(indexNames='ij', shape=tensor_shape)
      >>> description = IndexedTensorDescription(name='A', \
                                                 indices=indices, \
                                                 memoryLayout=layout, \
                                                 eqspp=dense(shape=tensor_shape))
      >>> obj = Generic(arch='dummy', descr=description)
      >>> obj._formatTerm(alpha=3, term=description)
      '3 * A[1*i + 5*j]'
    """
    prefix = ''
    if alpha == 0.0:
      # A zero coefficient drops the term entirely.
      return ''
    if alpha == 1.0:
      prefix = term.name
    else:
      prefix = '{} * {}'.format(alpha, term.name)
    return '{}[{}]'.format(prefix, term.memoryLayout.addressString(term.indices))

  def generate(self, cpp, routineCache):
    """Generates a tensor equation of a form: B = beta * B + alpha * A

    Args:
      cpp (IO): a file stream
      routineCache: cache into which the generated GPU routine is registered

    Returns:
      the product ``m.size() * n.size()`` of the processed loop ranges

    Raises:
      RuntimeError: if the optional gemmforge module is not installed
      gf.GenerationError: re-raised after logging if code generation fails
    """
    if (gf_spec):
      d = self._descr # type: copyscaleadd.Description
      # Loop bounds come from the result tensor's first two indices.
      m = d.loopRanges[d.result.indices[0]]
      n = d.loopRanges[d.result.indices[1]]
      alpha = d.alpha
      aux = BatchedOperationsAux(self._arch.typename)
      matrix_a = gf.YatetoInterface.produce_dense_matrix((m, n),
                                                         d.term.memoryLayout.bbox(),
                                                         addressing=aux.deduce_addresing(d.term),
                                                         transpose=False)
      matrix_b = gf.YatetoInterface.produce_dense_matrix((m, n),
                                                         d.result.memoryLayout.bbox(),
                                                         addressing=aux.deduce_addresing(d.result),
                                                         transpose=False)
      try:
        vm = gf.vm_factory(self._arch.name, self._arch.sub_name, fp_type=self._arch.typename)
        forge_generator = gf.CsaGenerator(vm)
        forge_generator.set(matrix_a, matrix_b, alpha, d.beta)
        routine_name = forge_generator.get_base_name()
        args = [str(alpha),
                aux.deduce_arg(d.term),
                aux.deduce_arg(d.result),
                BatchedOperationsAux.NUM_ELEMENTS_NAME,
                BatchedOperationsAux.STREAM_PTR_NAME]
        # Emit the call to the generated batched routine.
        cpp("{}({});".format(routine_name, ', '.join(args)))
        routineCache.addRoutine(routine_name, GemmForgeWriter(forge_generator))
      except gf.GenerationError as err:
        print("ERROR: {}".format(err))
        raise err
      return m.size() * n.size()
    else:
      raise RuntimeError('gemmforge module is not found. You can install it with pip3. '
                         'e.g., pip3 install gemmforge')
class GemmForgeWriter(GpuRoutineGenerator):
  """Appends gemmforge-produced GPU kernels and launchers to a source file."""

  def __init__(self, forge_generator):
    self._generator = forge_generator
    self._basename = forge_generator.get_base_name()

  def __eq__(self, other):
    # Two writers are interchangeable when they emit the same base routine.
    return isinstance(other, GemmForgeWriter) and self._basename == other._basename

  def header(self, cpp):
    # The emitted code relies on gemmforge's auxiliary declarations.
    cpp.include('gemmforge_aux.h')

  def __call__(self, routineName, fileName):
    """Generate the routine, append its code to ``fileName`` and return the
    launcher prototype for the header."""
    self._generator.generate()
    declaration = self._generator.get_launcher_header()
    launcher = self._generator.get_launcher()
    kernel = self._generator.get_kernel()
    with open(fileName, "a") as sink:
      sink.write(kernel)
      sink.write(launcher)
    return declaration
| 33.189781 | 99 | 0.592259 |
acde45ac38c59e8eefb7239bd0bdfd8fe0123e2a | 125 | py | Python | src/py_eventbus_pg/__init__.py | sampot/py-eventbus-pg | 5055e9541b58535d9344fd0ec408bc432e82de52 | [
"MIT"
] | 1 | 2022-02-21T06:50:40.000Z | 2022-02-21T06:50:40.000Z | src/py_eventbus_pg/__init__.py | sampot/py-eventbus-pg | 5055e9541b58535d9344fd0ec408bc432e82de52 | [
"MIT"
] | null | null | null | src/py_eventbus_pg/__init__.py | sampot/py-eventbus-pg | 5055e9541b58535d9344fd0ec408bc432e82de52 | [
"MIT"
] | null | null | null | __version__ = "0.1.0"
# Re-export the core event-bus types at package level.
from .event_bus import EventBus, Event, Subscription

# Public API for ``from py_eventbus_pg import *``.
__all__ = ["EventBus", "Event", "Subscription"]
| 20.833333 | 52 | 0.72 |
acde45e05656b965acb333825826d66dc194e1af | 399 | py | Python | backend/site/MWS_Backend/MWS_Backend/wsgi.py | singapore19/team-3 | f021dc98f809faa62932be09c0ed00bec2aa5af3 | [
"Net-SNMP",
"Xnet",
"RSA-MD"
] | null | null | null | backend/site/MWS_Backend/MWS_Backend/wsgi.py | singapore19/team-3 | f021dc98f809faa62932be09c0ed00bec2aa5af3 | [
"Net-SNMP",
"Xnet",
"RSA-MD"
] | null | null | null | backend/site/MWS_Backend/MWS_Backend/wsgi.py | singapore19/team-3 | f021dc98f809faa62932be09c0ed00bec2aa5af3 | [
"Net-SNMP",
"Xnet",
"RSA-MD"
] | null | null | null | """
WSGI config for MWS_Backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings unless the environment already set it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MWS_Backend.settings')

# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
acde46e791619c523c16162d79761604225ea769 | 814 | py | Python | tests/tasks/test_manifest.py | jerjohste/exopy_pulses | 844660082331f8972039a085397a92c9a06a46af | [
"BSD-3-Clause"
] | 2 | 2016-02-09T20:23:16.000Z | 2017-09-04T10:18:45.000Z | tests/tasks/test_manifest.py | jerjohste/exopy_pulses | 844660082331f8972039a085397a92c9a06a46af | [
"BSD-3-Clause"
] | 15 | 2015-12-14T21:58:50.000Z | 2017-10-12T07:04:33.000Z | tests/tasks/test_manifest.py | jerjohste/exopy_pulses | 844660082331f8972039a085397a92c9a06a46af | [
"BSD-3-Clause"
] | 2 | 2018-04-20T14:51:07.000Z | 2020-01-27T16:12:12.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Check that the manifest does register the tasks.
"""
import enaml
with enaml.imports():
from exopy_pulses.tasks.manifest import PulsesTasksManifest
def test_manifest(task_workbench):
    """Check that registering PulsesTasksManifest exposes the pulse tasks.

    NOTE(review): the previous docstring said "addition of the sequence
    editor", but the assertion below only checks task registration.
    """
    task_workbench.register(PulsesTasksManifest())
    pl = task_workbench.get_plugin('exopy.tasks')
    assert pl.get_task_infos('exopy_pulses.TransferPulseSequenceTask')
| 31.307692 | 79 | 0.601966 |
acde47e9239414340202c5b80d88779102852a4b | 142 | py | Python | src/utils/Constants.py | Eerie6560/PonjoPyWrapper | 378fc1cd1656303e7f143943c95f6ad3f7d9b374 | [
"MIT"
] | null | null | null | src/utils/Constants.py | Eerie6560/PonjoPyWrapper | 378fc1cd1656303e7f143943c95f6ad3f7d9b374 | [
"MIT"
] | null | null | null | src/utils/Constants.py | Eerie6560/PonjoPyWrapper | 378fc1cd1656303e7f143943c95f6ad3f7d9b374 | [
"MIT"
] | null | null | null | ELIXIR_MUSIC = "838118537276031006"
# NOTE(review): these look like Discord snowflake IDs for the Elixir bot
# variants -- confirm against the bot applications before relying on them.
ELIXIR_PREMIUM = "937394940009414696"
ELIXIR_TWO = "946122707844603974"
ELIXIR_BLUE = "946443578849243156" | 35.5 | 37 | 0.838028 |
acde47f7cdaa571a2abde3c7a22b637d977809a6 | 1,445 | py | Python | apps/credit_card/models.py | code-yeongyu/backend | cafad5a1cae47ab86ca71028379b72837ea4543d | [
"MIT"
] | 1 | 2021-07-09T01:27:16.000Z | 2021-07-09T01:27:16.000Z | apps/credit_card/models.py | code-yeongyu/backend | cafad5a1cae47ab86ca71028379b72837ea4543d | [
"MIT"
] | 10 | 2021-07-08T04:26:55.000Z | 2021-07-20T14:01:58.000Z | apps/credit_card/models.py | code-yeongyu/pangpang-eats-backend | cafad5a1cae47ab86ca71028379b72837ea4543d | [
"MIT"
] | 3 | 2021-07-08T04:06:59.000Z | 2021-10-02T04:32:16.000Z | from django.db import models
from django.core.validators import MinLengthValidator
from apps.user.models import User
from pangpangeats.settings import AUTH_USER_MODEL
from apps.common.models import BaseModel
from apps.common.validators import numeric_validator
class CreditCard(BaseModel):
    """A credit card registered by a user."""

    # Deleting the owning user cascades to their stored cards.
    owner: User = models.ForeignKey(AUTH_USER_MODEL,
                                    on_delete=models.CASCADE,
                                    null=False)
    owner_first_name = models.CharField(max_length=5, null=False, blank=False)
    owner_last_name = models.CharField(max_length=5, null=False, blank=False)
    # Optional user-chosen nickname for the card.
    alias = models.CharField(max_length=100, null=True, blank=True)
    # Exactly 16 characters (MinLengthValidator + max_length pin the length).
    # NOTE(review): unlike ``cvc`` there is no numeric_validator here, so
    # digits-only is not enforced at the model level.
    card_number = models.CharField(
        validators=(MinLengthValidator(16), ),
        max_length=16,
        null=False,
        blank=False,
    )
    # Exactly 3 characters, digits only.
    cvc = models.CharField(
        validators=(
            MinLengthValidator(3),
            numeric_validator,
        ),
        max_length=3,
        null=False,
        blank=False,
    )
    # Both must lie in the future; validated in the serializer rather than
    # on the model (original author's note preserved in spirit).
    expiry_year = models.PositiveSmallIntegerField(null=False)
    expiry_month = models.PositiveSmallIntegerField(null=False)
def __str__(self): # pragma: no cover
CARD_NUMBER = self.card_number[:4] + "-****" * 3
return f"{self.owner_last_name}{self.owner_first_name} {CARD_NUMBER}" | 39.054054 | 111 | 0.673356 |
acde4828b7893c7e0e0ce69f336f4be9febd410d | 4,070 | py | Python | tkdnn_python/darknet_rt.py | zauberzeug/l4t-tkdnn-darknet | 9a9aa1c42464a64f3a86bb07e8c316e209a9c207 | [
"MIT"
] | 1 | 2022-03-02T05:36:56.000Z | 2022-03-02T05:36:56.000Z | tkdnn_python/darknet_rt.py | zauberzeug/l4t-tkdnn-darknet | 9a9aa1c42464a64f3a86bb07e8c316e209a9c207 | [
"MIT"
] | null | null | null | tkdnn_python/darknet_rt.py | zauberzeug/l4t-tkdnn-darknet | 9a9aa1c42464a64f3a86bb07e8c316e209a9c207 | [
"MIT"
] | null | null | null | from ctypes import *
import cv2
import numpy as np
import argparse
import os
from threading import Thread
import time
class IMAGE(Structure):
    # ctypes mirror of the darknet C ``image`` struct; field order and types
    # form the ABI with libdarknetRT -- do not reorder.
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class BOX(Structure):
    # ctypes mirror of the darknet bounding-box struct (x, y, w, h floats);
    # layout is ABI with libdarknetRT -- do not reorder.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    # ctypes mirror of the wrapper's detection struct: class id, box,
    # confidence, and a fixed 20-byte class name. Layout is ABI -- keep order.
    _fields_ = [("cl", c_int),
                ("bbox", BOX),
                ("prob", c_float),
                ("name", c_char*20),
                ]
# Load the tkDNN darknet wrapper and declare the C call signatures we use.
lib = CDLL("./build/libdarknetRT.so", RTLD_GLOBAL)

# load_network(weightfile, n, batch) -> net handle; the meaning of the two
# ints is defined by the C side -- confirm against the wrapper header.
load_network = lib.load_network
load_network.argtypes = [c_char_p, c_int, c_int]
load_network.restype = c_void_p

# Copy raw pixel bytes into a preallocated IMAGE buffer.
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]

# Allocate an IMAGE of (width, height, channels).
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

# Run the network on an image already loaded into the buffer.
do_inference = lib.do_inference
do_inference.argtypes = [c_void_p, IMAGE]

# Fetch detections above a threshold; the int* receives the count.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_float, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
def detect_image(net, darknet_image, thresh=.2):
    """Run inference on a loaded image buffer.

    Returns a list of (class_name, confidence, (x, y, w, h)) tuples for every
    detection with confidence above ``thresh``.
    """
    num = c_int(0)
    pnum = pointer(num)
    do_inference(net, darknet_image)
    dets = get_network_boxes(net, thresh, pnum)
    return [
        (dets[i].name.decode("ascii"),
         dets[i].prob,
         (dets[i].bbox.x, dets[i].bbox.y, dets[i].bbox.w, dets[i].bbox.h))
        for i in range(pnum[0])
    ]
def loop_detect(detect_m, file_path, width, height):
    """Run detection on a still image (10 repeats) or every frame of an .mp4,
    printing each detection and a final frame/time/FPS summary."""
    start = time.time()
    cnt = 0
    if not file_path.endswith('.mp4'):
        image = cv2.imread(file_path)
        # Convert OpenCV's BGR ordering to RGB before detection.
        img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Repeat the same image 10 times (effectively a throughput measure).
        for _ in range(10):
            detections = detect_m.detect(img_rgb)
            cnt += 1
            for det in detections:
                print(det)
            print()
    else:
        stream = cv2.VideoCapture(file_path)
        while stream.isOpened():
            ret, image = stream.read()
            if ret is False:
                break
            image = cv2.resize(image,
                               (width, height),
                               interpolation=cv2.INTER_LINEAR)
            # NOTE(review): unlike the still-image branch, video frames are
            # passed in BGR (no cvtColor), and the still image is never
            # resized -- confirm which behaviour is intended.
            detections = detect_m.detect(image)
            cnt += 1
            for det in detections:
                print(det)
        stream.release()
    end = time.time()
    print(f"frame:{cnt},time:{end-start},FPS:{cnt/(end-start)}")
class YOLO4RT(object):
    """Thin wrapper around a TensorRT-converted darknet network (via ctypes)."""

    def __init__(self,
                 image_width,
                 image_height,
                 weight_file,
                 conf_thres=0.2,
                 device='cuda'):
        # NOTE(review): ``device`` is accepted but never used.
        self.image_width = image_width
        self.image_height = image_height
        # The trailing (9, 1) goes straight to the C load_network; presumably
        # class count and batch size -- confirm against the wrapper header.
        self.model = load_network(weight_file.encode("ascii"), 9, 1)
        # Reusable 3-channel input buffer matching the network dimensions.
        self.darknet_image = make_image(image_width, image_height, 3)
        self.thresh = conf_thres

    def detect(self, image):
        """Detect objects in ``image`` (a numpy array of matching size).

        Returns the detection list, or None if anything raised: the
        exception is only printed, so callers that iterate the result will
        then fail with TypeError.
        """
        try:
            frame_data = image.ctypes.data_as(c_char_p)
            copy_image_from_bytes(self.darknet_image, frame_data)
            detections = detect_image(self.model, self.darknet_image, thresh=self.thresh)
            return detections
        except Exception as e:
            print(e)
def parse_args():
    """Build and evaluate the command line for the tkDNN detector.

    ``width``/``height`` default to the ints 1600/1200, but arrive as
    strings when supplied on the command line (no ``type=`` conversion).
    """
    cli = argparse.ArgumentParser(description='tkDNN detect')
    cli.add_argument('weight', help='rt weightfile path')
    cli.add_argument('width', nargs='?', help='width of frame', default=1600)
    cli.add_argument('height', nargs='?', help='height of frame', default=1200)
    cli.add_argument('--file', type=str, help='file path')
    return cli.parse_args()
if __name__ == '__main__':
args = parse_args()
detect_m = YOLO4RT(image_width=int(args.width), image_height=int(args.height), weight_file=args.weight)
t = Thread(target=loop_detect, args=(detect_m, args.file, int(args.width), int(args.height)), daemon=True)
t.start()
t.join() | 28.461538 | 110 | 0.597789 |
acde4883280327e47c61116bfa2c4cdc6e9cc7d3 | 657 | py | Python | smriprep/__init__.py | effigies/smriprep | f1cfc37bcdc346549dbf1d037cdade3a3b32d5de | [
"Apache-2.0"
] | null | null | null | smriprep/__init__.py | effigies/smriprep | f1cfc37bcdc346549dbf1d037cdade3a3b32d5de | [
"Apache-2.0"
] | null | null | null | smriprep/__init__.py | effigies/smriprep | f1cfc37bcdc346549dbf1d037cdade3a3b32d5de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This pipeline is developed by the Poldrack lab at Stanford University
(https://poldracklab.stanford.edu/) for use at
the Center for Reproducible Neuroscience (http://reproducibility.stanford.edu/),
as well as for open-source software distribution.
"""
from .__about__ import ( # noqa
__version__,
__author__,
__copyright__,
__credits__,
__license__,
__maintainer__,
__email__,
__status__,
__url__,
__package__,
__description__,
__longdesc__
)
| 25.269231 | 80 | 0.692542 |
acde496ca2e97cac7514b928430edabdf3f3dcda | 814 | py | Python | LeetCode/Subtree of Another Tree.py | aaditkamat/competitive-programming | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | null | null | null | LeetCode/Subtree of Another Tree.py | aaditkamat/competitive-programming | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | 3 | 2019-02-24T11:42:28.000Z | 2019-06-03T14:15:46.000Z | LeetCode/Subtree of Another Tree.py | aaditkamat/online-judge-submissions | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | null | null | null | # Title: Subtree of Another Tree
# Runtime: 180 ms
# Memory: 14.5 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Subtree-of-another-tree check.

    BUG FIX: the ``TreeNode`` annotations are now string forward references;
    the TreeNode class definition above is commented out, so the bare names
    raised NameError when this module was executed outside the judge.
    """

    def sameTree(self, s: 'TreeNode', t: 'TreeNode') -> bool:
        """Return True if the trees rooted at s and t are structurally
        identical with equal node values."""
        if not s and not t:
            return True
        if not s or not t:
            return False
        return (s.val == t.val
                and self.sameTree(s.left, t.left)
                and self.sameTree(s.right, t.right))

    def isSubtree(self, s: 'TreeNode', t: 'TreeNode') -> bool:
        """Return True if t occurs as a subtree of s (or both are empty).

        Recurses into s's children first, then compares whole trees at s.
        """
        if not s and not t:
            return True
        if not s or not t:
            return False
        if self.isSubtree(s.left, t) or self.isSubtree(s.right, t):
            return True
        return self.sameTree(s, t)
| 29.071429 | 99 | 0.561425 |
acde49d63bb913b34f65f9cd65ac0cea0f91b70e | 2,217 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ip_configuration_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ip_configuration_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_ip_configuration_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayIPConfiguration(SubResource):
    """IP configuration of an application gateway. Currently 1 public and 1
    private IP configuration is allowed.

    :param id: Resource ID.
    :type id: str
    :param subnet: Reference of the subnet resource. A subnet from where
     application gateway gets its private address.
    :type subnet: ~azure.mgmt.network.v2018_01_01.models.SubResource
    :param provisioning_state: Provisioning state of the application gateway
     subnet resource. Possible values are: 'Updating', 'Deleting', and
     'Failed'.
    :type provisioning_state: str
    :param name: Name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    """

    # NOTE: this file is generated by AutoRest (see the file header); manual
    # edits will be lost on regeneration. The map below describes the
    # wire-format serialization of each attribute.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, subnet=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
        super(ApplicationGatewayIPConfiguration, self).__init__(id=id, **kwargs)
        self.subnet = subnet
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
        self.type = type
| 41.055556 | 149 | 0.62562 |
acde4a6c69fa6f6b37cd3bba86e8bc9c32b92489 | 3,780 | py | Python | lattice.py | SaberSwordBoy/lattice_hawaii | e07c476544323914f5956f5e77f9d49b308363ce | [
"MIT"
] | null | null | null | lattice.py | SaberSwordBoy/lattice_hawaii | e07c476544323914f5956f5e77f9d49b308363ce | [
"MIT"
] | null | null | null | lattice.py | SaberSwordBoy/lattice_hawaii | e07c476544323914f5956f5e77f9d49b308363ce | [
"MIT"
] | null | null | null | import os
import random
import time
"""Lattice Hawaii - In Python!
Created by Bryce Casamento
for instructions on how to play see https://github.com/SaberSwordBoy/lattice_hawaii
NOTE: running this file will do nothing! this is just the basic functions and logic for the game.
Run lattice_cli.py to actually play!
"""
# Colors -- single-character codes used on tiles.
RED = 'r'
GREEN = 'g'
BLUE = 'b'
CYAN = 'c'
PURPLE = 'p'
YELLOW = 'y'

# Patterns -- single-character codes used on tiles.
TURTLE = '1'
LIZARD = '2'
FLOWER = '3'
FEATHER = '4'
BIRD = '5'
FISH = '6'

# Ordered lists consumed by create_tileset().
colors = [RED, GREEN, BLUE, CYAN, PURPLE, YELLOW]
patterns = [TURTLE, LIZARD, FLOWER, FEATHER, BIRD, FISH]
def create_tileset():
    """Return the full tile set: every (color, pattern) pair, twice over."""
    single_set = [(color, pattern) for color in colors for pattern in patterns]
    return single_set * 2
# Other Tiles
BLANK = ('0', '0')  # sentinel value for an empty board cell
SUN_SPOTS = [ # these give extra points
    (0, 0), (1, 1), (2, 2), (0, 4),
    (0, 8), (1, 7), (2, 6), (4, 8),
    (8, 0), (7, 1), (6, 2), (4, 0),
    (8, 8), (7, 7), (6, 6), (8, 4)
]

# Board Dimensions (cells per side)
board_width = 9
board_height = 9
def create_board():
    """Return a fresh board: every (row, col) coordinate maps to BLANK."""
    return {(row, col): BLANK
            for row in range(board_height)
            for col in range(board_width)}
def print_tiles(tileset):
    """Print the tile set on one line, each tile rendered as |color:pattern|."""
    cells = ["{}:{}".format(color, pattern) for color, pattern in tileset]
    print(("|{}|" * len(tileset)).format(*cells))
def print_board(board):
    """Print every cell value (color:pattern), nine per row."""
    cells = ["{}:{}".format(value[0], value[1]) for value in board.values()]
    template = """| {} | {} | {} | {} | {} | {} | {} | {} | {} |\n\n""" * board_height
    print(template.format(*cells))
def print_board_keys(board):
    """Print every cell coordinate; sun spots use '$' instead of ':'."""
    cells = []
    for coord in board.keys():
        separator = '$' if coord in SUN_SPOTS else ':'
        cells.append("{}{}{}".format(coord[0], separator, coord[1]))
    template = """| {} | {} | {} | {} | {} | {} | {} | {} | {} |\n\n""" * board_height
    print(template.format(*cells))
def check_legal_move(board, color, pattern, column, row):
    """Return True if placing (color, pattern) at this cell is legal:
    every orthogonal neighbour must be BLANK or share the color or pattern.

    NOTE(review): create_board keys cells as (row, col) but this function
    varies the first tuple element by ``column`` -- the names may be swapped;
    behaviour is at least consistent with get_empty_surrounding_tiles.
    """
    # Off-board neighbours (missing keys) are treated the same as BLANK.
    up = BLANK if board.get((column - 1, row)) is None else board.get((column - 1, row))
    left = BLANK if board.get((column, row - 1)) is None else board.get((column, row - 1))
    right = BLANK if board.get((column, row + 1)) is None else board.get((column, row + 1))
    down = BLANK if board.get((column + 1, row)) is None else board.get((column + 1, row))
    # NOTE(review): debug print left in -- intentional?
    print(up, left, right, down)
    return all(
        [(up[0] == color or up[1] == pattern or up == BLANK),
         (left[0] == color or left[1] == pattern or left == BLANK),
         (right[0] == color or right[1] == pattern or right == BLANK),
         (down[0] == color or down[1] == pattern or down == BLANK)])
def get_empty_surrounding_tiles(board, column, row):
    """Return [up, left, right, down] flags, True where the neighbour is BLANK.

    Off-board neighbours (missing keys) count as not blank.
    """
    neighbours = [
        (column - 1, row),  # upper one
        (column, row - 1),  # left one
        (column, row + 1),  # right one
        (column + 1, row),  # lower one
    ]
    return [board.get(position) == BLANK for position in neighbours]
def get_tile_points(board, col, row):
    """Score a placement by how many orthogonal neighbours are occupied:
    2 -> 1 point, 3 -> 2 points, 4 -> 4 points, otherwise 0."""
    occupied = get_empty_surrounding_tiles(board, col, row).count(False)
    return {2: 1, 3: 2, 4: 4}.get(occupied, 0)
def print_help():
    """Print the color and pattern code legend for the CLI."""
    print("[$] HELP")
    # NOTE(review): the legend mixes ':' and '=' separators.
    print("""[#] Colors:
    RED : r
    GREEN : g
    BLUE : b
    YELLOW = y
    CYAN = c
    PURPLE = p""")
    # NOTE(review): FISH : 6 is defined above but missing from this legend.
    print("""[#] Patterns:
    TURTLE : 1
    LIZARD : 2
    FLOWER : 3
    FEATHER : 4
    BIRD : 5 """)
| 27.194245 | 98 | 0.566931 |
acde4a73a56090edb918ceab6a42c7f6f1f22515 | 296 | py | Python | evalmgr/studentmgr/migrations/0003_delete_submissionscaleeval.py | IamMayankThakur/automated-assignment-evaluation | 57db0f4b6f78f9c9c011862e8f264bfafd045797 | [
"MIT"
] | 1 | 2021-12-16T07:00:38.000Z | 2021-12-16T07:00:38.000Z | evalmgr/studentmgr/migrations/0003_delete_submissionscaleeval.py | IamMayankThakur/automated-assignment-evaluation | 57db0f4b6f78f9c9c011862e8f264bfafd045797 | [
"MIT"
] | null | null | null | evalmgr/studentmgr/migrations/0003_delete_submissionscaleeval.py | IamMayankThakur/automated-assignment-evaluation | 57db0f4b6f78f9c9c011862e8f264bfafd045797 | [
"MIT"
] | 1 | 2021-12-16T07:06:23.000Z | 2021-12-16T07:06:23.000Z | # Generated by Django 3.0.4 on 2020-04-15 10:10
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the SubmissionScaleEval model."""

    dependencies = [
        ("studentmgr", "0002_delete_submissioncodeeval"),
    ]

    operations = [
        migrations.DeleteModel(name="SubmissionScaleEval",),
    ]
| 19.733333 | 60 | 0.679054 |
acde4a8fc56e2898f4b0890b4a63fc510d677676 | 1,363 | py | Python | pybott/users/views.py | locateneil/pybott | 40ffe189ffa0cef3e3f75d20fcac904d99ef2ecd | [
"MIT"
] | 9 | 2018-03-19T16:08:12.000Z | 2020-02-24T22:03:59.000Z | pybott/users/views.py | locateneil/pybott | 40ffe189ffa0cef3e3f75d20fcac904d99ef2ecd | [
"MIT"
] | 51 | 2018-03-22T12:07:18.000Z | 2018-11-17T00:38:54.000Z | pybott/users/views.py | locateneil/pybott | 40ffe189ffa0cef3e3f75d20fcac904d99ef2ecd | [
"MIT"
] | 2 | 2018-03-20T17:39:11.000Z | 2018-04-06T03:03:14.000Z | from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's profile page; login required."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""
    # Issue a temporary (302) redirect rather than a permanent one.
    permanent = False

    def get_redirect_url(self):
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own ``name`` field only."""
    fields = ['name', ]
    # we already imported User in the view code above, remember?
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})

    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users; login required."""
    model = User
    # These next two lines tell the view to index lookups by username
    # NOTE(review): slug settings look copy/pasted from UserDetailView and are
    # presumably unused by a ListView -- confirm before removing.
    slug_field = 'username'
    slug_url_kwarg = 'username'
| 30.288889 | 79 | 0.710198 |
acde4afdd9c2cf2bd4240bcb87142711fa99900e | 35,671 | py | Python | neutron/api/v2/attributes.py | melon-li/neutron | 9fdda5b85009a0d7d06695c9bfa0a598527c9733 | [
"Apache-2.0"
] | null | null | null | neutron/api/v2/attributes.py | melon-li/neutron | 9fdda5b85009a0d7d06695c9bfa0a598527c9733 | [
"Apache-2.0"
] | null | null | null | neutron/api/v2/attributes.py | melon-li/neutron | 9fdda5b85009a0d7d06695c9bfa0a598527c9733 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
import webob.exc
from neutron.common import constants
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)

# Sentinel used to tell "attribute not supplied" apart from an explicit None
# (see is_attr_set below).
ATTR_NOT_SPECIFIED = object()

# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'

# Used by range check to indicate no limit for a bound.
UNLIMITED = None

# TODO(watanabe.isao): A fix like in neutron/db/models_v2.py needs to be
# done in other db modules, to reuse the following constants.
# Common definitions for maximum string field length
NAME_MAX_LEN = 255
TENANT_ID_MAX_LEN = 255
DESCRIPTION_MAX_LEN = 255
DEVICE_ID_MAX_LEN = 255
DEVICE_OWNER_MAX_LEN = 255
def _verify_dict_keys(expected_keys, target_dict, strict=True):
    """Allows to verify keys in a dictionary.

    :param expected_keys: A list of keys expected to be present.
    :param target_dict: The dictionary which should be verified.
    :param strict: Specifies whether additional keys are allowed to be present.
    :return: None if the dictionary's keys correspond to the specification,
        otherwise a translated error-message string. (Note: NOT True/False --
        callers must test for a non-None result.)
    """
    if not isinstance(target_dict, dict):
        msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
                 "with keys: %(expected_keys)s") %
               {'target_dict': target_dict, 'expected_keys': expected_keys})
        LOG.debug(msg)
        return msg
    expected_keys = set(expected_keys)
    provided_keys = set(target_dict.keys())
    # strict: key sets must match exactly; non-strict: expected keys must be
    # a subset of the provided ones.
    predicate = expected_keys.__eq__ if strict else expected_keys.issubset
    if not predicate(provided_keys):
        msg = (_("Validation of dictionary's keys failed. "
                 "Expected keys: %(expected_keys)s "
                 "Provided keys: %(provided_keys)s") %
               {'expected_keys': expected_keys,
                'provided_keys': provided_keys})
        LOG.debug(msg)
        return msg
def is_attr_set(attribute):
    """Return True when the attribute carries a real, user-supplied value."""
    return attribute is not None and attribute is not ATTR_NOT_SPECIFIED
def _validate_values(data, valid_values=None):
    """Return None if *data* is one of *valid_values*, else an error message."""
    if data in valid_values:
        return None
    msg = (_("'%(data)s' is not in %(valid_values)s") %
           {'data': data, 'valid_values': valid_values})
    LOG.debug(msg)
    return msg
def _validate_not_empty_string_or_none(data, max_len=None):
    """Accept None; otherwise require a non-blank string within *max_len*."""
    if data is None:
        return None
    return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
    """Require a string that is neither empty nor whitespace-only."""
    error = _validate_string(data, max_len=max_len)
    if error:
        return error
    if data.strip():
        return None
    error = _("'%s' Blank strings are not permitted") % data
    LOG.debug(error)
    return error
def _validate_string_or_none(data, max_len=None):
    """Accept None; otherwise require a valid string within *max_len*."""
    if data is None:
        return None
    return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
    """Require a string, optionally bounded to *max_len* characters."""
    if not isinstance(data, six.string_types):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg
    if max_len is None or len(data) <= max_len:
        return None
    msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
           {'data': data, 'max_len': max_len})
    LOG.debug(msg)
    return msg
def _validate_boolean(data, valid_values=None):
    """Require a value that convert_to_boolean() accepts."""
    try:
        convert_to_boolean(data)
        return None
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg
def _validate_range(data, valid_values=None):
    """Check that an integer value lies within an inclusive range.

    valid_values is a (lower, upper) pair; either bound may be UNLIMITED,
    in which case that side of the range is not checked.  Returns None on
    success, otherwise an error message.
    """
    low, high = valid_values[0], valid_values[1]
    try:
        value = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if low is not UNLIMITED and value < low:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': value, 'limit': low}
        LOG.debug(msg)
        return msg
    if high is not UNLIMITED and value > high:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': value, 'limit': high}
        LOG.debug(msg)
        return msg
def _validate_no_whitespace(data):
    """Return *data* unchanged, raising InvalidInput if it has whitespace."""
    if re.search(r'\s', data) is None:
        return data
    msg = _("'%s' contains whitespace") % data
    LOG.debug(msg)
    raise n_exc.InvalidInput(error_message=msg)
def _validate_mac_address(data, valid_values=None):
    """Return None if *data* is a usable MAC address, else an error message.

    A value passes when netaddr parses it (after the whitespace check) and
    it is not one of constants.INVALID_MAC_ADDRESSES.
    """
    try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        # _validate_no_whitespace raises InvalidInput for whitespace (and
        # AttributeError for None, see TODO below); any failure means
        # "not a valid MAC" here.
        valid_mac = False
    if valid_mac:
        # Even a syntactically valid MAC is rejected when it is one of the
        # known-invalid addresses listed in constants.
        valid_mac = not netaddr.EUI(data) in map(netaddr.EUI,
                                                 constants.INVALID_MAC_ADDRESSES)
    # TODO(arosen): The code in this file should be refactored
    # so it catches the correct exceptions. _validate_no_whitespace
    # raises AttributeError if data is None.
    if not valid_mac:
        msg = _("'%s' is not a valid MAC address") % data
        LOG.debug(msg)
        return msg
def _validate_mac_address_or_none(data, valid_values=None):
    """Accept None; otherwise validate as a MAC address."""
    if data is None:
        return None
    return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
    """Return None if *data* is a valid IPv4/IPv6 address, else an error."""
    try:
        netaddr.IPAddress(_validate_no_whitespace(data))
        # The followings are quick checks for IPv6 (has ':') and
        # IPv4. (has 3 periods like 'xx.xx.xx.xx')
        # NOTE(yamamoto): netaddr uses libraries provided by the underlying
        # platform to convert addresses. For example, inet_aton(3).
        # Some platforms, including NetBSD and OS X, have inet_aton
        # implementation which accepts more varying forms of addresses than
        # we want to accept here. The following check is to reject such
        # addresses. For Example:
        # >>> netaddr.IPAddress('1' * 59)
        # IPAddress('199.28.113.199')
        # >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))
        # IPAddress('199.28.113.199')
        # >>>
        if ':' not in data and data.count('.') != 3:
            raise ValueError()
    except Exception:
        # Any failure (whitespace, unparsable, non-canonical IPv4 form)
        # funnels into the same user-facing message.
        msg = _("'%s' is not a valid IP address") % data
        LOG.debug(msg)
        return msg
def _validate_ip_pools(data, valid_values=None):
    """Validate a list of {'start': ip, 'end': ip} allocation pools.

    In addition to the key check, both boundary IP addresses are validated.
    Returns None on success, otherwise an error message.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for IP pool: '%s'") % data
        LOG.debug(msg)
        return msg
    required = ['start', 'end']
    for pool in data:
        error = _verify_dict_keys(required, pool)
        if error:
            return error
        for bound in required:
            error = _validate_ip_address(pool[bound])
            if error:
                return error
def _validate_fixed_ips(data, valid_values=None):
    """Validate a list of fixed-IP dicts.

    Each entry may carry an 'ip_address' (must be a valid, non-duplicate
    IP) and/or a 'subnet_id' (must be a UUID).  Returns None on success,
    otherwise an error message.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for fixed IP: '%s'") % data
        LOG.debug(msg)
        return msg
    ips = []
    for fixed_ip in data:
        if not isinstance(fixed_ip, dict):
            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
            LOG.debug(msg)
            return msg
        if 'ip_address' in fixed_ip:
            # Ensure that duplicate entries are not set - just checking IP
            # suffices. Duplicate subnet_id's are legitimate.
            fixed_ip_address = fixed_ip['ip_address']
            if fixed_ip_address in ips:
                msg = _("Duplicate IP address '%s'") % fixed_ip_address
                LOG.debug(msg)
            else:
                msg = _validate_ip_address(fixed_ip_address)
            # Bug fix: the duplicate-address error was previously only
            # logged and then ignored; both branches must be reported.
            if msg:
                return msg
            ips.append(fixed_ip_address)
        if 'subnet_id' in fixed_ip:
            msg = _validate_uuid(fixed_ip['subnet_id'])
            if msg:
                return msg
def _validate_nameservers(data, valid_values=None):
    """Validate an iterable of unique nameserver IP addresses.

    Returns None on success, otherwise an error message.
    """
    # Bug fix: plain strings are iterable on Python 3, so the bare
    # __iter__ check would silently iterate a string character by
    # character; reject strings along with non-iterables.
    if not hasattr(data, '__iter__') or isinstance(data, six.string_types):
        msg = _("Invalid data format for nameserver: '%s'") % data
        LOG.debug(msg)
        return msg
    hosts = []
    for host in data:
        # This must be an IP address only
        msg = _validate_ip_address(host)
        if msg:
            msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
                'host': host, 'msg': msg}
            LOG.debug(msg)
            return msg
        if host in hosts:
            msg = _("Duplicate nameserver '%s'") % host
            LOG.debug(msg)
            return msg
        hosts.append(host)
def _validate_hostroutes(data, valid_values=None):
    """Validate a list of unique {'destination': cidr, 'nexthop': ip} routes."""
    if not isinstance(data, list):
        msg = _("Invalid data format for hostroute: '%s'") % data
        LOG.debug(msg)
        return msg
    required = ['destination', 'nexthop']
    seen = []
    for route in data:
        # Key check first so the lookups below are safe.
        error = _verify_dict_keys(required, route)
        if error:
            return error
        error = _validate_subnet(route['destination'])
        if error:
            return error
        error = _validate_ip_address(route['nexthop'])
        if error:
            return error
        if route in seen:
            msg = _("Duplicate hostroute '%s'") % route
            LOG.debug(msg)
            return msg
        seen.append(route)
def _validate_ip_address_or_none(data, valid_values=None):
    """Accept None; otherwise validate as an IP address."""
    if data is None:
        return None
    return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
    """Return None if *data* is a canonical CIDR string, else an error.

    IPv4 values must include an explicit '/<prefix>' and must equal
    netaddr's own rendering of the network; otherwise the message
    recommends the canonical net.cidr form.
    """
    msg = None
    try:
        net = netaddr.IPNetwork(_validate_no_whitespace(data))
        # Rejects inputs without a prefix and IPv4 inputs whose netaddr
        # string form differs from what was supplied (presumably netmask
        # or abbreviated notations - confirm against netaddr behavior).
        if '/' not in data or (net.version == 4 and str(net) != data):
            msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
                    " '%(cidr)s' is recommended") % {"data": data,
                                                    "cidr": net.cidr}
        else:
            return
    except Exception:
        msg = _("'%s' is not a valid IP subnet") % data
    if msg:
        LOG.debug(msg)
    return msg
def _validate_subnet_list(data, valid_values=None):
    """Validate a list of unique subnet CIDR strings."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg
    # Duplicates are rejected before the per-item validation.
    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg
    for subnet in data:
        error = _validate_subnet(subnet)
        if error:
            return error
def _validate_subnet_or_none(data, valid_values=None):
    """Accept None; otherwise validate as a subnet CIDR."""
    if data is None:
        return None
    return _validate_subnet(data, valid_values)
def _validate_regex(data, valid_values=None):
    """Require *data* to match the regex pattern given in *valid_values*."""
    try:
        matched = re.match(valid_values, data)
    except TypeError:
        # Non-string data (or pattern) cannot match.
        matched = None
    if matched:
        return None
    msg = _("'%s' is not a valid input") % data
    LOG.debug(msg)
    return msg
def _validate_regex_or_none(data, valid_values=None):
    """Accept None; otherwise validate against the regex."""
    if data is None:
        return None
    return _validate_regex(data, valid_values)
def _validate_subnetpool_id(data, valid_values=None):
    """Accept the IPv6 prefix-delegation sentinel, None, or a UUID."""
    if data == constants.IPV6_PD_POOL_ID:
        return None
    return _validate_uuid_or_none(data, valid_values)
def _validate_subnetpool_id_or_none(data, valid_values=None):
    """Accept None; otherwise validate as a subnet pool id."""
    if data is None:
        return None
    return _validate_subnetpool_id(data, valid_values)
def _validate_uuid(data, valid_values=None):
    """Require a UUID-like string (per oslo.utils uuidutils)."""
    if uuidutils.is_uuid_like(data):
        return None
    msg = _("'%s' is not a valid UUID") % data
    LOG.debug(msg)
    return msg
def _validate_uuid_or_none(data, valid_values=None):
    """Accept None; otherwise validate as a UUID."""
    if data is None:
        return None
    return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
    """Validate a list of unique UUID strings."""
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg
    # Per-item validation happens before the duplicate check.
    for entry in data:
        error = _validate_uuid(entry)
        if error:
            return error
    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg
def _validate_dict_item(key, key_validator, data):
    """Convert and validate a single dict entry per its key_validator spec.

    Applies the optional 'convert_to' function in place on data[key], then
    runs the first 'type:*' validator found in the spec against the module
    level validators table.  Returns None on success, otherwise an error
    message.
    """
    # Find conversion function, if any, and apply it
    conv_func = key_validator.get('convert_to')
    if conv_func:
        data[key] = conv_func(data.get(key))
    # Find validator function
    # TODO(salv-orlando): Structure of dict attributes should be improved
    # to avoid iterating over items
    val_func = val_params = None
    for (k, v) in six.iteritems(key_validator):
        if k.startswith('type:'):
            # ask forgiveness, not permission
            try:
                val_func = validators[k]
            except KeyError:
                msg = _("Validator '%s' does not exist.") % k
                LOG.debug(msg)
                return msg
            val_params = v
            # Only the first 'type:*' entry is honored.
            break
    # Process validation
    if val_func:
        return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
    """Validate a dictionary against an optional per-key specification.

    key_specs maps key names to specs understood by _validate_dict_item
    ('required', 'convert_to', 'type:*' rules).  Returns None on success,
    otherwise an error message.
    """
    if not isinstance(data, dict):
        msg = _("'%s' is not a dictionary") % data
        LOG.debug(msg)
        return msg
    # Do not perform any further validation, if no constraints are supplied
    if not key_specs:
        return
    # Check whether all required keys are present
    required_keys = [key for key, spec in six.iteritems(key_specs)
                     if spec.get('required')]
    if required_keys:
        # strict=False: extra (non-required) keys are allowed here.
        msg = _verify_dict_keys(required_keys, data, False)
        if msg:
            return msg
    # Perform validation and conversion of all values
    # according to the specifications.
    for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs)
                               if k in data]:
        msg = _validate_dict_item(key, key_validator, data)
        if msg:
            return msg
def _validate_dict_or_none(data, key_specs=None):
    """Accept None; otherwise validate as a dictionary."""
    if data is None:
        return None
    return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
    """Accept an empty dict without constraints; otherwise validate."""
    if data == {}:
        return None
    return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
    """Accept any falsy value (no data); otherwise validate as a dict."""
    if not data:
        return None
    return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
    """Require an int-convertible value that is >= 0."""
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if data >= 0:
        return None
    msg = _("'%s' should be non-negative") % data
    LOG.debug(msg)
    return msg
def convert_to_boolean(data):
    """Convert common boolean representations to bool.

    Accepts bool, the strings 'true'/'false'/'1'/'0' (case-insensitive)
    and the integers 0/1.  Anything else raises InvalidInput.
    """
    if isinstance(data, bool):
        return data
    if isinstance(data, six.string_types):
        lowered = data.lower()
        if lowered in ("true", "1"):
            return True
        if lowered in ("false", "0"):
            return False
    elif isinstance(data, int):
        if data == 1:
            return True
        if data == 0:
            return False
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
    """Convert to bool unless the value is None, which passes through."""
    if data is None:
        return None
    return convert_to_boolean(data)
def convert_to_int(data):
    """Convert *data* to int, raising InvalidInput when it cannot be parsed."""
    try:
        return int(data)
    except (ValueError, TypeError):
        # Fix: "a integer" -> "an integer", matching the wording used by
        # _validate_range and _validate_non_negative in this module.
        msg = _("'%s' is not an integer") % data
        raise n_exc.InvalidInput(error_message=msg)
def convert_to_int_if_not_none(data):
    """Convert to int unless the value is None, which passes through."""
    if data is None:
        return data
    return convert_to_int(data)
def convert_to_positive_float_or_none(val):
    """Convert to a non-negative float; None passes through unchanged.

    NOTE(salv-orlando): This conversion function is currently used by
    a vendor specific extension only at the moment It is used for
    port's RXTX factor in neutron.plugins.vmware.extensions.qos.
    It is deemed however generic enough to be in this module as it
    might be used in future for other API attributes.

    :raises: n_exc.InvalidInput for negative or unparsable values.
    """
    if val is None:
        return None
    try:
        val = float(val)
        bad = val < 0
    except (ValueError, TypeError):
        bad = True
    if bad:
        msg = _("'%s' must be a non negative decimal.") % val
        raise n_exc.InvalidInput(error_message=msg)
    return val
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    parts = [piece.strip() for piece in data.split('=', 1)]
    if len(parts) != 2 or not parts[0]:
        msg = _("'%s' is not of the form <key>=[value]") % data
        raise n_exc.InvalidInput(error_message=msg)
    return parts
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict of key -> value list.

    Repeated keys are merged: each key maps to the list of all distinct
    values supplied for it (value order is unspecified, as values are
    collected in a set).

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    kvp_map = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        kvp_map.setdefault(key, set())
        kvp_map[key].add(value)
    return dict((x, list(y)) for x, y in six.iteritems(kvp_map))
def convert_none_to_empty_list(value):
    """Map None to an empty list; every other value passes through."""
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Map None to an empty dict; every other value passes through."""
    if value is None:
        return {}
    return value
def convert_to_list(data):
    """Wrap a value in a list.

    None becomes []; non-string iterables are materialized with list();
    anything else (including strings) becomes a single-element list.
    """
    if data is None:
        return []
    if hasattr(data, '__iter__') and not isinstance(data, six.string_types):
        return list(data)
    return [data]
# Regex fragment matching a single hexadecimal digit.
HEX_ELEM = '[0-9A-Fa-f]'
# Canonical 8-4-4-4-12 UUID regex built from HEX_ELEM.
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
# (keys are the 'type:*' rule names used in attribute 'validate' specs).
validators = {'type:dict': _validate_dict,
              'type:dict_or_none': _validate_dict_or_none,
              'type:dict_or_empty': _validate_dict_or_empty,
              'type:dict_or_nodata': _validate_dict_or_nodata,
              'type:fixed_ips': _validate_fixed_ips,
              'type:hostroutes': _validate_hostroutes,
              'type:ip_address': _validate_ip_address,
              'type:ip_address_or_none': _validate_ip_address_or_none,
              'type:ip_pools': _validate_ip_pools,
              'type:mac_address': _validate_mac_address,
              'type:mac_address_or_none': _validate_mac_address_or_none,
              'type:nameservers': _validate_nameservers,
              'type:non_negative': _validate_non_negative,
              'type:range': _validate_range,
              'type:regex': _validate_regex,
              'type:regex_or_none': _validate_regex_or_none,
              'type:string': _validate_string,
              'type:string_or_none': _validate_string_or_none,
              'type:not_empty_string': _validate_not_empty_string,
              'type:not_empty_string_or_none':
              _validate_not_empty_string_or_none,
              'type:subnet': _validate_subnet,
              'type:subnet_list': _validate_subnet_list,
              'type:subnet_or_none': _validate_subnet_or_none,
              'type:subnetpool_id': _validate_subnetpool_id,
              'type:subnetpool_id_or_none': _validate_subnetpool_id_or_none,
              'type:uuid': _validate_uuid,
              'type:uuid_or_none': _validate_uuid_or_none,
              'type:uuid_list': _validate_uuid_list,
              'type:values': _validate_values,
              'type:boolean': _validate_boolean}
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
SUBNETPOOL = 'subnetpool'
SUBNETPOOLS = '%ss' % SUBNETPOOL
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
# becomes mandatory.
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': NAME_MAX_LEN},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_ID_MAX_LEN},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
'default': '', 'enforce_policy': True,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'subnetpool_id': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'validate': {'type:subnetpool_id_or_none': None},
'is_visible': True},
'prefixlen': {'allow_post': True,
'allow_put': False,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'is_visible': False},
'cidr': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:subnet_or_none': None},
'required_by_policy': False,
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'allocation_pools': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_pools': None},
'is_visible': True},
'dns_nameservers': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:nameservers': None},
'is_visible': True},
'host_routes': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:hostroutes': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'enable_dhcp': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values': constants.IPV6_MODES},
'is_visible': True},
'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values':
constants.IPV6_MODES},
'is_visible': True},
SHARED: {'allow_post': False,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': False,
'required_by_policy': True,
'enforce_policy': True},
},
SUBNETPOOLS: {
'id': {'allow_post': False,
'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True,
'allow_put': True,
'validate': {'type:not_empty_string': None},
'is_visible': True},
'tenant_id': {'allow_post': True,
'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'prefixes': {'allow_post': True,
'allow_put': True,
'validate': {'type:subnet_list': None},
'is_visible': True},
'default_quota': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'ip_version': {'allow_post': False,
'allow_put': False,
'is_visible': True},
'default_prefixlen': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'min_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
'max_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
'is_default': {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
SHARED: {'allow_post': True,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
}
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
    NETWORKS: 'network_id'
}
# Map plural collection/attribute names to their singular member names.
PLURALS = {NETWORKS: NETWORK,
           PORTS: PORT,
           SUBNETS: SUBNET,
           SUBNETPOOLS: SUBNETPOOL,
           'dns_nameservers': 'dns_nameserver',
           'host_routes': 'host_route',
           'allocation_pools': 'allocation_pool',
           'fixed_ips': 'fixed_ip',
           'extensions': 'extension'}
def fill_default_value(attr_info, res_dict,
                       exc_cls=ValueError,
                       check_allow_post=True):
    """Fill missing POST-able attributes of res_dict with their defaults.

    :raises exc_cls: when a POST-able attribute without a default is
        missing from the request, or - if check_allow_post is set - when
        the request supplies an attribute not allowed on POST.
    """
    for attr, attr_vals in six.iteritems(attr_info):
        if attr_vals['allow_post']:
            # No default means the attribute is mandatory.
            if ('default' not in attr_vals and
                attr not in res_dict):
                msg = _("Failed to parse request. Required "
                        "attribute '%s' not specified") % attr
                raise exc_cls(msg)
            res_dict[attr] = res_dict.get(attr,
                                          attr_vals.get('default'))
        elif check_allow_post:
            if attr in res_dict:
                msg = _("Attribute '%s' not allowed in POST") % attr
                raise exc_cls(msg)
def convert_value(attr_info, res_dict, exc_cls=ValueError):
    """Convert and validate res_dict values against their attr_info specs.

    Attributes that are absent or still ATTR_NOT_SPECIFIED are skipped.
    'convert_to' functions are applied in place; every 'validate' rule is
    looked up in the module-level validators table.

    :raises exc_cls: on the first validation failure.
    """
    for attr, attr_vals in six.iteritems(attr_info):
        if (attr not in res_dict or
            res_dict[attr] is ATTR_NOT_SPECIFIED):
            continue
        # Convert values if necessary
        if 'convert_to' in attr_vals:
            res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
        # Check that configured values are correct
        if 'validate' not in attr_vals:
            continue
        for rule in attr_vals['validate']:
            res = validators[rule](res_dict[attr], attr_vals['validate'][rule])
            if res:
                msg_dict = dict(attr=attr, reason=res)
                msg = _("Invalid input for %(attr)s. "
                        "Reason: %(reason)s.") % msg_dict
                raise exc_cls(msg)
def populate_tenant_id(context, res_dict, attr_info, is_create):
    """Ensure res_dict carries a tenant_id consistent with the context.

    Non-admin callers may not specify a tenant_id other than their own.
    On create, the authenticated tenant is filled in when omitted; when
    there is no authenticated tenant (no keystone) an explicit tenant_id
    is mandatory if the resource defines one.

    :raises webob.exc.HTTPBadRequest: on violation of either rule.
    """
    if (('tenant_id' in res_dict and
         res_dict['tenant_id'] != context.tenant_id and
         not context.is_admin)):
        msg = _("Specifying 'tenant_id' other than authenticated "
                "tenant in request requires admin privileges")
        raise webob.exc.HTTPBadRequest(msg)
    if is_create and 'tenant_id' not in res_dict:
        if context.tenant_id:
            res_dict['tenant_id'] = context.tenant_id
        elif 'tenant_id' in attr_info:
            msg = _("Running without keystone AuthN requires "
                    "that tenant_id is specified")
            raise webob.exc.HTTPBadRequest(msg)
def verify_attributes(res_dict, attr_info):
    """Reject request bodies containing attributes not defined in attr_info.

    :raises webob.exc.HTTPBadRequest: listing the unrecognized names.
    """
    unknown = set(res_dict) - set(attr_info)
    if unknown:
        msg = _("Unrecognized attribute(s) '%s'") % ', '.join(unknown)
        raise webob.exc.HTTPBadRequest(msg)
| 37.118626 | 79 | 0.572482 |
acde4ec06682db579bd76329b35419c34872800e | 640 | py | Python | investing_algorithm_framework/globals.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | 1 | 2019-12-23T21:23:45.000Z | 2019-12-23T21:23:45.000Z | investing_algorithm_framework/globals.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | null | null | null | investing_algorithm_framework/globals.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | 1 | 2019-12-23T21:23:50.000Z | 2019-12-23T21:23:50.000Z | import logging
from investing_algorithm_framework.app import App
from investing_algorithm_framework.configuration.constants import \
CHECK_PENDING_ORDERS
from investing_algorithm_framework.core.models import TimeUnit
logger = logging.getLogger("investing-algorithm-framework")
current_app: App = App()
# Check pending orders every 5 minutes
@current_app.algorithm.schedule(
worker_id="default_order_checker", time_unit=TimeUnit.MINUTE, interval=5
)
def check_pending_orders(context):
if context.config.get(CHECK_PENDING_ORDERS, False):
logger.info("Checking pending orders")
context.check_pending_orders()
| 29.090909 | 76 | 0.803125 |
acde4ef90996aafb62d18c722b9253fc10208afa | 4,237 | py | Python | tests/test_endpoint.py | evilensky/python-cert_manager | db6e7f24a8eb05e8650514567ea68d687378252d | [
"BSD-3-Clause"
] | null | null | null | tests/test_endpoint.py | evilensky/python-cert_manager | db6e7f24a8eb05e8650514567ea68d687378252d | [
"BSD-3-Clause"
] | null | null | null | tests/test_endpoint.py | evilensky/python-cert_manager | db6e7f24a8eb05e8650514567ea68d687378252d | [
"BSD-3-Clause"
] | 1 | 2021-02-03T12:59:52.000Z | 2021-02-03T12:59:52.000Z | # -*- coding: utf-8 -*-
"""Define the cert_manager._endpoint.Endpoint unit tests."""
# Don't warn about things that happen as that is part of unit testing
# pylint: disable=protected-access
# pylint: disable=invalid-name
from testtools import TestCase
from cert_manager._endpoint import Endpoint
from .lib.testbase import ClientFixture
# pylint: disable=too-few-public-methods
class TestEndpoint(TestCase):
"""Serve as a Base class for all tests of the Endpoint class."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the class."""
# Call the inherited setUp method
super().setUp()
# Make sure the Client fixture is created and setup
self.cfixt = self.useFixture(ClientFixture())
self.client = self.cfixt.client
# Set some default values
self.ep_path = "/test"
self.api_version = "v1"
self.api_url = self.cfixt.base_url + self.ep_path + "/" + self.api_version
class TestInit(TestEndpoint):
"""Test the class initializer."""
def test_defaults(self):
"""Parameters should be set correctly inside the class using defaults."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
# Check all the internal values
self.assertEqual(end._client, self.client)
self.assertEqual(end._api_version, self.api_version)
self.assertEqual(end._api_url, self.api_url)
def test_version(self):
"""Parameters should be set correctly inside the class with a custom version."""
version = "v2"
api_url = self.cfixt.base_url + self.ep_path + "/" + version
end = Endpoint(client=self.client, endpoint=self.ep_path, api_version=version)
# Check all the internal values
self.assertEqual(end._client, self.client)
self.assertEqual(end._api_version, version)
self.assertEqual(end._api_url, api_url)
class TestProperties(TestEndpoint):
"""Test the class properties."""
def test_api_version(self):
"""Return the internal _api_version value."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
# Make sure the values match
self.assertEqual(end.api_version, self.api_version)
def test_api_url(self):
"""Return the internal _api_url value."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
# Make sure the values match
self.assertEqual(end.api_url, self.api_url)
class TestCreateApiUrl(TestEndpoint):
"""Test the create_api_url static function."""
def test_normal(self):
"""Return the API URL when called with correct parameters."""
url = Endpoint.create_api_url(self.cfixt.base_url, self.ep_path, self.api_version)
# Make sure the values match
self.assertEqual(url, self.api_url)
def test_extra_slashes(self):
"""Return a clean API URL when called with parameters containing extra slashes."""
url = Endpoint.create_api_url(
self.cfixt.base_url + "///", "//" + self.ep_path, "////" + self.api_version
)
# Make sure the values match
self.assertEqual(url, self.api_url)
class TestUrl(TestEndpoint):
"""Test the _url function."""
def test_normal(self):
"""Return the full API URL when called with correct parameters."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
suffix = "/help"
url = self.api_url + suffix
# Make sure the values match
self.assertEqual(end._url(suffix), url)
def test_no_slashes(self):
"""Return the full API URL when called with a suffix with no slash."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
suffix = "help"
url = self.api_url + "/" + suffix
# Make sure the values match
self.assertEqual(end._url(suffix), url)
def test_many_slashes(self):
"""Return the full API URL when called with a suffix with too many slashes."""
end = Endpoint(client=self.client, endpoint=self.ep_path)
suffix = "//help///"
url = self.api_url + "/help"
# Make sure the values match
self.assertEqual(end._url(suffix), url)
| 33.101563 | 90 | 0.658249 |
acde4f8fb39300dc5c188816ae1a63462b24b7dd | 9,690 | py | Python | src/alertsmanagement/azext_alertsmanagement/_help.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-02-18T00:16:47.000Z | 2022-02-18T00:16:47.000Z | src/alertsmanagement/azext_alertsmanagement/_help.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/alertsmanagement/azext_alertsmanagement/_help.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['monitor alert-processing-rule'] = """
type: group
short-summary: Manage alert processing rule with alertsmanagement
"""
helps['monitor alert-processing-rule delete'] = """
type: command
short-summary: Delete an alert processing rule.
examples:
- name: Delete an alert processing rule.
text: |-
az monitor alert-processing-rule delete \\
--resource-group myResourceGroup \\
--name myRuleName
"""
# NOTE(review): a second `helps['monitor alert-processing-rule update']`
# assignment appears further down in this module and silently overwrote the
# entry that used to live here (duplicate dict key). The dead first
# assignment has been removed; the help text that users actually see is
# unchanged, since it always came from the later assignment.
helps['monitor alert-processing-rule list'] = """
type: command
short-summary: List all alert processing rules in a subscription or resource group
examples:
- name: List all alert processing rules in current subscription
text: |-
az monitor alert-processing-rule list
- name: List all alert processing rules in a resource group
text: |-
az monitor alert-processing-rule list \\
--resource-group myResourceGroup
"""
helps['monitor alert-processing-rule show'] = """
type: command
short-summary: Get an alert processing rule.
examples:
- name: Get an alert processing rule by name
text: |-
az monitor alert-processing-rule show \\
--name myRuleName \\
--resource-group myRuleNameResourceGroup
- name: Get alerts processing rule by ids
text: |-
az monitor alert-processing-rule show \\
--ids ruleId1 ruleId2
"""
helps['monitor alert-processing-rule create'] = """
type: command
short-summary: Create an alert processing rule.
parameters:
- name: --filter-alert-context
short-summary: Filter alerts by alert context (payload).
long-summary: |
Filter format is <operator> <space-delimited values> where
Operator: one of <Equals, NotEquals, Contains, DoesNotContain>
Values: List of values to match for a given condition
- name: --schedule-recurrence
short-summary: List of recurrence pattern values
long-summary: |
--schedule-recurrence : List of recurrence pattern values (space-delimited).
For a weekly recurrence type, allowed values are Sunday to Saturday.
For a monthly recurrence type, allowed values are 1 to 31 (days of month)
- name: --schedule-recurrence-2
short-summary: List of recurrence pattern values for the second recurrence pattern.
long-summary: |
--schedule-recurrence-2 : List of recurrence pattern values (space-delimited).
For a weekly recurrence type, allowed values are Sunday to Saturday.
For a monthly recurrence type, allowed values are 1 to 31 (days of month)
examples:
- name: Create or update a rule that adds an action group to all alerts in a subscription
text: |-
az monitor alert-processing-rule create \\
--name 'AddActionGroupToSubscription' \\
--rule-type AddActionGroups \\
--scopes "/subscriptions/MySubscriptionId" \\
--action-groups "/subscriptions/MySubscriptionId/resourcegroups/MyResourceGroup1/providers/microsoft.insights/actiongroups/ActionGroup1" \\
--enabled true \\
--resource-group alertscorrelationrg \\
--description "Add ActionGroup1 to all alerts in the subscription"
- name: Create or update a rule that adds two action groups to all Sev0 and Sev1 alerts in two resource groups
text: |-
az monitor alert-processing-rule create \\
--name 'AddActionGroupsBySeverity' \\
--rule-type AddActionGroups \\
--action-groups "/subscriptions/MySubscriptionId/resourcegroups/MyResourceGroup1/providers/microsoft.insights/actiongroups/MyActionGroupId1" "/subscriptions/MySubscriptionId/resourceGroups/MyResourceGroup2/providers/microsoft.insights/actionGroups/MyActionGroup2" \\
--scopes "/subscriptions/MySubscriptionId" \\
--resource-group alertscorrelationrg \\
--filter-severity Equals Sev0 Sev1 \\
--description "Add AGId1 and AGId2 to all Sev0 and Sev1 alerts in these resourceGroups"
- name: Create or update a rule that removes all action groups from alerts on a specific VM during a one-off maintenance window (1800-2000 at a specific date, Pacific Standard Time)
text: |-
az monitor alert-processing-rule create \\
--name 'RemoveActionGroupsMaintenanceWindow' \\
--rule-type RemoveAllActionGroups \\
--scopes "/subscriptions/MySubscriptionId/resourceGroups/MyResourceGroup1/providers/Microsoft.Compute/virtualMachines/VMName" \\
--resource-group alertscorrelationrg \\
--schedule-start-datetime '2022-01-02 18:00:00' \\
--schedule-end-datetime '2022-01-02 20:00:00' \\
--schedule-time-zone 'Pacific Standard Time' \\
--description "Removes all ActionGroups from all Alerts on VMName during the maintenance window"
- name: Create or update a rule that removes all action groups from all alerts in a subscription coming from a specific alert rule
text: |-
az monitor alert-processing-rule create \\
--name 'RemoveActionGroupsSpecificAlertRule' \\
--rule-type RemoveAllActionGroups \\
--scopes "/subscriptions/MySubscriptionId" \\
--resource-group alertscorrelationrg \\
--filter-alert-rule-id Equals "/subscriptions/MySubscriptionId/resourceGroups/MyResourceGroup1/providers/microsoft.insights/activityLogAlerts/RuleName" \\
--description "Removes all ActionGroups from all Alerts that fire on above AlertRule"
- name: Create or update a rule that removes all action groups from all alerts on any VM in two resource groups during a recurring maintenance window (2200-0400 every Sat and Sun, India Standard Time)
text: |-
az monitor alert-processing-rule create \\
--name 'RemoveActionGroupsRecurringMaintenance' \\
--rule-type RemoveAllActionGroups \\
--scopes "/subscriptions/MySubscriptionId/resourceGroups/MyResourceGroup1" "/subscriptions/MySubscriptionId/resourceGroups/MyResourceGroup2" \\
--resource-group alertscorrelationrg \\
--filter-resource-type Equals "microsoft.compute/virtualmachines" \\
--schedule-time-zone "India Standard Time" \\
--schedule-recurrence-type Weekly \\
--schedule-recurrence-start-time "22:00:00" \\
--schedule-recurrence-end-time "04:00:00" \\
--schedule-recurrence Sunday Saturday \\
--description "Remove all ActionGroups from all Virtual machine Alerts during the recurring maintenance"
- name: Create or update a rule that removes all action groups outside business hours (Mon-Fri 09:00-17:00, Eastern Standard Time)
text: |-
az monitor alert-processing-rule create \\
--name 'RemoveActionGroupsOutsideBusinessHours' \\
--rule-type RemoveAllActionGroups \\
--scopes "/subscriptions/MySubscriptionId" \\
--resource-group alertscorrelationrg \\
--schedule-time-zone "Eastern Standard Time" \\
--schedule-recurrence-type Daily \\
--schedule-recurrence-start-time "17:00:00" \\
--schedule-recurrence-end-time "09:00:00" \\
--schedule-recurrence-2-type Weekly \\
--schedule-recurrence-2 Saturday Sunday \\
--description "Remove all ActionGroups outside business hours"
"""
helps['monitor alert-processing-rule update'] = """
type: command
short-summary: Enable, disable, or update tags for an alert processing rule.
examples:
- name: PatchAlertProcessingRule
text: |-
az monitor alert-processing-rule update \\
--name "WeeklySuppression" \\
--enabled false \\
--tags key1="value1" key2="value2" --resource-group "alertscorrelationrg"
"""
| 51.269841 | 280 | 0.628896 |
acde50c80aeced8e0e2137926ade0f8a090c4b5b | 2,361 | py | Python | tests/v20/test_v20_charge_point.py | MrMika96/ocpp | bdcb28492c84a977b58069df9a9e78fadb095e9a | [
"MIT"
] | null | null | null | tests/v20/test_v20_charge_point.py | MrMika96/ocpp | bdcb28492c84a977b58069df9a9e78fadb095e9a | [
"MIT"
] | null | null | null | tests/v20/test_v20_charge_point.py | MrMika96/ocpp | bdcb28492c84a977b58069df9a9e78fadb095e9a | [
"MIT"
] | 1 | 2019-12-05T18:10:06.000Z | 2019-12-05T18:10:06.000Z | import json
import pytest
from ocpp.exceptions import NotImplementedError
from ocpp.routing import on, after, create_route_map
from ocpp.v20 import call_result
@pytest.mark.asyncio
async def test_route_message_with_existing_route(base_central_system,
                                                 boot_notification_call):
    """ Test if the correct handler is called when routing a message.
    Also test if payload of request is injected correctly in handler.
    """
    # Handler invoked for an incoming BootNotification CALL; the asserts
    # verify that the CALL payload was deserialized and injected as kwargs.
    @on("BootNotification")
    def on_boot_notification(reason, charging_station, **kwargs):
        assert reason == 'PowerUp'
        assert charging_station == {
            'vendor_name': 'ICU Eve Mini',
            'firmware_version': "#1:3.4.0-2990#N:217H;1.0-223",
            'model': 'ICU Eve Mini',
        }
        return call_result.BootNotificationPayload(
            current_time='2018-05-29T17:37:05.495259',
            interval=350,
            status='Accepted',
        )
    # Post-response hook; must receive the same injected payload.
    @after("BootNotification")
    def after_boot_notification(reason, charging_station, **kwargs):
        assert reason == 'PowerUp'
        assert charging_station == {
            'vendor_name': 'ICU Eve Mini',
            'firmware_version': "#1:3.4.0-2990#N:217H;1.0-223",
            'model': 'ICU Eve Mini',
        }
    # Attach the handlers to the charge point so create_route_map() can
    # discover them by their @on/@after decorations.
    setattr(base_central_system, 'on_boot_notification', on_boot_notification)
    setattr(base_central_system, 'after_boot_notification',
            after_boot_notification)
    base_central_system.route_map = create_route_map(base_central_system)
    await base_central_system.route_message(boot_notification_call)
    # The response on the wire must be a CALLRESULT (message type 3) echoing
    # message id "1" with the camelCased payload from the handler.
    base_central_system._connection.send.assert_called_once_with(
        json.dumps([
            3,
            "1",
            {
                'currentTime': '2018-05-29T17:37:05.495259',
                'interval': 350,
                'status': 'Accepted',
            }
        ])
    )
@pytest.mark.asyncio
async def test_route_message_with_no_route(base_central_system,
                                           heartbeat_call):
    """A message without a registered handler raises NotImplementedError."""
    # Wipe every registered route so no handler can match the incoming call.
    base_central_system.route_map = {}
    with pytest.raises(NotImplementedError):
        await base_central_system.route_message(heartbeat_call)
| 32.791667 | 78 | 0.634477 |
acde5156cf5d1b918f3257e299575405f2cd2af1 | 1,862 | py | Python | utils/metric.py | kreimanlab/AugMem | cb0e8d39eb0c469da46c7c550c19229927a2bec5 | [
"MIT"
] | 6 | 2021-04-07T15:17:24.000Z | 2021-07-07T04:37:29.000Z | utils/metric.py | kreimanlab/AugMem | cb0e8d39eb0c469da46c7c550c19229927a2bec5 | [
"MIT"
] | null | null | null | utils/metric.py | kreimanlab/AugMem | cb0e8d39eb0c469da46c7c550c19229927a2bec5 | [
"MIT"
] | null | null | null | import time
import torch
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in `topk`.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        A single float when one k is requested, otherwise a list of floats
        (percentages in [0, 100]) in the same order as `topk`.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # prediction vectors are stacked along the batch dimension (dim zero)
        _, pred = output.topk(k=maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()  # (maxk, batch): row i is the (i+1)-th best guess
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUGFIX: `correct[:k]` slices a transposed (non-contiguous)
            # tensor, so `.view(-1)` raises a RuntimeError on modern PyTorch;
            # `.reshape(-1)` copies only when necessary and is always safe.
            correct_k = correct[:k].reshape(-1).float().sum().item()
            res.append(correct_k * 100.0 / batch_size)
        if len(res) == 1:
            return res[0]
        return res
class AverageMeter(object):
    """Track the running average of a scalar quantity.

    Exposes `val` (latest value), `sum`, `count` and `avg`.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val`, treated as the average over `n` samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class Timer(object):
    """Wall-clock stopwatch for timing batches, epochs, tasks, etc.

    Attributes:
        time: timestamp of the most recent reset/tic/toc.
        interval: duration measured by the most recent toc().
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Zero the last interval and restart the clock."""
        self.interval = 0
        self.time = time.time()
    def tic(self):
        """Restart the clock."""
        self.time = time.time()
    def toc(self):
        """Record the elapsed interval, restart the clock, and return it."""
        self.interval = time.time() - self.time
        self.time = time.time()
        return self.interval
    def value(self):
        """Seconds elapsed since the last reset/tic/toc, without stopping."""
        return time.time() - self.time
acde515fed45482651e8ba5f66e92f175b35281b | 14,214 | py | Python | core/controllers/suggestion.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | 2 | 2021-04-08T01:06:08.000Z | 2021-06-02T08:20:13.000Z | core/controllers/suggestion.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | null | null | null | core/controllers/suggestion.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | 1 | 2020-12-11T06:56:31.000Z | 2020-12-11T06:56:31.000Z | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import image_validation_services
from core.domain import opportunity_services
from core.domain import skill_fetchers
from core.domain import suggestion_services
from core.platform import models
import feconf
import utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
def _get_target_id_to_exploration_opportunity_dict(suggestions):
    """Returns a dict of target_id to exploration opportunity summary dict.

    Args:
        suggestions: list(BaseSuggestion). A list of suggestions to retrieve
            opportunity dicts.

    Returns:
        dict. Dict mapping target_id to the corresponding exploration
        opportunity summary dict, or None when no summary exists for that id.
    """
    target_ids = {suggestion.target_id for suggestion in suggestions}
    opportunities = (
        opportunity_services.get_exploration_opportunity_summaries_by_ids(
            list(target_ids)))
    result = {}
    for opportunity_id, opportunity in opportunities.items():
        result[opportunity_id] = (
            None if opportunity is None else opportunity.to_dict())
    return result
def _get_target_id_to_skill_opportunity_dict(suggestions):
    """Returns a dict of target_id to skill opportunity summary dict.

    Args:
        suggestions: list(BaseSuggestion). A list of suggestions to retrieve
            opportunity dicts.

    Returns:
        dict. Dict mapping target_id to the corresponding skill opportunity
        dict (None when missing); each non-None entry is augmented with a
        'skill_rubrics' key holding the skill's rubric dicts.
    """
    target_ids = {suggestion.target_id for suggestion in suggestions}
    opportunity_dicts = {}
    for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
            list(target_ids)).items():
        opportunity_dicts[opp_id] = None if opp is None else opp.to_dict()
    skill_ids = [
        opp_dict['id'] for opp_dict in opportunity_dicts.values()
        if opp_dict is not None]
    # Attach each skill's rubrics to its opportunity dict, keyed by skill id.
    for skill in skill_fetchers.get_multi_skills(skill_ids):
        if skill is not None:
            opportunity_dicts[skill.id]['skill_rubrics'] = [
                rubric.to_dict() for rubric in skill.rubrics]
    return opportunity_dicts
class SuggestionHandler(base.BaseHandler):
    """Handles operations relating to suggestions."""
    @acl_decorators.can_suggest_changes
    def post(self):
        """Creates a new suggestion from the request payload.

        Raises:
            InvalidInputException. The suggestion payload is invalid, or an
                image referenced by the suggestion was not uploaded with the
                request or fails validation.
        """
        try:
            suggestion = suggestion_services.create_suggestion(
                self.payload.get('suggestion_type'),
                self.payload.get('target_type'), self.payload.get('target_id'),
                self.payload.get('target_version_at_submission'),
                self.user_id, self.payload.get('change'),
                self.payload.get('description'))
        except utils.ValidationError as e:
            raise self.InvalidInputException(e)
        # TODO(#10513) : Find a way to save the images before the suggestion is
        # created.
        suggestion_image_context = suggestion.image_context
        # For suggestion which doesn't need images for rendering the
        # image_context is set to None.
        if suggestion_image_context is None:
            self.render_json(self.values)
            return
        # Every image newly referenced by the suggestion must be uploaded as
        # part of this request; save an original plus a compressed version.
        new_image_filenames = (
            suggestion.get_new_image_filenames_added_in_suggestion())
        for filename in new_image_filenames:
            image = self.request.get(filename)
            if not image:
                logging.error(
                    'Image not provided for file with name %s when the '
                    ' suggestion with target id %s was created.' % (
                        filename, suggestion.target_id))
                raise self.InvalidInputException(
                    'No image data provided for file with name %s.'
                    % (filename))
            try:
                file_format = (
                    image_validation_services.validate_image_and_filename(
                        image, filename))
            except utils.ValidationError as e:
                raise self.InvalidInputException('%s' % (e))
            image_is_compressible = (
                file_format in feconf.COMPRESSIBLE_IMAGE_FORMATS)
            fs_services.save_original_and_compressed_versions_of_image(
                filename, suggestion_image_context, suggestion.target_id,
                image, 'image', image_is_compressible)
        # Copy over images already referenced by the target entity's HTML so
        # the suggestion can render them in its own image context.
        target_entity_html_list = suggestion.get_target_entity_html_strings()
        target_image_filenames = (
            html_cleaner.get_image_filenames_from_html_strings(
                target_entity_html_list))
        fs_services.copy_images(
            suggestion.target_type, suggestion.target_id,
            suggestion_image_context, suggestion.target_id,
            target_image_filenames)
        self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
    """Handles actions performed on suggestions to explorations."""
    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_exploration)
    def put(self, target_id, suggestion_id):
        """Accepts or rejects a suggestion made to an exploration.

        Args:
            target_id: str. The id of the target exploration.
            suggestion_id: str. Dot-delimited id whose first part is the
                target type and whose second part is the target id.

        Raises:
            InvalidInputException. The suggestion id or action is invalid,
                or the commit message is too long.
            UnauthorizedUserException. The user tries to review their own
                suggestion.
        """
        # Suggestion ids are dot-delimited: '<target_type>.<target_id>.<...>'.
        if (
                suggestion_id.split('.')[0] !=
                suggestion_models.TARGET_TYPE_EXPLORATION):
            raise self.InvalidInputException(
                'This handler allows actions only'
                ' on suggestions to explorations.')
        if suggestion_id.split('.')[1] != target_id:
            raise self.InvalidInputException(
                'The exploration id provided does not match the exploration id '
                'present as part of the suggestion_id')
        action = self.payload.get('action')
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Self-review is disallowed.
        if suggestion.author_id == self.user_id:
            raise self.UnauthorizedUserException(
                'You cannot accept/reject your own suggestion.')
        if action == suggestion_models.ACTION_TYPE_ACCEPT:
            commit_message = self.payload.get('commit_message')
            if (commit_message is not None and
                    len(commit_message) > feconf.MAX_COMMIT_MESSAGE_LENGTH):
                raise self.InvalidInputException(
                    'Commit messages must be at most %s characters long.'
                    % feconf.MAX_COMMIT_MESSAGE_LENGTH)
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, self.payload.get('commit_message'),
                self.payload.get('review_message'))
        elif action == suggestion_models.ACTION_TYPE_REJECT:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class ResubmitSuggestionHandler(base.BaseHandler):
    """Handler to reopen a rejected suggestion."""
    @acl_decorators.can_resubmit_suggestion
    def put(self, suggestion_id):
        """Reopens the rejected suggestion with an updated change payload."""
        suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
        # Rebuild the change object using the same concrete change class as
        # the original suggestion's change.
        change_object = type(suggestion.change)(self.payload.get('change'))
        suggestion_services.resubmit_rejected_suggestion(
            suggestion_id, self.payload.get('summary_message'), self.user_id,
            change_object)
        self.render_json(self.values)
class SuggestionToSkillActionHandler(base.BaseHandler):
    """Handles actions performed on suggestions to skills."""
    @acl_decorators.get_decorator_for_accepting_suggestion(
        acl_decorators.can_edit_skill)
    def put(self, target_id, suggestion_id):
        """Accepts or rejects a suggestion made to a skill.

        Args:
            target_id: str. The id of the target skill.
            suggestion_id: str. Dot-delimited id whose first part is the
                target type and whose second part is the target id.

        Raises:
            InvalidInputException. The suggestion id or action is invalid.
        """
        # Suggestion ids are dot-delimited: '<target_type>.<target_id>.<...>'.
        if suggestion_id.split('.')[0] != suggestion_models.TARGET_TYPE_SKILL:
            raise self.InvalidInputException(
                'This handler allows actions only on suggestions to skills.')
        if suggestion_id.split('.')[1] != target_id:
            raise self.InvalidInputException(
                'The skill id provided does not match the skill id present as '
                'part of the suggestion_id')
        action = self.payload.get('action')
        if action == suggestion_models.ACTION_TYPE_ACCEPT:
            # Question suggestions do not use commit messages.
            suggestion_services.accept_suggestion(
                suggestion_id, self.user_id, 'UNUSED_COMMIT_MESSAGE',
                self.payload.get('review_message'))
        elif action == suggestion_models.ACTION_TYPE_REJECT:
            suggestion_services.reject_suggestion(
                suggestion_id, self.user_id, self.payload.get('review_message'))
        else:
            raise self.InvalidInputException('Invalid action.')
        self.render_json(self.values)
class SuggestionsProviderHandler(base.BaseHandler):
    """Provides suggestions for a user and given suggestion type."""
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    def _require_valid_suggestion_and_target_types(
            self, target_type, suggestion_type):
        """Checks whether the given target_type and suggestion_type are valid.
        Args:
            target_type: str. The type of the suggestion target.
            suggestion_type: str. The type of the suggestion.
        Raises:
            InvalidInputException. If the given target_type or suggestion_type
                are invalid.
        """
        if target_type not in suggestion_models.TARGET_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid target_type: %s' % target_type)
        if suggestion_type not in suggestion_models.SUGGESTION_TYPE_CHOICES:
            raise self.InvalidInputException(
                'Invalid suggestion_type: %s' % suggestion_type)
    def _render_suggestions(self, target_type, suggestions):
        """Renders retrieved suggestions.
        Args:
            target_type: str. The suggestion type.
            suggestions: list(BaseSuggestion). A list of suggestions to render.
        """
        # Exploration- and skill-targeted suggestions are rendered together
        # with a lookup of their target's opportunity summary.
        if target_type == suggestion_models.TARGET_TYPE_EXPLORATION:
            target_id_to_opportunity_dict = (
                _get_target_id_to_exploration_opportunity_dict(suggestions))
            self.render_json({
                'suggestions': [s.to_dict() for s in suggestions],
                'target_id_to_opportunity_dict':
                    target_id_to_opportunity_dict
            })
        elif target_type == suggestion_models.TARGET_TYPE_SKILL:
            target_id_to_opportunity_dict = (
                _get_target_id_to_skill_opportunity_dict(suggestions))
            self.render_json({
                'suggestions': [s.to_dict() for s in suggestions],
                'target_id_to_opportunity_dict':
                    target_id_to_opportunity_dict
            })
        else:
            # Other target types render an empty payload rather than erroring.
            self.render_json({})
class ReviewableSuggestionsHandler(SuggestionsProviderHandler):
    """Provides every suggestion of the given type that the current user is
    allowed to review.
    """
    @acl_decorators.can_view_reviewable_suggestions
    def get(self, target_type, suggestion_type):
        """Handles GET requests."""
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        reviewable_suggestions = (
            suggestion_services.get_reviewable_suggestions(
                self.user_id, suggestion_type))
        self._render_suggestions(target_type, reviewable_suggestions)
class UserSubmittedSuggestionsHandler(SuggestionsProviderHandler):
    """Provides every suggestion of the given type that the current user has
    submitted.
    """
    @acl_decorators.can_suggest_changes
    def get(self, target_type, suggestion_type):
        """Handles GET requests."""
        self._require_valid_suggestion_and_target_types(
            target_type, suggestion_type)
        own_suggestions = suggestion_services.get_submitted_suggestions(
            self.user_id, suggestion_type)
        self._render_suggestions(target_type, own_suggestions)
class SuggestionListHandler(base.BaseHandler):
    """Handles list operations on suggestions."""
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    @acl_decorators.open_access
    def get(self):
        """Queries suggestions by the field/value pairs in the URL.

        The query string ?field1=value1&field2=value2...fieldN=valueN is
        parsed by request.GET.items() into a list of (field, value) tuples.
        Only fields in suggestion_models.ALLOWED_QUERY_FIELDS may be queried.
        """
        query_fields_and_values = list(self.request.GET.items())
        for field, _ in query_fields_and_values:
            if field not in suggestion_models.ALLOWED_QUERY_FIELDS:
                raise self.InvalidInputException(
                    'Not allowed to query on field %s' % field)
        matching_suggestions = suggestion_services.query_suggestions(
            query_fields_and_values)
        self.values.update({
            'suggestions': [s.to_dict() for s in matching_suggestions]})
        self.render_json(self.values)
| 40.611429 | 80 | 0.674757 |
acde529ac6707ae6bea3bf73120792ad64e11115 | 11,233 | py | Python | neurolight_evaluation/graph_metrics.py | pattonw/neurolight_evaluation | e5b2939e52ed7abe72055f8db6e550ae1e749f13 | [
"MIT"
] | null | null | null | neurolight_evaluation/graph_metrics.py | pattonw/neurolight_evaluation | e5b2939e52ed7abe72055f8db6e550ae1e749f13 | [
"MIT"
] | null | null | null | neurolight_evaluation/graph_metrics.py | pattonw/neurolight_evaluation | e5b2939e52ed7abe72055f8db6e550ae1e749f13 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
from funlib.evaluate.run_length import expected_run_length
from typing import List, Tuple, Dict
from enum import Enum
import logging
logger = logging.getLogger(__file__)
class Metric(Enum):
    """Identifiers for the supported graph-comparison metrics."""
    ERL = "erl"  # expected run length
    GRAPH_EDIT = "graph_edit"  # pseudo graph edit distance
    RECALL_PRECISION = "recall_precision"  # cable-length recall/precision
def evaluate_matching(
    metric: Metric,
    node_matchings: List[Tuple[int, int]],
    node_labels_x: Dict[int, int],
    node_labels_y: Dict[int, int],
    graph_x: nx.Graph,
    graph_y: nx.Graph,
    location_attr: str,
    **metric_kwargs,
):
    """Dispatch to the scoring function selected by `metric`."""
    metric_functions = {
        Metric.ERL: erl,
        Metric.GRAPH_EDIT: psudo_graph_edit_distance,
        Metric.RECALL_PRECISION: recall_precision,
    }
    if metric not in metric_functions:
        raise NotImplementedError(
            f"Passed in metric: {metric} is not supported. See {Metric}"
        )
    return metric_functions[metric](
        node_matchings,
        node_labels_x,
        node_labels_y,
        graph_x,
        graph_y,
        location_attr,
        **metric_kwargs,
    )
def recall_precision(
    node_matchings: List[Tuple[int, int]],
    node_labels_x: Dict[int, int],
    node_labels_y: Dict[int, int],
    graph_x: nx.Graph,
    graph_y: nx.Graph,
    location_attr: str,
    details=False,
) -> Tuple[float, float]:
    """
    Calculate recall and precision across two graphs.

    Recall is the fraction of graph_x's cable length that was successfully
    matched; precision is the fraction of graph_y's cable length that was
    successfully matched. An edge (a, b) is successfully matched if a and b
    both match to nodes carrying the same label id. Nodes without adjacent
    edges contribute to neither metric.

    Edge cases (preserved from the original convention): recall is 1 when
    graph_x has no cable, precision is 0 when graph_y has no cable.

    Args:
        node_matchings: List of (node_x, node_y) match pairs.
        node_labels_x: Maps graph_x node ids to label ids.
        node_labels_y: Maps graph_y node ids to label ids.
        graph_x: The graph on which recall is computed.
        graph_y: The graph on which precision is computed.
        location_attr: Node attribute holding the position used for edge
            lengths.
        details: When True, additionally return the raw
            (matched_x, total_x, matched_y, total_y) cable lengths.

    Returns:
        (recall, precision), plus the detail tuple when details=True.
    """
    # Sentinel node id guaranteed not to collide with any real node id.
    # (The unused `nomatch_label` computation was removed; it also raised a
    # ValueError whenever both label dicts were empty.)
    nomatch_node = max(list(graph_x.nodes) + list(graph_y.nodes)) + 1

    matched_x = 0
    total_x = 0
    matched_y = 0
    total_y = 0

    # Build node -> matched-label lookups, asserting that the matching is
    # consistent (each node matches nodes of a single label).
    x_node_to_y_label = {}
    y_node_to_x_label = {}
    for a, b in node_matchings:
        y_label = x_node_to_y_label.setdefault(a, node_labels_y[b])
        assert y_label == node_labels_y[b], (
            f"node {a} in graph_x matches to multiple labels in graph_y, "
            f"including {(y_label, node_labels_y[b])}!"
        )
        x_label = y_node_to_x_label.setdefault(b, node_labels_x[a])
        assert x_label == node_labels_x[a], (
            f"node {b} in graph_y matches to multiple labels in graph_x, "
            f"including {(x_label, node_labels_x[a])}!"
        )

    for a, b in graph_x.edges():
        a_loc = graph_x.nodes[a][location_attr]
        b_loc = graph_x.nodes[b][location_attr]
        edge_len = np.linalg.norm(a_loc - b_loc)
        # Distinct fallback sentinels (nomatch_node vs nomatch_node + 1)
        # guarantee that two *unmatched* endpoints never compare equal.
        if x_node_to_y_label.get(a, nomatch_node) == x_node_to_y_label.get(
            b, nomatch_node + 1
        ):
            matched_x += edge_len
        total_x += edge_len

    for a, b in graph_y.edges():
        a_loc = graph_y.nodes[a][location_attr]
        b_loc = graph_y.nodes[b][location_attr]
        edge_len = np.linalg.norm(a_loc - b_loc)
        if y_node_to_x_label.get(a, nomatch_node) == y_node_to_x_label.get(
            b, nomatch_node + 1
        ):
            matched_y += edge_len
        total_y += edge_len

    if np.isclose(total_x, 0):
        recall = 1
    else:
        recall = matched_x / total_x
    if np.isclose(total_y, 0):
        precision = 0
    else:
        precision = matched_y / total_y

    if not details:
        return (recall, precision)
    return (recall, precision, (matched_x, total_x, matched_y, total_y))
def psudo_graph_edit_distance(
    node_matchings: List[Tuple[int, int]],
    node_labels_x: Dict[int, int],
    node_labels_y: Dict[int, int],
    graph_x: nx.Graph,
    graph_y: nx.Graph,
    location_attr: str,
    node_spacing: float,
    details: bool = False,
) -> Tuple[float, float]:
    """
    Calculate a pseudo graph edit distance approximating the time a trained
    tracer would need to correct the predicted graph `graph_x` against the
    ground truth `graph_y`.

    Cost model:
    - 1 per x-edge whose endpoints match different y-labels (merge error).
    - 1 per y-edge whose endpoints match different x-labels (split error).
    - 1 per connected component of unmatched x-nodes (false positives).
    - For unmatched y-nodes (false negatives), half the adjacent cable
      length divided by `node_spacing` approximates the number of node
      placements needed to redraw the missing cable.

    Args:
        node_matchings: List of (node_x, node_y) match pairs.
        node_labels_x: Maps graph_x node ids to label ids.
        node_labels_y: Maps graph_y node ids to label ids.
        graph_x: The "predicted" graph.
        graph_y: The "ground_truth" graph.
        location_attr: Node attribute holding the spatial position.
        node_spacing: Cable length represented by one manually placed node.
        details: When True, also return the individual cost terms.

    Returns:
        The edit distance, plus
        (split_cost, merge_cost, false_pos_cost, false_neg_cost) when
        details=True.
    """
    # Sentinel node id guaranteed not to collide with any real node id.
    # (The unused `nomatch_label` computation was removed; it also raised a
    # ValueError whenever both label dicts were empty.)
    nomatch_node = max(list(graph_x.nodes) + list(graph_y.nodes)) + 1

    # NOTE(review): unlike recall_precision, one-node-to-many-labels
    # matchings are deliberately tolerated here (the consistency asserts
    # were disabled upstream); the first label seen per node wins.
    x_node_to_y_label = {}
    y_node_to_x_label = {}
    for a, b in node_matchings:
        x_node_to_y_label.setdefault(a, node_labels_y[b])
        y_node_to_x_label.setdefault(b, node_labels_x[a])

    # NOTE(review): the sentinel is derived from node ids but compared
    # against labels; a label whose value equals nomatch_node would be
    # misclassified — TODO confirm id/label ranges are disjoint.
    false_pos_nodes = [
        x_node
        for x_node in graph_x.nodes
        if x_node_to_y_label.get(x_node, nomatch_node) == nomatch_node
    ]
    false_neg_nodes = [
        y_node
        for y_node in graph_y.nodes
        if y_node_to_x_label.get(y_node, nomatch_node) == nomatch_node
    ]

    # One click removes an entire connected component of false positives.
    false_pos_cost = len(
        list(nx.connected_components(graph_x.subgraph(false_pos_nodes)))
    )

    # Reconstructing false negatives costs ~1 click per `node_spacing` of
    # missing cable; each adjacent edge is halved to avoid double counting.
    false_neg_cost = 0
    for node in false_neg_nodes:
        cable_len = 0
        node_loc = graph_y.nodes[node][location_attr]
        for neighbor in graph_y.neighbors(node):
            neighbor_loc = graph_y.nodes[neighbor][location_attr]
            cable_len += np.linalg.norm(node_loc - neighbor_loc) / 2
        false_neg_cost += cable_len / node_spacing

    merge_cost = 0
    for u, v in graph_x.edges:
        if x_node_to_y_label.get(u, nomatch_node) != x_node_to_y_label.get(
            v, nomatch_node
        ):
            merge_cost += 1

    split_cost = 0
    for u, v in graph_y.edges:
        if y_node_to_x_label.get(u, nomatch_node) != y_node_to_x_label.get(
            v, nomatch_node
        ):
            split_cost += 1

    logger.info(
        f"false_pos_cost: {false_pos_cost}, false_neg_cost: {false_neg_cost}, "
        f"merge_cost: {merge_cost}, split_cost: {split_cost}"
    )
    edit_distance = false_pos_cost + false_neg_cost + merge_cost + split_cost
    if not details:
        return edit_distance
    return edit_distance, (split_cost, merge_cost, false_pos_cost, false_neg_cost)
def erl(
    node_matchings: List[Tuple[int, int]],
    node_labels_x: Dict[int, int],
    node_labels_y: Dict[int, int],
    graph_x: nx.Graph,
    graph_y: nx.Graph,
    location_attr: str,
):
    """Prepare label lookups and delegate to ``expected_run_length``.

    Builds a mapping from each node of ``graph_y`` to the label of its
    matched node in ``graph_x`` (or to a sentinel "no match" id), annotates
    ``graph_y`` nodes with their own component label, and computes the
    expected run length of ``graph_y`` under that segmentation.

    Args:
        node_matchings: pairs ``(a, b)`` matching node ``a`` of ``graph_x``
            to node ``b`` of ``graph_y``.
        node_labels_x: mapping from ``graph_x`` nodes to component labels.
        node_labels_y: mapping from ``graph_y`` nodes to component labels.
        graph_x: reconstructed graph.
        graph_y: ground-truth graph with spatial node positions.
        location_attr: name of the node attribute holding a node's spatial
            position; forwarded to ``expected_run_length``.

    Returns:
        The value returned by ``expected_run_length`` for ``graph_y``.

    Raises:
        AssertionError: if the matching assigns a single node to multiple
            labels on the other side.
    """
    # Sentinel id guaranteed not to collide with any real node id.
    nomatch_node = max(list(graph_x.nodes) + list(graph_y.nodes)) + 1
    x_node_to_y_label = {}
    y_node_to_x_label = {}
    for a, b in node_matchings:
        y_label = x_node_to_y_label.setdefault(a, node_labels_y[b])
        assert y_label == node_labels_y[b], (
            f"node {a} in graph_x matches to multiple labels in graph_y, "
            f"including {(y_label, node_labels_y[b])}!"
        )
        x_label = y_node_to_x_label.setdefault(b, node_labels_x[a])
        assert x_label == node_labels_x[a], (
            f"node {b} in graph_y matches to multiple labels in graph_x, "
            f"including {(x_label, node_labels_x[a])}!"
        )
    # Unmatched ground-truth nodes fall into the sentinel "segment".
    segment_lut = {
        node_y: y_node_to_x_label.get(node_y, nomatch_node)
        for node_y in graph_y.nodes()
    }
    for node, attrs in graph_y.nodes.items():
        attrs["component"] = node_labels_y[node]
    return expected_run_length(
        graph_y,
        "component",
        "edge_len",
        segment_lut,
        # Honor the caller-supplied attribute name; it was previously
        # hard-coded to "location", leaving the parameter unused.
        skeleton_position_attributes=[location_attr],
    )
| 31.202778 | 88 | 0.62263 |
acde52eaaa96c17beb3168d165969a856e66136d | 28,125 | py | Python | alntools/matrix/AlignmentPropertyMatrix.py | churchill-lab/alntools | dd9668284a7d924d881e4d39d1841242ef883d9c | [
"MIT"
] | 1 | 2018-03-22T21:52:27.000Z | 2018-03-22T21:52:27.000Z | alntools/matrix/AlignmentPropertyMatrix.py | churchill-lab/alntools | dd9668284a7d924d881e4d39d1841242ef883d9c | [
"MIT"
] | 8 | 2018-08-22T16:09:16.000Z | 2021-11-15T17:46:56.000Z | alntools/matrix/AlignmentPropertyMatrix.py | churchill-lab/alntools | dd9668284a7d924d881e4d39d1841242ef883d9c | [
"MIT"
] | 2 | 2019-03-10T04:34:16.000Z | 2020-06-03T21:30:27.000Z | #!/usr/bin/env python
import copy
import tables
from struct import pack, unpack
from itertools import dropwhile
import numpy as np
from scipy.sparse import lil_matrix, coo_matrix, csc_matrix, csr_matrix
from alntools.matrix.Sparse3DMatrix import Sparse3DMatrix
try:
xrange
except NameError:
xrange = range
def enum(**enums):
    """Build an ad-hoc enumeration class whose attributes are the given kwargs.

    Example: ``E = enum(A=0, B=1)`` yields a class with ``E.A == 0``.
    """
    members = dict(enums)
    return type('Enum', (), members)
def is_comment(s):
    """Return True when *s* is a comment line, i.e. its first character is '#'."""
    return s[:1] == '#'
class AlignmentPropertyMatrix(Sparse3DMatrix):
    """A sparse |loci| x |haplotypes| x |reads| matrix of read alignments.

    The matrix is stored (via ``Sparse3DMatrix``) as one scipy sparse matrix
    per haplotype. Instances can be built four mutually exclusive ways,
    checked in this order by ``__init__``:

    * copied from an existing ``AlignmentPropertyMatrix`` (``other``),
    * loaded from a binary equivalence-class file (``ecfile``, format 2),
    * loaded from a PyTables HDF5 file (``h5file``),
    * created empty from an explicit ``shape``.

    Optional metadata is tracked alongside the matrix: haplotype, locus,
    read and sample names (with their id lookups), transcript lengths,
    per-read (or per-read-per-sample) counts, and isoform grouping.
    """
    # Axes accepted by sum()/normalize_reads(); GROUP and HAPLOGROUP are
    # pseudo-axes that operate on bundles of loci (e.g. isoforms of a gene).
    Axis = enum(LOCUS=0, HAPLOTYPE=1, READ=2, GROUP=3, HAPLOGROUP=4)
    def __init__(self,
                 other=None,
                 ecfile=None,
                 h5file=None, datanode='/', metanode='/', shallow=False,
                 shape=None, dtype=float, haplotype_names=None, locus_names=None, read_names=None, sample_names=None,
                 grpfile=None):
        """Initialize from one of: another matrix, an EC file, an HDF5 file, or a shape.

        :param other: existing ``AlignmentPropertyMatrix`` to copy
        :param ecfile: path to a binary equivalence-class file (format 2)
        :param h5file: path to a PyTables file written by :meth:`save`
        :param datanode: HDF5 group containing matrix data
        :param metanode: HDF5 group containing name metadata
        :param shallow: if True, skip copying/loading name metadata
        :param shape: (num_loci, num_haplotypes, num_reads) for an empty matrix
        :param dtype: data type of matrix entries
        :param haplotype_names: names along the haplotype axis (shape mode)
        :param locus_names: names along the locus axis (shape mode)
        :param read_names: names along the read axis (shape mode)
        :param sample_names: sample names (shape mode); defaults to one sample
        :param grpfile: optional tab-delimited file of locus groupings
        """
        Sparse3DMatrix.__init__(self, other=other, h5file=h5file, datanode=datanode, shape=shape, dtype=dtype)
        self.num_loci, self.num_haplotypes, self.num_reads = self.shape
        self.num_samples = 0
        self.num_groups = 0
        self.count = None   # per-read counts (vector) or read-by-sample counts (csc_matrix)
        self.hname = None   # haplotype names
        self.lname = None   # locus name
        self.rname = None   # read name
        self.sname = None   # sample name (e.g. sample barcodes::cell barcodes)
        self.lid = None     # locus ID
        self.rid = None     # read ID
        self.sid = None     # sample ID
        self.lengths = None # transcript lengths (or effective lengths)
        self.gname = None   # group name
        self.groups = None  # groups in terms of locus IDs
        if other is not None: # Use for copying from other existing AlignmentPropertyMatrix object
            if other.count is not None:
                self.count = copy.copy(other.count)
            if other.lengths is not None:
                self.lengths = copy.copy(other.lengths)
            if not shallow:
                self.__copy_names(other)
                self.__copy_group_info(other)
        elif ecfile is not None:
            # Binary EC file layout (format 2): format id, haplotype names,
            # transcript names + per-haplotype lengths, sample names, then the
            # alignment (A) and count (N) matrices in CSR/CSC component form.
            with open(ecfile, 'rb') as f:
                ecformat = unpack('<i', f.read(4))[0]
                if ecformat == 2:
                    self.num_haplotypes = unpack('<i', f.read(4))[0]
                    hname = list()
                    for hidx in range(self.num_haplotypes):
                        hname_len = unpack('<i', f.read(4))[0]
                        hname.append(unpack('<{}s'.format(hname_len), f.read(hname_len))[0].decode('utf-8'))
                    self.hname = np.array(hname)
                    # Get transcript info (name, then one length per haplotype)
                    self.num_loci = unpack('<i', f.read(4))[0]
                    self.lengths = np.zeros((self.num_loci, self.num_haplotypes), dtype=float)
                    tname = list()
                    for tidx in range(self.num_loci):
                        tname_len = unpack('<i', f.read(4))[0]
                        tname.append(unpack('<{}s'.format(tname_len), f.read(tname_len))[0].decode('utf-8'))
                        for hidx in range(self.num_haplotypes):
                            self.lengths[tidx, hidx] = unpack('<i', f.read(4))[0]
                    self.lname = np.array(tname)
                    self.lid = dict(zip(self.lname, np.arange(self.num_loci)))
                    # Get sample info
                    sname = list()
                    self.num_samples = unpack('<i', f.read(4))[0]
                    for sidx in range(self.num_samples):
                        sname_len = unpack('<i', f.read(4))[0]
                        sname.append(unpack('<{}s'.format(sname_len), f.read(sname_len))[0].decode('utf-8'))
                    self.sname = np.array(sname)
                    self.sid = dict(zip(self.sname, np.arange(self.num_samples)))
                    # Read in alignment matrix info (CSR components)
                    indptr_len = unpack('<i', f.read(4))[0]
                    self.num_reads = indptr_len - 1
                    nnz = unpack('<i', f.read(4))[0]
                    indptr_A = np.array(unpack('<{}i'.format(indptr_len), f.read(4*indptr_len)))
                    indices_A = np.array(unpack('<{}i'.format(nnz), f.read(4*nnz)))
                    data_A = np.array(unpack('<{}i'.format(nnz), f.read(4*nnz)))
                    # Read in EC count matrix info (CSC components)
                    indptr_len = unpack('<i', f.read(4))[0]
                    nnz = unpack('<i', f.read(4))[0]
                    indptr_N = np.array(unpack('<{}i'.format(indptr_len), f.read(4*indptr_len)))
                    indices_N = np.array(unpack('<{}i'.format(nnz), f.read(4*nnz)))
                    data_N = np.array(unpack('<{}i'.format(nnz), f.read(4*nnz)))
                    # Populate class member variables: data_A packs one bit per
                    # haplotype, peeled off least-significant-bit first.
                    for hidx in range(self.num_haplotypes-1):
                        data_A, data_A_rem = np.divmod(data_A, 2)
                        self.data.append(csr_matrix((data_A_rem, indices_A, indptr_A), shape=(self.num_reads, self.num_loci)))
                    self.data.append(csr_matrix((data_A, indices_A, indptr_A), shape=(self.num_reads, self.num_loci)))
                    for hidx in range(self.num_haplotypes):
                        self.data[hidx].eliminate_zeros()
                    self.count = csc_matrix((data_N, indices_N, indptr_N), shape=(self.num_reads, self.num_samples))
                    if self.num_samples == 1:
                        # Single-sample counts are kept as a dense vector.
                        self.count = self.count.todense().A.flatten()
                    self.shape = (self.num_loci, self.num_haplotypes, self.num_reads)
                    self.finalize()
                elif ecformat == 1:
                    raise NotImplementedError
                elif ecformat == 0:
                    raise TypeError('Format 0 is not supported anymore.')
        elif h5file is not None: # Use for loading from a pytables file
            h5fh = tables.open_file(h5file, 'r')
            if not shallow:
                self.hname = h5fh.get_node_attr(datanode, 'hname')
                self.lname = np.char.decode(h5fh.get_node(metanode, 'lname').read(), 'utf-8')
                self.lid = dict(zip(self.lname, np.arange(self.num_loci)))
                if h5fh.__contains__('%s' % (metanode + '/rname')):
                    self.rname = np.char.decode(h5fh.get_node(metanode, 'rname').read(), 'utf-8')
                    self.rid = dict(zip(self.rname, np.arange(self.num_reads)))
                if h5fh.__contains__('%s' % (metanode + '/sname')):
                    self.sname = np.char.decode(h5fh.get_node(metanode, 'sname').read(), 'utf-8')
                    self.sid = dict(zip(self.sname, np.arange(self.num_samples)))
                    self.num_samples = len(self.sname)
            if h5fh.__contains__('%s' % (datanode + '/count')):
                try:
                    self.count = h5fh.get_node(datanode, 'count').read() # Format-1: dense vector
                except tables.NoSuchNodeError as e: # Format-2: sparse matrix components
                    nmat_node = h5fh.get_node(datanode + '/count')
                    indptr = h5fh.get_node(nmat_node, 'indptr').read()
                    indices = h5fh.get_node(nmat_node, 'indices').read()
                    data = h5fh.get_node(nmat_node, 'data').read()
                    self.count = csc_matrix((data, indices, indptr), dtype=np.float64)
                    self.num_samples = self.count.shape[1]
            if h5fh.__contains__('%s' % (datanode + '/lengths')):
                self.lengths = h5fh.get_node(datanode, 'lengths').read()
            h5fh.close()
        elif shape is not None: # Use for initializing an empty matrix
            if haplotype_names is not None:
                if len(haplotype_names) == self.num_haplotypes:
                    self.hname = haplotype_names
                else:
                    raise RuntimeError('The number of names does not match to the matrix shape.')
            if locus_names is not None:
                if len(locus_names) == self.num_loci:
                    self.lname = np.array(locus_names)
                    self.lid = dict(zip(self.lname, np.arange(self.num_loci)))
                else:
                    raise RuntimeError('The number of names does not match to the matrix shape.')
            if read_names is not None:
                if len(read_names) == self.num_reads:
                    self.rname = np.array(read_names)
                    self.rid = dict(zip(self.rname, np.arange(self.num_reads)))
                else:
                    raise RuntimeError('The number of names does not match to the matrix shape.')
            if sample_names is not None:
                self.sname = np.array(sample_names)
                self.sid = dict(zip(self.sname, np.arange(self.num_samples)))
                self.num_samples = len(sample_names)
            else:
                self.num_samples = 1
        if grpfile is not None:
            self.__load_groups(grpfile)
    def __load_groups(self, grpfile): # A group is a set of isoforms within a gene
        """Load locus grouping (gene -> isoform loci) from a tab-delimited file.

        Each line is ``group_name<TAB>locus1<TAB>locus2...``; locus names are
        translated to ids via ``self.lid``, so names must be loaded first.
        """
        if self.lid is not None:
            self.gname = list()
            self.groups = list()
            with open(grpfile) as fh:
                for curline in fh:
                    item = curline.rstrip().split("\t")
                    self.gname.append(item[0])
                    tid_list = [ self.lid[t] for t in item[1:] ]
                    self.groups.append(tid_list)
            self.gname = np.array(self.gname)
            self.num_groups = len(self.gname)
        else:
            # Fixed typo in the original message ("availalbe").
            raise RuntimeError('Locus IDs are not available.')
    load_groups = __load_groups  # public alias
    def __copy_names(self, other):
        """Copy name arrays and id lookups from *other* (haplotype names shared)."""
        self.hname = other.hname
        self.lname = copy.copy(other.lname)
        self.rname = copy.copy(other.rname)
        self.sname = copy.copy(other.sname)
        self.lid = copy.copy(other.lid)
        self.rid = copy.copy(other.rid)
        self.sid = copy.copy(other.sid)
    def __copy_group_info(self, other):
        """Copy locus grouping metadata from *other*, if present."""
        if other.groups is not None and other.gname is not None:
            self.groups = copy.deepcopy(other.groups)
            self.gname = copy.copy(other.gname)
            self.num_groups = other.num_groups
    def copy(self, shallow=False):
        """Return a copy of this matrix; *shallow* skips names and group info."""
        dmat = Sparse3DMatrix.copy(self)
        dmat.count = self.count.copy()
        dmat.lengths = self.lengths.copy()
        dmat.num_loci, dmat.num_haplotypes, dmat.num_reads = dmat.shape
        if not shallow:
            dmat.__copy_names(self)
            dmat.__copy_group_info(self)
        return dmat
    def _bundle_inline(self, reset=False): # Inline bundling method
        """Collapse loci into groups in place (loci axis becomes the group axis).

        Group metadata is consumed: after the call, group names become locus
        names and the grouping is cleared.
        """
        if self.finalized:
            if self.num_groups > 0 and self.groups is not None and self.gname is not None:
                grp_conv_mat = lil_matrix((self.num_loci, self.num_groups))
                for i in xrange(self.num_groups):
                    grp_conv_mat[self.groups[i], i] = 1.0
                grp_conv_mat = grp_conv_mat.tocsc()
                for hid in xrange(self.num_haplotypes):
                    self.data[hid] = self.data[hid] * grp_conv_mat # TODO: Is there any better way to save memory?
                self.num_loci = self.num_groups
                self.shape = (self.num_groups, self.num_haplotypes, self.num_reads)
                self.lname = copy.copy(self.gname)
                self.lid = dict(zip(self.gname, np.arange(self.num_groups)))
                self.num_groups = 0
                self.groups = None
                self.gname = None
                if reset:
                    self.reset()
            else:
                raise RuntimeError('No group information is available for bundling.')
        else:
            raise RuntimeError('The matrix is not finalized.')
    def bundle(self, reset=False, shallow=False): # Copies the original matrix (Use lots of memory)
        """
        Returns ``AlignmentPropertyMatrix`` object in which loci are bundled using grouping information.

        :param reset: whether to reset the values at the loci
        :param shallow: whether to copy all the meta data
        """
        if self.finalized:
            if self.groups is not None and self.gname is not None:
                grp_conv_mat = lil_matrix((self.num_loci, self.num_groups))
                for i in xrange(self.num_groups):
                    grp_conv_mat[self.groups[i], i] = 1.0
                grp_align = Sparse3DMatrix.__mul__(self, grp_conv_mat) # The core of the bundling
                grp_align.num_loci = self.num_groups
                grp_align.num_haplotypes = self.num_haplotypes
                grp_align.num_reads = self.num_reads
                grp_align.shape = (grp_align.num_loci, grp_align.num_haplotypes, grp_align.num_reads)
                grp_align.count = self.count
                if not shallow:
                    grp_align.lname = copy.copy(self.gname)
                    grp_align.hname = self.hname
                    grp_align.rname = copy.copy(self.rname)
                    grp_align.sname = copy.copy(self.sname)
                    grp_align.lid = dict(zip(grp_align.lname, np.arange(grp_align.num_loci)))
                    grp_align.rid = copy.copy(self.rid)
                    grp_align.sid = copy.copy(self.sid)
                if reset:
                    grp_align.reset()
                return grp_align
            else:
                raise RuntimeError('No group information is available for bundling.')
        else:
            raise RuntimeError('The matrix is not finalized.')
    #
    # Binary Operators
    #
    def __add__(self, other):
        """Element-wise addition; names and group info are carried from self."""
        dmat = Sparse3DMatrix.__add__(self, other)
        dmat.num_loci, dmat.num_haplotypes, dmat.num_reads = self.shape
        dmat.__copy_names(self)
        dmat.__copy_group_info(self)
        return dmat
    def __sub__(self, other):
        """Element-wise subtraction; names and group info are carried from self."""
        dmat = Sparse3DMatrix.__sub__(self, other)
        dmat.num_loci, dmat.num_haplotypes, dmat.num_reads = self.shape
        dmat.__copy_names(self)
        dmat.__copy_group_info(self)
        return dmat
    def __mul__(self, other):
        """Multiplication; matrix operands change the locus axis, so locus
        names and group info are only propagated for scalar-like operands."""
        dmat = Sparse3DMatrix.__mul__(self, other)
        dmat.num_loci, dmat.num_haplotypes, dmat.num_reads = dmat.shape
        if isinstance(other, (np.ndarray, csc_matrix, csr_matrix, coo_matrix, lil_matrix)):
            dmat.hname = self.hname
            dmat.rname = copy.copy(self.rname)
            dmat.rid = copy.copy(self.rid)
            dmat.num_groups = 0
        else:
            dmat.__copy_names(self)
            dmat.__copy_group_info(self)
        return dmat
    #
    # Helper functions
    #
    def sum(self, axis):
        """Sum along an axis.

        :param axis: ``Axis.LOCUS`` -> dense |reads| x |haplotypes|,
                     ``Axis.HAPLOTYPE`` -> sparse |reads| x |loci|,
                     ``Axis.READ`` -> dense |haplotypes| x |loci|
                     (weighted by ``self.count`` when available)
        """
        if self.finalized:
            if axis == self.Axis.LOCUS:
                sum_mat = [] # sum along loci
                for hid in xrange(self.num_haplotypes):
                    sum_mat.append(self.data[hid].sum(axis=1).A)
                sum_mat = np.hstack(sum_mat)
            elif axis == self.Axis.HAPLOTYPE: # sum along haplotypes
                sum_mat = self.data[0]
                for hid in xrange(1, self.num_haplotypes):
                    sum_mat = sum_mat + self.data[hid] # Unlike others, this sum_mat is still sparse matrix
            elif axis == self.Axis.READ: # sum along reads
                sum_mat = []
                for hid in xrange(self.num_haplotypes):
                    if self.count is None:
                        sum_hap = self.data[hid].sum(axis=0).A
                    else:
                        hap_mat = self.data[hid].copy()
                        hap_mat.data *= self.count[hap_mat.indices]
                        sum_hap = hap_mat.sum(axis=0).A
                    sum_mat.append(sum_hap)
                sum_mat = np.vstack(sum_mat)
            else:
                raise RuntimeError('The axis should be 0, 1, or 2.')
            return sum_mat
        else:
            raise RuntimeError('The original matrix must be finalized.')
    def normalize_reads(self, axis, grouping_mat=None):
        """
        Read-wise normalization

        :param axis: The dimension along which we want to normalize values
        :param grouping_mat: An incidence matrix that specifies which isoforms are from a same gene
        :return: Nothing (as the method performs in-place operations)
        :rtype: None
        """
        if self.finalized:
            if axis == self.Axis.LOCUS: # Locus-wise normalization on each read
                normalizer = self.sum(axis=self.Axis.HAPLOTYPE) # Sparse matrix of |reads| x |loci|
                normalizer.eliminate_zeros()
                for hid in xrange(self.num_haplotypes):
                    self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
                    self.data[hid] = np.divide(self.data[hid], normalizer) # element-wise division
            elif axis == self.Axis.HAPLOTYPE: # haplotype-wise normalization on each read
                for hid in xrange(self.num_haplotypes):
                    normalizer = self.data[hid].sum(axis=self.Axis.HAPLOTYPE) # 1-dim Sparse matrix of |reads| x 1
                    normalizer = normalizer.A.flatten()
                    self.data[hid].data /= normalizer[self.data[hid].indices]
            elif axis == self.Axis.READ: # normalization each read as a whole
                sum_mat = self.sum(axis=self.Axis.LOCUS)
                normalizer = sum_mat.sum(axis=self.Axis.HAPLOTYPE)
                normalizer = normalizer.ravel()
                for hid in xrange(self.num_haplotypes):
                    self.data[hid].data /= normalizer[self.data[hid].indices]
            elif axis == self.Axis.GROUP: # group-wise normalization on each read
                if grouping_mat is None:
                    raise RuntimeError('Group information matrix is missing.')
                normalizer = self.sum(axis=self.Axis.HAPLOTYPE) * grouping_mat
                for hid in xrange(self.num_haplotypes):
                    self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
                    self.data[hid] = np.divide(self.data[hid], normalizer)
            elif axis == self.Axis.HAPLOGROUP: # haplotype-wise & group-wise normalization on each read
                if grouping_mat is None:
                    raise RuntimeError('Group information matrix is missing.')
                for hid in xrange(self.num_haplotypes): # normalizer is different hap-by-hap
                    normalizer = self.data[hid] * grouping_mat # Sparse matrix of |reads| x |loci|
                    self.data[hid].eliminate_zeros() # Trying to avoid numerical problem (inf or nan)
                    self.data[hid] = np.divide(self.data[hid], normalizer)
            else:
                raise RuntimeError('The axis should be 0, 1, 2, or 3.')
        else:
            raise RuntimeError('The original matrix must be finalized.')
    def pull_alignments_from(self, reads_to_use, shallow=False):
        """
        Pull out alignments of certain reads

        :param reads_to_use: numpy array of dtype=bool specifying which reads to use
        :param shallow: whether to copy sparse 3D matrix only or not
        :return: a new AlignmentPropertyMatrix object restricted to those reads
        """
        new_alnmat = self.copy(shallow=shallow)
        for hid in xrange(self.num_haplotypes):
            hdata = new_alnmat.data[hid]
            hdata.data *= reads_to_use[hdata.indices]
            hdata.eliminate_zeros()
        if new_alnmat.count is not None:
            if type(new_alnmat.count) == csc_matrix:
                new_alnmat.count.data *= reads_to_use[new_alnmat.count.indices]
                new_alnmat.count.eliminate_zeros()
            elif type(new_alnmat.count) == np.ndarray:
                new_alnmat.count[np.logical_not(reads_to_use)] = 0
            else:
                raise RuntimeError('APM count should be either scipy.sparse.csc_matrix or numpy.ndarray')
        return new_alnmat
    def get_unique_reads(self, ignore_haplotype=False, shallow=False):
        """
        Pull out alignments of uniquely-aligning reads

        :param ignore_haplotype: whether to regard allelic multiread as uniquely-aligning read
        :param shallow: whether to copy sparse 3D matrix only or not
        :return: a new AlignmentPropertyMatrix object of uniquely-aligning reads
        """
        if self.finalized:
            if ignore_haplotype:
                summat = self.sum(axis=self.Axis.HAPLOTYPE)
                nnz_per_read = np.diff(summat.tocsr().indptr)
                unique_reads = np.logical_and(nnz_per_read > 0, nnz_per_read < 2)
            else: # allelic multireads should be removed
                alncnt_per_read = self.sum(axis=self.Axis.LOCUS).sum(axis=self.Axis.HAPLOTYPE)
                unique_reads = np.logical_and(alncnt_per_read > 0, alncnt_per_read < 2)
            return self.pull_alignments_from(unique_reads, shallow=shallow)
        else:
            raise RuntimeError('The matrix is not finalized.')
    def count_unique_reads(self, ignore_haplotype=False):
        """Count uniquely-aligning reads per locus (or per haplotype x locus)."""
        if self.finalized:
            unique_reads = self.get_unique_reads(ignore_haplotype=ignore_haplotype, shallow=True)
            if ignore_haplotype:
                numaln_per_read = unique_reads.sum(axis=self.Axis.HAPLOTYPE)
                if self.count is None:
                    numaln_per_read.data = np.ones(numaln_per_read.nnz)
                else:
                    numaln_per_read.data = self.count[numaln_per_read.indices]
                return numaln_per_read.sum(axis=0).A.ravel() # An array of size |num_loci|
            else:
                return unique_reads.sum(axis=self.Axis.READ) # An array of size |num_haplotypes|x|num_loci|
        else:
            raise RuntimeError('The matrix is not finalized.')
    def count_alignments(self):
        """Return total alignment counts, an array of |num_haplotypes| x |num_loci|."""
        if self.finalized:
            return self.sum(axis=self.Axis.READ)
        else:
            raise RuntimeError('The matrix is not finalized.')
    def report_alignment_counts(self, filename):
        """Write a per-locus, tab-delimited table of alignment and unique-read counts."""
        alignment_counts = self.count_alignments()
        allelic_unique_counts = self.count_unique_reads(ignore_haplotype=False)
        locus_unique_counts = self.count_unique_reads(ignore_haplotype=True)
        cntdata = np.vstack((alignment_counts, allelic_unique_counts))
        cntdata = np.vstack((cntdata, locus_unique_counts))
        # Context manager guarantees the handle is closed even if a write fails.
        with open(filename, 'w') as fhout:
            fhout.write("locus\t" + "\t".join(['aln_%s' % h for h in self.hname]) + "\t")
            fhout.write("\t".join(['uniq_%s' % h for h in self.hname]) + "\t")
            fhout.write("locus_uniq" + "\n")
            for locus_id in xrange(self.num_loci):
                # list(map(...)) is required on Python 3, where map returns an
                # iterator that cannot be concatenated to a list.
                fhout.write("\t".join([self.lname[locus_id]] + list(map(str, cntdata[:, locus_id].ravel()))) + "\n")
    def combine(self, other, shallow=False):
        """Concatenate two finalized matrices along the read axis."""
        if self.finalized and other.finalized:
            dmat = Sparse3DMatrix.combine(self, other)
            dmat.num_loci, dmat.num_haplotypes, dmat.num_reads = dmat.shape
            if self.count is not None and other.count is not None:
                dmat.count = np.concatenate((self.count, other.count))
            if self.lengths is not None:
                dmat.lengths = copy.copy(self.lengths)
            if not shallow:
                dmat.hname = self.hname
                dmat.lname = copy.copy(self.lname)
                dmat.rname = np.concatenate((self.rname, other.rname))
                dmat.lid = copy.copy(self.lid)
                dmat.rid = dict(zip(dmat.rname, np.arange(dmat.num_reads)))
                dmat.__copy_group_info(self)
            return dmat
        else:
            raise RuntimeError('Both matrices must be finalized.')
    def apply_genotypes(self, gt_file):
        """Zero out alignments inconsistent with the genotype calls in *gt_file*.

        Each non-comment line is ``group<TAB>genotype`` where the genotype is a
        string of haplotype letters (e.g. "AB"); only the listed haplotypes are
        kept for the group's loci.
        """
        hid = dict(zip(self.hname, np.arange(self.num_haplotypes)))
        gid = dict(zip(self.gname, np.arange(len(self.gname))))
        gtmask = np.zeros((self.num_haplotypes, self.num_loci))
        with open(gt_file) as fh:
            if self.groups is not None:
                for curline in dropwhile(is_comment, fh):
                    item = curline.rstrip().split("\t")
                    g, gt = item[:2]
                    hid2set = np.array([hid[c] for c in gt])
                    tid2set = np.array(self.groups[gid[g]])
                    I, J = np.meshgrid(hid2set, tid2set, indexing='ij')
                    gtmask[I, J] = 1.0
            else:
                for curline in dropwhile(is_comment, fh):
                    item = curline.rstrip().split("\t")
                    g, gt = item[:2]
                    hid2set = np.array([hid[c] for c in gt])
                    I, J = np.meshgrid(hid2set, gid[g], indexing='ij')
                    gtmask[I, J] = 1.0
        self.multiply(gtmask, axis=2)
        for h in xrange(self.num_haplotypes):
            self.data[h].eliminate_zeros()
    def save(self, h5file, title=None, index_dtype='uint32', data_dtype=float, incidence_only=True, complib='zlib', shallow=False):
        """Save matrix, counts, lengths, and (unless *shallow*) names to a PyTables file."""
        Sparse3DMatrix.save(self, h5file=h5file, title=title, index_dtype=index_dtype, data_dtype=data_dtype, incidence_only=incidence_only, complib=complib)
        h5fh = tables.open_file(h5file, 'a')
        fil = tables.Filters(complevel=1, complib=complib)
        if self.lengths is not None:
            h5fh.create_carray(h5fh.root, 'lengths', obj=self.lengths, title='Transcript Lengths', filters=fil)
        if self.count is not None:
            if len(self.count.shape) == 1: # count is a vector
                h5fh.create_carray(h5fh.root, 'count', obj=self.count, title='Equivalence Class Counts', filters=fil)
            elif len(self.count.shape) == 2: # count is 2-dim matrix
                if not isinstance(self.count, csc_matrix):
                    self.count = csc_matrix(self.count)
                self.count.eliminate_zeros()
                cgroup = h5fh.create_group(h5fh.root, 'count', 'Sparse matrix components for N matrix')
                h5fh.create_carray(cgroup, 'indptr', obj=self.count.indptr.astype(index_dtype), filters=fil)
                h5fh.create_carray(cgroup, 'indices', obj=self.count.indices.astype(index_dtype), filters=fil)
                h5fh.create_carray(cgroup, 'data', obj=self.count.data.astype(index_dtype), filters=fil)
        if not shallow:
            h5fh.set_node_attr(h5fh.root, 'hname', self.hname)
            h5fh.create_carray(h5fh.root, 'lname', obj=self.lname, title='Locus Names', filters=fil)
            if self.rname is not None:
                h5fh.create_carray(h5fh.root, 'rname', obj=self.rname, title='Read Names', filters=fil)
            if self.sname is not None:
                h5fh.create_carray(h5fh.root, 'sname', obj=self.sname, title='Sample Names', filters=fil)
        h5fh.flush()
        h5fh.close()
    def get_read_data(self, rid):
        """Return the |haplotypes| x |loci| alignment slice for read *rid*."""
        return self.get_cross_section(index=rid, axis=self.Axis.READ)
    def print_read(self, rid):
        """
        Prints nonzero rows of the read wanted
        """
        if self.rname is not None:
            print(self.rname[rid])
            print('--')
        r = self.get_read_data(rid)
        aligned_loci = np.unique(r.nonzero()[1])
        for locus in aligned_loci:
            nzvec = r[:, locus].todense().transpose()[0].A.flatten()
            if self.lname is not None:
                print(self.lname[locus])
            else:
                print(locus)
            print(nzvec)
    #
    # For future use
    #
    def get_reads_aligned_to_locus(self, lid, hid=None):
        """Return sorted read ids aligned to locus *lid* (on haplotype *hid*, or any)."""
        ridset = set()
        if hid is None:
            for hid in xrange(self.num_haplotypes):
                curset = set(np.nonzero(self.data[hid][:, lid])[0])
                ridset = ridset.union(curset)
            return sorted(list(ridset))
        else:
            return sorted(np.nonzero(self.data[hid][:, lid])[0])
if __name__ == "__main__":
    # Library-only module: no CLI behavior when executed directly.
    pass # TODO: Put a simple usage example here
| 49.255692 | 157 | 0.569529 |
acde5510dce90f909d107e0823eac727c6a9ef6c | 15,570 | py | Python | training/isotropic/train_cell_8to4.py | d-v-b/CNNectome | 2b1f4786282306edf94b231c9fcf64419d8d1e2a | [
"BSD-2-Clause"
] | null | null | null | training/isotropic/train_cell_8to4.py | d-v-b/CNNectome | 2b1f4786282306edf94b231c9fcf64419d8d1e2a | [
"BSD-2-Clause"
] | null | null | null | training/isotropic/train_cell_8to4.py | d-v-b/CNNectome | 2b1f4786282306edf94b231c9fcf64419d8d1e2a | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
from gunpowder import *
from gunpowder.tensorflow import *
from gunpowder.contrib import ZeroOutConstSections, AddDistance
from networks.isotropic.mk_dtu_cell_generic import *
import gpn
import tensorflow as tf
import os
import math
import json
import sys
import logging
import time
import collections
print("syspath", sys.path)
import z5py
from utils.label import *
import numpy as np
def train_until(
    max_iteration,
    data_sources,
    ribo_sources,
    input_shape,
    output_shape,
    dt_scaling_factor,
    loss_name,
    labels,
    net_name,
    min_masked_voxels=17561.0,
    mask_ds_name="volumes/masks/training",
):
    """Build a gunpowder training pipeline and run it for ``max_iteration`` steps.

    Trains the network defined in ``net_name`` (a tensorflow meta-graph) on
    distance-transform targets for each label, reading 8 nm subsampled raw
    data and predicting at 4 nm output resolution.

    :param max_iteration: total number of training iterations to request
    :param data_sources: N5 datasets to draw training batches from
    :param ribo_sources: subset of ``data_sources`` that carries ribosome annotations
    :param input_shape: network input shape in voxels (at 8 nm)
    :param output_shape: network output shape in voxels (at 4 nm)
    :param dt_scaling_factor: scaling used when tanh-normalizing distance targets
    :param loss_name: key into net_io_names.json selecting the loss tensor
    :param labels: list of ``Label`` objects describing each prediction channel
    :param net_name: name of the network/meta-graph to train
    :param min_masked_voxels: minimum labeled voxels required to accept a batch
    :param mask_ds_name: dataset path of the training mask inside each N5 container
    """
    # Tensor-name lookup produced at network-construction time.
    with open("net_io_names.json", "r") as f:
        net_io_names = json.load(f)
    # Register gunpowder array keys (accessible afterwards via ArrayKeys.*).
    ArrayKey("RAW")
    ArrayKey("ALPHA_MASK")
    ArrayKey("GT_LABELS")
    ArrayKey("MASK")
    ArrayKey("RIBO_GT")
    # Label data is stored at 2 nm; raw input at 8 nm; predictions at 4 nm.
    voxel_size_up = Coordinate((2, 2, 2))
    voxel_size_input = Coordinate((8, 8, 8))
    voxel_size_output = Coordinate((4, 4, 4))
    input_size = Coordinate(input_shape) * voxel_size_input
    output_size = Coordinate(output_shape) * voxel_size_output
    # context = input_size-output_size
    # Fraction of output voxels that must be masked-in for a batch to be kept.
    keep_thr = float(min_masked_voxels) / np.prod(output_shape)
    data_providers = []
    inputs = dict()
    outputs = dict()
    snapshot = dict()
    request = BatchRequest()
    snapshot_request = BatchRequest()
    datasets_ribo = {
        ArrayKeys.RAW: None,
        ArrayKeys.GT_LABELS: "volumes/labels/all",
        ArrayKeys.MASK: mask_ds_name,
        ArrayKeys.RIBO_GT: "volumes/labels/ribosomes",
    }
    # for datasets without ribosome annotations volumes/labels/ribosomes doesn't exist, so use volumes/labels/all
    # instead (only one with the right resolution)
    datasets_no_ribo = {
        ArrayKeys.RAW: None,
        ArrayKeys.GT_LABELS: "volumes/labels/all",
        ArrayKeys.MASK: mask_ds_name,
        ArrayKeys.RIBO_GT: "volumes/labels/all",
    }
    array_specs = {
        ArrayKeys.MASK: ArraySpec(interpolatable=False),
        ArrayKeys.RAW: ArraySpec(voxel_size=Coordinate(voxel_size_input)),
    }
    array_specs_pred = {}
    inputs[net_io_names["raw"]] = ArrayKeys.RAW
    snapshot[ArrayKeys.RAW] = "volumes/raw"
    snapshot[ArrayKeys.GT_LABELS] = "volumes/labels/gt_labels"
    request.add(ArrayKeys.GT_LABELS, output_size, voxel_size=voxel_size_up)
    request.add(ArrayKeys.MASK, output_size, voxel_size=voxel_size_output)
    request.add(ArrayKeys.RIBO_GT, output_size, voxel_size=voxel_size_up)
    request.add(ArrayKeys.RAW, input_size, voxel_size=voxel_size_input)
    # Wire per-label masks, targets, weights, predictions, and snapshot entries.
    for label in labels:
        datasets_no_ribo[label.mask_key] = "volumes/masks/" + label.labelname
        datasets_ribo[label.mask_key] = "volumes/masks/" + label.labelname
        array_specs[label.mask_key] = ArraySpec(interpolatable=False)
        array_specs_pred[label.pred_dist_key] = ArraySpec(
            voxel_size=voxel_size_output, interpolatable=True
        )
        inputs[net_io_names["mask_" + label.labelname]] = label.mask_key
        inputs[net_io_names["gt_" + label.labelname]] = label.gt_dist_key
        if label.scale_loss or label.scale_key is not None:
            inputs[net_io_names["w_" + label.labelname]] = label.scale_key
        outputs[net_io_names[label.labelname]] = label.pred_dist_key
        snapshot[label.gt_dist_key] = "volumes/labels/gt_dist_" + label.labelname
        snapshot[label.pred_dist_key] = "volumes/labels/pred_dist_" + label.labelname
        request.add(label.gt_dist_key, output_size, voxel_size=voxel_size_output)
        request.add(label.pred_dist_key, output_size, voxel_size=voxel_size_output)
        request.add(label.mask_key, output_size, voxel_size=voxel_size_output)
        if label.scale_loss:
            request.add(label.scale_key, output_size, voxel_size=voxel_size_output)
        snapshot_request.add(
            label.pred_dist_key, output_size, voxel_size=voxel_size_output
        )
    # Resume from the latest checkpoint in the working directory, if any;
    # the iteration number is encoded after the last '_' in the checkpoint name.
    if tf.train.latest_checkpoint("."):
        trained_until = int(tf.train.latest_checkpoint(".").split("_")[-1])
        print("Resuming training from", trained_until)
    else:
        trained_until = 0
        print("Starting fresh training")
    # One provider per (crop, subsample variant): each crop exposes 8
    # subsampled raw variants under volumes/subsampled/raw{0..7}.
    for src in data_sources:
        for subsample_variant in range(8):
            dnr = datasets_no_ribo.copy()
            dr = datasets_ribo.copy()
            dnr[ArrayKeys.RAW] = "volumes/subsampled/raw{0:}/".format(subsample_variant)
            dr[ArrayKeys.RAW] = "volumes/subsampled/raw{0:}/".format(subsample_variant)
            if src not in ribo_sources:
                n5_source = N5Source(
                    src.full_path, datasets=dnr, array_specs=array_specs
                )
            else:
                n5_source = N5Source(
                    src.full_path, datasets=dr, array_specs=array_specs
                )
            data_providers.append(n5_source)
    # create a tuple of data sources, one for each HDF file
    data_stream = tuple(
        provider + Normalize(ArrayKeys.RAW) + # ensures RAW is in float in [0, 1]
        # zero-pad provided RAW and MASK to be able to draw batches close to
        # the boundary of the available data
        # size more or less irrelevant as followed by Reject Node
        # Pad(ArrayKeys.RAW, context) +
        RandomLocation()
        + Reject( # chose a random location inside the provided arrays
            ArrayKeys.MASK, min_masked=keep_thr
        )
        for provider in data_providers
    )
    # Augmentation chain; providers are sampled proportionally to their
    # labeled volume (each crop is repeated 8x for its subsample variants).
    train_pipeline = (
        data_stream
        + RandomProvider(
            tuple(np.repeat([ds.labeled_voxels for ds in data_sources], 8))
        )
        + gpn.SimpleAugment()
        + gpn.ElasticAugment(
            voxel_size_output,
            (100, 100, 100),
            (10.0, 10.0, 10.0),
            (0, math.pi / 2.0),
            spatial_dims=3,
            subsample=8,
        )
        + gpn.IntensityAugment(ArrayKeys.RAW, 0.25, 1.75, -0.5, 0.35)
        + GammaAugment(ArrayKeys.RAW, 0.5, 2.0)
        + IntensityScaleShift(ArrayKeys.RAW, 2, -1)
    )
    # Distance-transform targets per label; ribosomes use a separate ground
    # truth volume and a 'tanh+' normalization with an extra argument.
    for label in labels:
        if label.labelname != "ribosomes":
            train_pipeline += AddDistance(
                label_array_key=ArrayKeys.GT_LABELS,
                distance_array_key=label.gt_dist_key,
                normalize="tanh",
                normalize_args=dt_scaling_factor,
                label_id=label.labelid,
                factor=2,
            )
        else:
            train_pipeline += AddDistance(
                label_array_key=ArrayKeys.RIBO_GT,
                distance_array_key=label.gt_dist_key,
                normalize="tanh+",
                normalize_args=(dt_scaling_factor, 8),
                label_id=label.labelid,
                factor=2,
            )
    # Per-label loss balancing weights where requested.
    for label in labels:
        if label.scale_loss:
            train_pipeline += BalanceByThreshold(
                label.gt_dist_key, label.scale_key, mask=label.mask_key
            )
    # Prefetching, the actual tensorflow Train node, periodic snapshots, and
    # profiling output.
    train_pipeline = (
        train_pipeline
        + PreCache(cache_size=10, num_workers=20)
        + Train(
            net_name,
            optimizer=net_io_names["optimizer"],
            loss=net_io_names[loss_name],
            inputs=inputs,
            summary=net_io_names["summary"],
            log_dir="log",
            outputs=outputs,
            gradients={},
            log_every=5,
            save_every=500,
            array_specs=array_specs_pred,
        )
        + Snapshot(
            snapshot,
            every=500,
            output_filename="batch_{iteration}.hdf",
            output_dir="snapshots/",
            additional_request=snapshot_request,
        )
        + PrintProfilingStats(every=500)
    )
    print("Starting training...")
    # NOTE(review): the loop always runs max_iteration batches even when
    # resuming (trained_until is only printed) — confirm intended behavior.
    with build(train_pipeline) as b:
        for i in range(max_iteration):
            start_it = time.time()
            b.request_batch(request)
            time_it = time.time() - start_it
            logging.info("it {0:}: {1:}".format(i + 1, time_it))
    print("Training finished")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
data_dir = (
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v020719_505/{0:}.n5"
)
data_sources = list()
data_sources.append(N5Dataset("crop1", 500 * 500 * 100, data_dir=data_dir))
data_sources.append(N5Dataset("crop3", 400 * 400 * 250, data_dir=data_dir))
data_sources.append(N5Dataset("crop4", 300 * 300 * 238, data_dir=data_dir))
data_sources.append(
N5Dataset(
"crop6",
250 * 250 * 250,
special_categories=("ribosomes",),
data_dir=data_dir,
)
)
data_sources.append(
N5Dataset(
"crop7",
300 * 300 * 80,
special_categories=("ribosomes",),
data_dir=data_dir,
)
)
data_sources.append(N5Dataset("crop8", 200 * 200 * 100, data_dir=data_dir))
data_sources.append(N5Dataset("crop9", 100 * 100 * 53, data_dir=data_dir))
data_sources.append(
N5Dataset(
"crop13",
160 * 160 * 110,
special_categories=("ribosomes",),
data_dir=data_dir,
)
)
data_sources.append(N5Dataset("crop14", 150 * 150 * 65, data_dir=data_dir))
data_sources.append(N5Dataset("crop15", 150 * 150 * 64, data_dir=data_dir))
data_sources.append(N5Dataset("crop18", 200 * 200 * 110, data_dir=data_dir))
data_sources.append(N5Dataset("crop19", 150 * 150 * 55, data_dir=data_dir))
data_sources.append(N5Dataset("crop20", 200 * 200 * 85, data_dir=data_dir))
data_sources.append(N5Dataset("crop21", 160 * 160 * 55, data_dir=data_dir))
data_sources.append(N5Dataset("crop22", 170 * 170 * 100, data_dir=data_dir))
ribo_sources = filter_by_category(data_sources, "ribosomes")
input_shape = (196, 196, 196)
# output_shape = (92, 92, 92)
dt_scaling_factor = 50
max_iteration = 500000
loss_name = "loss_total"
labels = list()
labels.append(Label("ecs", 1, data_sources=data_sources, data_dir=data_dir))
labels.append(
Label("plasma_membrane", 2, data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label("mito", (3, 4, 5), data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label(
"mito_membrane",
3,
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(
Label(
"mito_DNA",
5,
scale_loss=False,
scale_key=labels[-2].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(Label("golgi", (6, 7), data_sources=data_sources, data_dir=data_dir))
labels.append(
Label("golgi_membrane", 6, data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label("vesicle", (8, 9), data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label(
"vesicle_membrane",
8,
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(Label("MVB", (10, 11), data_sources=data_sources, data_dir=data_dir))
labels.append(
Label(
"MVB_membrane",
10,
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(
Label("lysosome", (12, 13), data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label(
"lysosome_membrane",
12,
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(Label("LD", (14, 15), data_sources=data_sources, data_dir=data_dir))
labels.append(
Label(
"LD_membrane",
14,
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(
Label(
"er",
(16, 17, 18, 19, 20, 21, 22, 23),
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(
Label(
"er_membrane",
(16, 18, 20),
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(Label("ERES", (18, 19), data_sources=data_sources, data_dir=data_dir))
# labels.append(Label('ERES_membrane', 18, scale_loss=False, scale_key=labels[-1].scale_key,
# data_sources=data_sources, data_dir=data_dir))
labels.append(
Label(
"nucleus",
(20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 36),
data_sources=data_sources,
data_dir=data_dir,
)
)
labels.append(Label("nucleolus", 29, data_sources=data_sources, data_dir=data_dir))
labels.append(
Label(
"NE",
(20, 21, 22, 23),
scale_loss=False,
scale_key=labels[-1].scale_key,
data_sources=data_sources,
data_dir=data_dir,
)
)
# labels.append(Label('NE_membrane', (20, 22, 23), scale_loss=False, scale_key=labels[-1].scale_key,
# data_sources=data_sources, data_dir=data_dir))
labels.append(
Label("nuclear_pore", (22, 23), data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label("nuclear_pore_out", 22, scale_loss=False, scale_key=labels[-1].scale_key)
)
labels.append(
Label(
"chromatin",
(24, 25, 26, 27, 36),
data_sources=data_sources,
data_dir=data_dir,
)
)
# labels.append(Label('NHChrom', 25, scale_loss=False, scale_key=labels[-1].scale_key))
# labels.append(Label('EChrom', 26, scale_loss=False, scale_key=labels[-2].scale_key))
# labels.append(Label('NEChrom', 27, scale_loss=False, scale_key=labels[-3].scale_key))
labels.append(Label("NHChrom", 25, data_sources=data_sources, data_dir=data_dir))
labels.append(Label("EChrom", 26, data_sources=data_sources, data_dir=data_dir))
labels.append(Label("NEChrom", 27, data_sources=data_sources, data_dir=data_dir))
labels.append(
Label("microtubules", (30, 31), data_sources=data_sources, data_dir=data_dir)
)
labels.append(
Label("centrosome", (31, 32, 33), data_sources=data_sources, data_dir=data_dir)
)
labels.append(Label("distal_app", 32, data_sources=data_sources, data_dir=data_dir))
labels.append(
Label("subdistal_app", 33, data_sources=data_sources, data_dir=data_dir)
)
labels.append(Label("ribosomes", 1, data_sources=ribo_sources, data_dir=data_dir))
make_net(labels, (340, 340, 340), mode="inference")
tf.reset_default_graph()
net_name, output_shape = make_net(
labels, input_shape, mode="train", loss_name=loss_name
)
train_until(
max_iteration,
data_sources,
ribo_sources,
input_shape,
output_shape,
dt_scaling_factor,
loss_name,
labels,
net_name,
)
| 33.701299 | 113 | 0.612331 |
acde56db85e9972d6ded6f5ccdf6d15617158cdd | 1,198 | py | Python | utils/math.py | Tom-stack3/Labeler_demo | 7a14bf70f1ef6fbae20f0677fa1c0871630c65b7 | [
"Apache-2.0"
] | null | null | null | utils/math.py | Tom-stack3/Labeler_demo | 7a14bf70f1ef6fbae20f0677fa1c0871630c65b7 | [
"Apache-2.0"
] | null | null | null | utils/math.py | Tom-stack3/Labeler_demo | 7a14bf70f1ef6fbae20f0677fa1c0871630c65b7 | [
"Apache-2.0"
] | null | null | null | import math
import numpy as np
def calc_angle(a, b, c):
    """Return the angle at vertex b, in degrees within [0, 360).

    The angle is measured between the rays b->a and b->c.

    :param a: point [x, y]
    :param b: vertex point [x, y]
    :param c: point [x, y]
    :return: angle between ab and bc in degrees
    :raises ValueError: when b coincides with a or with c (undefined angle)
    """
    if b == a or b == c:
        raise ValueError("Undefined angle, two identical points", (a, b, c))
    bearing_ba = math.atan2(a[1] - b[1], a[0] - b[0])
    bearing_bc = math.atan2(c[1] - b[1], c[0] - b[0])
    ang = math.degrees(bearing_ba - bearing_bc)
    # atan2 differences live in (-360, 360); shift negatives into [0, 360).
    if ang < 0:
        ang += 360
    return ang
def calc_m_and_b(point1, point2):
    """Compute the slope-intercept form of the line through two points.

    Solves y = m*x + b by least squares over the two given points.

    :param point1: first point (x, y)
    :param point2: second point (x, y)
    :return: numpy array [m, b]
    """
    # zip(p1, p2) pairs up coordinates: (x1, x2) and (y1, y2).
    x_vals, y_vals = zip(point1, point2)
    design = np.vstack((x_vals, np.ones(len(x_vals)))).T
    solution, *_ = np.linalg.lstsq(design, y_vals, rcond=None)
    return solution
def y_from_m_b_x(m, b, x):
    """Evaluate the line y = m*x + b at the given x.

    :param m: slope (m)
    :param b: y-intercept (b)
    :param x: x coordinate
    :return: the y value on the line
    """
    slope_term = m * x
    return slope_term + b
def x_from_m_b_y(m, b, y):
    """Invert y = m*x + b: solve for x given y.

    :param m: slope (m); a zero slope raises ZeroDivisionError
    :param b: y-intercept (b)
    :param y: y coordinate
    :return: the x value on the line
    """
    rise = y - b
    return rise / m
| 21.781818 | 84 | 0.547579 |
acde5712fc94cfb1d8f989453371558cbcca45e4 | 139 | py | Python | tappayment/constants/error_code.py | Qasem-h/tappayment-python | f9b25471291688cc33102ecd015c4b4e85c6bf25 | [
"MIT"
] | 2 | 2020-12-17T16:16:00.000Z | 2020-12-21T13:32:22.000Z | tappayment/constants/error_code.py | Qasem-h/tappayment-python | f9b25471291688cc33102ecd015c4b4e85c6bf25 | [
"MIT"
] | null | null | null | tappayment/constants/error_code.py | Qasem-h/tappayment-python | f9b25471291688cc33102ecd015c4b4e85c6bf25 | [
"MIT"
class ERROR_CODE(object):
    """String constants naming the error categories used by this client.

    Each value mirrors its attribute name, so the constants can be compared
    directly against error-code strings received from the API.
    """
    BAD_REQUEST_ERROR = "BAD_REQUEST_ERROR"
    GATEWAY_ERROR = "GATEWAY_ERROR"
    SERVER_ERROR = "SERVER_ERROR"
acde57433680b2cece3cae06848062405a86c504 | 12,122 | py | Python | libcloud/compute/drivers/rimuhosting.py | llambiel/libcloud | 7123206a9ee32333da7fc92905acb0bb16d37d0a | [
"Apache-2.0"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | libcloud/compute/drivers/rimuhosting.py | llambiel/libcloud | 7123206a9ee32333da7fc92905acb0bb16d37d0a | [
"Apache-2.0"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | libcloud/compute/drivers/rimuhosting.py | llambiel/libcloud | 7123206a9ee32333da7fc92905acb0bb16d37d0a | [
"Apache-2.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RimuHosting Driver
"""
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
from libcloud.compute.base import NodeImage
API_CONTEXT = '/r'
API_HOST = 'rimuhosting.com'
class RimuHostingException(Exception):
    """
    Raised when the RimuHosting API reports an error or a response
    cannot be parsed.  The first positional argument is the message.
    """

    def __str__(self):
        message = self.args[0]
        return message

    def __repr__(self):
        return "<RimuHostingException '%s'>" % (self.args[0])
class RimuHostingResponse(JsonResponse):
    """
    JSON response wrapper for the RimuHosting API.
    """

    def success(self):
        # HTTP 403 means the API key was rejected.
        if self.status == 403:
            raise InvalidCredsError()
        return True

    def parse_body(self):
        try:
            parsed = super(RimuHostingResponse, self).parse_body()
            # The payload is wrapped under a single top-level key whose
            # name varies per endpoint; unwrap the first key.
            top_key = list(parsed.keys())[0]
            payload = parsed[top_key]
            if payload['response_type'] == "ERROR":
                raise RimuHostingException(
                    payload['human_readable_message']
                )
            return payload
        except KeyError:
            raise RimuHostingException('Could not parse body: %s'
                                       % (self.body))
class RimuHostingConnection(ConnectionKey):
    """
    API connection used by the RimuHosting driver.
    """

    api_context = API_CONTEXT
    host = API_HOST
    port = 443
    responseCls = RimuHostingResponse

    def __init__(self, key, secure=True):
        # Explicit override so tests can pass secure=False.
        ConnectionKey.__init__(self, key, secure)

    def add_default_headers(self, headers):
        # The API speaks JSON in both directions; advertise it, send it,
        # and authenticate every call with the account API key.
        json_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'rimuhosting apikey=%s' % (self.key),
        }
        headers.update(json_headers)
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        # Prepend the api_context to every request path.
        headers = headers or {}
        params = params or {}
        return ConnectionKey.request(self, self.api_context + action,
                                     params, data, headers, method)
class RimuHostingNodeDriver(NodeDriver):
    """
    RimuHosting node driver
    """

    type = Provider.RIMUHOSTING
    name = 'RimuHosting'
    website = 'http://rimuhosting.com/'
    connectionCls = RimuHostingConnection
    features = {'create_node': ['password']}

    def __init__(self, key, host=API_HOST, port=443,
                 api_context=API_CONTEXT, secure=True):
        """
        :param key: API key (required)
        :type key: ``str``

        :param host: hostname for connection
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :param api_context: Optional API context.
        :type api_context: ``str``

        :param secure: Whether to use HTTPS or HTTP.
        :type secure: ``bool``

        :rtype: ``None``
        """
        # Build the connection by hand (rather than via the base class)
        # so host/port/api_context can all be overridden.
        self.key = key
        self.secure = secure
        self.connection = self.connectionCls(key, secure)
        self.connection.host = host
        self.connection.api_context = api_context
        self.connection.port = port
        self.connection.driver = self
        self.connection.connect()

    def _order_uri(self, node, resource):
        # Returns the order uri with its resource appended.
        return "/orders/%s/%s" % (node.id, resource)

    # TODO: Get the node state.
    def _to_node(self, order):
        # Convert one 'about_order' API dict into a libcloud Node.
        # State is hard-coded RUNNING (see TODO above).
        n = Node(id=order['slug'],
                 name=order['domain_name'],
                 state=NodeState.RUNNING,
                 public_ips=(
                     [order['allocated_ips']['primary_ip']]
                     + order['allocated_ips']['secondary_ips']),
                 private_ips=[],
                 driver=self.connection.driver,
                 extra={
                     'order_oid': order['order_oid'],
                     'monthly_recurring_fee': order.get(
                         'billing_info').get('monthly_recurring_fee')})
        return n

    def _to_size(self, plan):
        # Convert one pricing-plan API dict into a libcloud NodeSize.
        return NodeSize(
            id=plan['pricing_plan_code'],
            name=plan['pricing_plan_description'],
            ram=plan['minimum_memory_mb'],
            disk=plan['minimum_disk_gb'],
            bandwidth=plan['minimum_data_transfer_allowance_gb'],
            price=plan['monthly_recurring_amt']['amt_usd'],
            driver=self.connection.driver
        )

    def _to_image(self, image):
        # Convert one distribution API dict into a libcloud NodeImage.
        return NodeImage(id=image['distro_code'],
                         name=image['distro_description'],
                         driver=self.connection.driver)

    def list_sizes(self, location=None):
        # Returns a list of sizes (aka plans)
        # Get plans. Note this is really just for libcloud.
        # We are happy with any size.
        if location is None:
            location = ''
        else:
            # Restrict plans to a data-centre via a matrix parameter.
            location = ";dc_location=%s" % (location.id)

        res = self.connection.request(
            '/pricing-plans;server-type=VPS%s' % (location)).object
        return list(map(lambda x: self._to_size(x), res['pricing_plan_infos']))

    def list_nodes(self):
        # Returns a list of Nodes
        # Will only include active ones.
        res = self.connection.request('/orders;include_inactive=N').object
        return list(map(lambda x: self._to_node(x), res['about_orders']))

    def list_images(self, location=None):
        # Get all base images.
        # TODO: add other image sources. (Such as a backup of a VPS)
        # All Images are available for use at all locations
        res = self.connection.request('/distributions').object
        return list(map(lambda x: self._to_image(x), res['distro_infos']))

    def reboot_node(self, node):
        # Reboot
        # PUT the state of RESTARTING to restart a VPS.
        # All data is encoded as JSON
        data = {'reboot_request': {'running_state': 'RESTARTING'}}
        uri = self._order_uri(node, 'vps/running-state')
        self.connection.request(uri, data=json.dumps(data), method='PUT')
        # XXX check that the response was actually successful
        return True

    def destroy_node(self, node):
        # Shutdown a VPS.
        uri = self._order_uri(node, 'vps')
        self.connection.request(uri, method='DELETE')
        # XXX check that the response was actually successful
        return True

    def create_node(self, **kwargs):
        """Creates a RimuHosting instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword name: Must be a FQDN. e.g example.com.
        :type name: ``str``

        :keyword ex_billing_oid: If not set,
                                 a billing method is automatically picked.
        :type ex_billing_oid: ``str``

        :keyword ex_host_server_oid: The host server to set the VPS up on.
        :type ex_host_server_oid: ``str``

        :keyword ex_vps_order_oid_to_clone: Clone another VPS to use as
                                            the image for the new VPS.
        :type ex_vps_order_oid_to_clone: ``str``

        :keyword ex_num_ips: Number of IPs to allocate. Defaults to 1.
        :type ex_num_ips: ``int``

        :keyword ex_extra_ip_reason: Reason for needing the extra IPs.
        :type ex_extra_ip_reason: ``str``

        :keyword ex_memory_mb: Memory to allocate to the VPS.
        :type ex_memory_mb: ``int``

        :keyword ex_disk_space_mb: Diskspace to allocate to the VPS.
            Defaults to 4096 (4GB).
        :type ex_disk_space_mb: ``int``

        :keyword ex_disk_space_2_mb: Secondary disk size allocation.
                                     Disabled by default.
        :type ex_disk_space_2_mb: ``int``

        :keyword ex_control_panel: Control panel to install on the VPS.
        :type ex_control_panel: ``str``
        """
        # Note we don't do much error checking in this because we
        # expect the API to error out if there is a problem.
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        data = {
            'instantiation_options': {
                'domain_name': name,
                'distro': image.id
            },
            'pricing_plan_code': size.id,
            'vps_parameters': {}
        }

        if 'ex_control_panel' in kwargs:
            data['instantiation_options']['control_panel'] = \
                kwargs['ex_control_panel']

        # 'password' feature: generate/validate the root password.
        auth = self._get_and_check_auth(kwargs.get('auth'))
        data['instantiation_options']['password'] = auth.password

        if 'ex_billing_oid' in kwargs:
            # TODO check for valid oid.
            data['billing_oid'] = kwargs['ex_billing_oid']

        if 'ex_host_server_oid' in kwargs:
            data['host_server_oid'] = kwargs['ex_host_server_oid']

        if 'ex_vps_order_oid_to_clone' in kwargs:
            data['vps_order_oid_to_clone'] = \
                kwargs['ex_vps_order_oid_to_clone']

        # Extra IPs require a justification string for the provider.
        if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1:
            if 'ex_extra_ip_reason' not in kwargs:
                raise RimuHostingException(
                    'Need an reason for having an extra IP')
            else:
                if 'ip_request' not in data:
                    data['ip_request'] = {}
                data['ip_request']['num_ips'] = int(kwargs['ex_num_ips'])
                data['ip_request']['extra_ip_reason'] = \
                    kwargs['ex_extra_ip_reason']

        if 'ex_memory_mb' in kwargs:
            data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb']

        if 'ex_disk_space_mb' in kwargs:
            data['vps_parameters']['disk_space_mb'] = \
                kwargs['ex_disk_space_mb']

        if 'ex_disk_space_2_mb' in kwargs:
            data['vps_parameters']['disk_space_2_mb'] =\
                kwargs['ex_disk_space_2_mb']

        # Don't send empty 'vps_parameters' attribute
        if not data['vps_parameters']:
            del data['vps_parameters']

        res = self.connection.request(
            '/orders/new-vps',
            method='POST',
            data=json.dumps({"new-vps": data})
        ).object
        node = self._to_node(res['about_order'])
        # Surface the (possibly generated) password to the caller.
        node.extra['password'] = \
            res['new_order_request']['instantiation_options']['password']
        return node

    def list_locations(self):
        # Static list of RimuHosting data centres.
        return [
            NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self),
            NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self),
            NodeLocation('DCLONDON', "RimuHosting London", 'GB', self),
            NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self),
        ]
| 35.863905 | 79 | 0.592724 |
acde578c56738916a83e9db38fde9b67076b0d94 | 3,649 | py | Python | agent_admin_sdk/model/metadata_center/stream_metric_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | agent_admin_sdk/model/metadata_center/stream_metric_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | agent_admin_sdk/model/metadata_center/stream_metric_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_metric_states.proto
# NOTE: regenerate from the .proto schema instead of hand-editing;
# any manual change here will be lost on the next protoc run.

import sys
# Py2/Py3 compatibility shim emitted by protoc: identity on py3,
# latin1-encode on py2, so serialized descriptors stay bytes-like.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from agent_admin_sdk.model.metadata_center import stream_metric_schema_pb2 as agent__admin__sdk_dot_model_dot_metadata__center_dot_stream__metric__schema__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='stream_metric_states.proto',
  package='metadata_center',
  syntax='proto3',
  serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
  serialized_pb=_b('\n\x1astream_metric_states.proto\x12\x0fmetadata_center\x1a@agent_admin_sdk/model/metadata_center/stream_metric_schema.proto\"h\n\x12StreamMetricStates\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x34\n\x07payload\x18\x03 \x03(\x0b\x32#.metadata_center.StreamMetricSchemaBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
  ,
  dependencies=[agent__admin__sdk_dot_model_dot_metadata__center_dot_stream__metric__schema__pb2.DESCRIPTOR,])


# Descriptor for the StreamMetricStates message
# (fields: org int32, command string, payload repeated StreamMetricSchema).
_STREAMMETRICSTATES = _descriptor.Descriptor(
  name='StreamMetricStates',
  full_name='metadata_center.StreamMetricStates',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='org', full_name='metadata_center.StreamMetricStates.org', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='command', full_name='metadata_center.StreamMetricStates.command', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='payload', full_name='metadata_center.StreamMetricStates.payload', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=113,
  serialized_end=217,
)

# Link the repeated 'payload' field to the imported schema message type.
_STREAMMETRICSTATES.fields_by_name['payload'].message_type = agent__admin__sdk_dot_model_dot_metadata__center_dot_stream__metric__schema__pb2._STREAMMETRICSCHEMA
DESCRIPTOR.message_types_by_name['StreamMetricStates'] = _STREAMMETRICSTATES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

StreamMetricStates = _reflection.GeneratedProtocolMessageType('StreamMetricStates', (_message.Message,), {
  'DESCRIPTOR' : _STREAMMETRICSTATES,
  '__module__' : 'stream_metric_states_pb2'
  # @@protoc_insertion_point(class_scope:metadata_center.StreamMetricStates)
  })
_sym_db.RegisterMessage(StreamMetricStates)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41 | 415 | 0.790354 |
acde594ab67210c39032ffff3af1b4faa71854cc | 18,434 | py | Python | indexformat.py | anthonycurtisadler/book-index-creator | fa33157afc7d8c48917dde4a596f16136830af4b | [
"MIT"
] | 7 | 2019-05-30T22:21:48.000Z | 2021-02-11T12:25:31.000Z | indexformat.py | anthonycurtisadler/book-index-creator | fa33157afc7d8c48917dde4a596f16136830af4b | [
"MIT"
] | 4 | 2020-06-22T22:36:43.000Z | 2020-07-10T13:11:10.000Z | indexformat.py | anthonycurtisadler/ARCADES | 3ca061776ef7c43d0ce29f2759df00738c8f9e82 | [
"MIT"
] | null | null | null | # For displaying the index
from numbertools import format_range as fr, rom_to_int, int_to_roman,\
abbreviate_range,convert_range
import string
SUBHEAD_WORDS = ['and','of','as','vs.','for','a','the','into']
DELETE_CHARS = (string.punctuation+string.whitespace).replace(',','').replace('(','').replace(')','')
normal_letters = 'abcdefghijklmnopqrstuvwxyz'
normal_letters = normal_letters + normal_letters.upper()+string.punctuation+string.whitespace
from indexconfiguration import LEFT_ITALIC, RIGHT_ITALIC
from ALLALPHABETS import extended_latin
class SpecialCharacters:
    """Transliterates special/extended characters to plain characters.

    The substitution table is parsed from newline-separated
    'char|replacement' entries; unknown characters are resolved by
    prompting the user interactively and remembering the answer.
    """

    def __init__(self, letters=extended_latin):
        # When letters is falsy, letter_dict is deliberately not created.
        if letters:
            self.letter_dict = self.create_letter_substitution_dict(letters)

    def create_letter_substitution_dict(self, letters):
        """Parse 'char|replacement' lines into a substitution mapping."""
        mapping = {}
        for line in letters.split('\n'):
            if '|' not in line:
                continue
            source, target = line.split('|')
            if target[0] in normal_letters:
                mapping[source] = target[0]
        return mapping

    def convert_letter(self, x):
        """Return the plain substitute for character x.

        Falls through to an interactive prompt (remembered in
        letter_dict) when x is neither mapped nor already plain.
        """
        if x in self.letter_dict:
            return self.letter_dict[x]
        allowed = normal_letters + '0123456789'
        if x in allowed:
            return x
        while True:
            new_char = input('SPECIAL CHAR ' + x + ' NOT FOUND; ENTER NEW!')
            if new_char in allowed:
                self.letter_dict[x] = new_char
                return new_char

    def convert_word(self, x):
        """Transliterate every character of x."""
        return ''.join(self.convert_letter(l) for l in x)
converter = SpecialCharacters()  # module-level transliterator used by sort_function
def format_range(x):
    """Render a page collection as display text via the numbertools helpers."""
    abbreviated = fr(x)
    return convert_range(abbreviated, join_phrase=',')
def truncate_small(x, words=SUBHEAD_WORDS):
    """Repeatedly strip leading connective words ('and', 'of', ...) from x."""
    stripped = True
    # Keep passing over the word list until one full pass removes nothing.
    while x and stripped:
        stripped = False
        for w in words:
            prefix = w + ' '
            if x.startswith(prefix):
                x = x[len(prefix):]
                stripped = True
    return x
def get_if(x, left=None, right=None):
    """Extract text between the first *left* and first *right* delimiter.

    Returns (inner, remainder).  When the delimiters are absent or not
    supplied, inner is '' and remainder is x stripped.
    """
    if left and right and left in x and right in x:
        left_parts = x.split(left)
        right_parts = x.split(right)
        inner = left_parts[1].split(right)[0].strip()
        remainder = left_parts[0] + right_parts[1].strip()
        return inner, remainder
    return '', x.strip()
def get_comma(x):
    """Split on commas -> (first segment, second segment), both stripped.

    Text after a second comma is dropped; with no comma present the
    second element is ''.
    """
    if ',' not in x:
        return x.strip(), ''
    segments = x.split(',')
    return segments[0].strip(), segments[1].strip()
def get_right(x, div_char=';;'):
    """Split x on div_char -> (right segment, left segment), both stripped.

    Only the first two split segments are used; without the divider the
    result is ('', x.strip()).
    """
    if div_char in x:
        pieces = x.split(div_char)
        return pieces[1].strip(), pieces[0].strip()
    return '', x.strip()
def sort_function(x):
    """Build a letter-by-letter sort key for an index heading (CMS 16).

    Italic markers are stripped, special characters are transliterated via
    the module-level converter, leading subheading words are dropped,
    punctuation/whitespace (except commas and parentheses) is removed, the
    result is lower-cased and split on commas so 'Smith, John' sorts as a
    subfield of 'Smith'.
    """
    # FOLLOWING CMS 16 'LETTER by LETTER'
    x = x.replace(LEFT_ITALIC, '').replace(RIGHT_ITALIC, '')
    x = converter.convert_word(x)
    x = truncate_small(x)
    # A '(' opens a descriptor; inserting a comma makes it sort as a subfield.
    x = x.replace('(', ',(')
    for c in DELETE_CHARS:
        x = x.replace(c, '')
    # (removed a stray no-op expression statement 'x' that was dead code)
    x = x.lower()
    return tuple(x.split(','))
class Entry:
    """Parsed view of one raw index-entry string.

    The raw string packs several fields with punctuation:
      {...}   page block (split off here; its value is not stored)
      <...>   referenced author name, 'Last, First' (';;' trims trailing data)
      ;;      separates an optional search phrase on the right
      [...]   a 'see also' cross reference
      _       separates sub-heading (right) from main heading (left)
      (...)   a descriptor attached to the main heading
    """
    def __init__ (self,entry):
        # Strip the {...} page block first; the value itself is discarded.
        pages,entry = get_if(entry,'{','}')
        self.ref_full_name, entry = get_if(entry,'<','>')
        if self.ref_full_name:
            # Anything after ';;' inside the <...> block is dropped.
            self.ref_full_name = self.ref_full_name.split(';;')[0].strip()
            self.ref_last_name, self.ref_first_name = get_comma(self.ref_full_name)
        else:
            self.ref_last_name = ''
            self.ref_first_name = ''
        self.search_phrase, entry = get_right(entry)
        self.see_also, self.head_phrase = get_if(entry,'[',']')
        # Entries using ';... see X' override the bracketed cross reference.
        if ';' in entry:
            if 'see ' in ''.join(entry.split(';')[1]):
                self.see_also = ''.join(entry.split(';')[1:]).split('see ')[1]
                self.head_phrase = entry
            else:
                # ';' present but no 'see ': treat the whole entry as the head.
                self.see_also, self.head_phrase = '', entry
        self.sub_head, self.main_head = get_right(self.head_phrase,div_char='_')
        self.descriptor, self.main_head = get_if(self.main_head,'(',')')
    def __str__ (self):
        # Debug-style rendering: emit only the fields that are non-empty.
        to_return = ''
        if self.ref_full_name:
            to_return += 'REF_FULL_NAME='+self.ref_full_name+';'
        if self.ref_last_name:
            to_return += 'REF_LAST_NAME='+self.ref_last_name+';'
        if self.ref_first_name:
            to_return += 'REF_FIRST_NAME='+self.ref_first_name+';'
        if self.search_phrase:
            to_return += 'SEARCH_PHRASE='+self.search_phrase+';'
        if self.see_also:
            to_return += 'SEE_ALSO='+self.see_also+';'
        if self.main_head:
            # NOTE(review): the final .replace(' ',' ') is a no-op as shown;
            # it looks like a collapsed '  '->' ' whitespace fix — confirm
            # against the original source before changing.
            to_return += ('MAIN_HEAD='+self.main_head+';').replace(':;',':').replace(',',', ').replace(' ',' ')
        if self.sub_head:
            to_return += 'SUB_HEAD='+self.sub_head
        if self.descriptor:
            to_return += 'DESCRIPTOR='+self.descriptor
        return to_return
class FormatIndex:
    """Builds and renders a book index from a raw index_object.

    index_object is consumed as a mapping with 'names', 'concepts' and
    'titles' keys, each mapping raw entry strings to collections of page
    numbers (inferred from how the sub-dictionaries are used below —
    TODO confirm against the producer of index_object).
    """
    def __init__ (self,index_object=None):
        self.index_object = index_object
        self.headings = {}
        self.names = {} #For keeping track of common last names
        # Typographic preferences: pull commas inside closing quotes.
        self.comma_before_single_quotes = True
        self.comma_before_double_quotes = True
        self.single_quote = '’'
        self.double_quote = '”'
        self.cross_references = set()
        self.cross_reference_head = set()
        self.unmatching_heads = set()
        self.unmatching_cross_references = set()
    def get_cross_references (self):
        # Collect every 'see also' target across all three entry pools.
        for dict_obj in [self.index_object['names'],self.index_object['concepts'],self.index_object['titles']]:
            for obj in dict_obj:
                name_inf = Entry(obj)
                if name_inf.see_also:
                    self.cross_references.add(name_inf.see_also.split('_')[0])
                    self.cross_reference_head.add((name_inf.see_also.split('_')[0],name_inf.main_head))
    def check_cross_references (self):
        # Flag cross references that never resolve to a real heading.
        # Comparison ignores spaces and commas (letters_only).
        letters_only = lambda x:x.replace(' ','').replace(',','')
        if not self.cross_references:
            self.get_cross_references()
        cross_ref_copy = {letters_only(x) for x in self.cross_references}
        for dict_obj in [self.index_object['names'],self.index_object['concepts'],self.index_object['titles']]:
            for obj in dict_obj:
                name_inf = Entry(obj)
                if not name_inf.see_also:
                    cross_ref_copy.discard(letters_only(name_inf.main_head))
        print('ALL CROSS REFERENCES ',','.join(self.cross_references))
        self.unmatching_cross_references = {x for x in self.cross_references if letters_only(x) in cross_ref_copy}
        print('NO CROSS REFERENCES FOUND FOR ',','.join(self.unmatching_cross_references))
        self.unmatching_heads = {x[1] for x in self.cross_reference_head if x[0] in self.unmatching_cross_references}
    def generate_dictionary (self):
        """Populate self.headings from names, concepts and titles."""
        # mode 0 = personal names, mode 1 = concepts.
        for mode, dict_obj in enumerate([self.index_object['names'],self.index_object['concepts']]):
            # First sweep: create headings for entries without a sub-head.
            for name in dict_obj:
                name_inf = Entry(name)
                if not name_inf.sub_head:
                    if name_inf.main_head not in self.headings:
                        self.headings[name_inf.main_head] = {'descriptor':name_inf.descriptor,
                                                             'pages':dict_obj[name],
                                                             'works':{},
                                                             'pages_in_titles':set(),
                                                             'subheadings':{},
                                                             'pages_in_subheadings':set(),
                                                             'type':'HEADNAME'}
                    if mode == 0:
                        # Track main heads per last name so titles can later
                        # be attributed to their author.
                        last_name, first_name = get_comma (name_inf.main_head)
                        if last_name not in self.names:
                            self.names[last_name] = set()
                        self.names[last_name].add(name_inf.main_head)
            # Second sweep: attach sub-headed entries, creating the parent
            # heading on demand when it was never seen without a sub-head.
            for name in dict_obj:
                name_inf = Entry(name)
                if name_inf.sub_head:
                    if name_inf.main_head in self.headings:
                        self.headings[name_inf.main_head]['subheadings'][name_inf.sub_head] = {'pages':dict_obj[name]}
                        self.headings[name_inf.main_head]['pages_in_subheadings'].update(dict_obj[name])
                    else:
                        if mode == 0:
                            self.headings[name_inf.main_head] = {'descriptor':name_inf.descriptor,
                                                                 'pages':self.index_object['names'][name],
                                                                 'works':{},
                                                                 'pages_in_titles':set(),
                                                                 'subheadings':{},
                                                                 'pages_in_subheadings':set(),
                                                                 'type':'HEADNAME'}
                            self.headings[name_inf.main_head]['subheadings'][name_inf.sub_head] = {'pages':dict_obj[name]}
                            self.headings[name_inf.main_head]['pages_in_subheadings'] = dict_obj[name]
                        else:
                            self.headings[name_inf.main_head] = {'descriptor':name_inf.descriptor,
                                                                 'pages':self.index_object['concepts'][name],
                                                                 'works':{},
                                                                 'pages_in_titles':set(),
                                                                 'subheadings':{},
                                                                 'pages_in_subheadings':set(),
                                                                 'type':'HEADNAME'}
                            self.headings[name_inf.main_head]['subheadings'][name_inf.sub_head] = {'pages':dict_obj[name]}
                            self.headings[name_inf.main_head]['pages_in_subheadings'] = dict_obj[name]
        # Titles: either stand alone (SOLOTITLE) or attach to an author's
        # heading through the last-name lookup built above.
        for title in self.index_object['titles']:
            title_inf = Entry(title)
            if not title_inf.ref_full_name:
                if title_inf.main_head not in self.headings:
                    # NOTE(review): 'descriptor' reads name_inf (leftover from
                    # the loops above) rather than title_inf — looks like a
                    # latent bug; confirm intent before changing.
                    self.headings[title_inf.main_head] = {'descriptor':name_inf.descriptor,
                                                          'pages':self.index_object['titles'][title],
                                                          'works':{},
                                                          'pages_in_titles':set(),
                                                          'subheadings':{},
                                                          'pages_in_subheadings':set(),
                                                          'type':'SOLOTITLE'}
            else:
                if title_inf.ref_last_name in self.names:
                    if len(self.names[title_inf.ref_last_name]) == 1:
                        name = list(self.names[title_inf.ref_last_name])[0]
                    else:
                        # Several people share the last name: pick the head
                        # containing both the last and the first name.
                        name = [x for x in self.names[title_inf.ref_last_name] if title_inf.ref_last_name in x and title_inf.ref_first_name in x]
                        if name:
                            name = name[0]
                        else:
                            print('NAME NOT FOUND')
                            name = ''
                    if name and name in self.headings:
                        self.headings[name]['works'][title_inf.main_head] = {'pages':self.index_object['titles'][title]}
                        self.headings[name]['pages_in_titles'].update(self.index_object['titles'][title])
    def print_dictionary (self,exclude_empty=False,exclude_not_matching=False):
        """Render the index as text, one heading per line, with a blank
        line whenever the leading letter changes.  Returns the joined
        string; diagnostics are printed as a side effect."""
        all_heads = sorted(self.headings.keys(),key=lambda x:sort_function(x))
        returnlist = []
        last_letter = ''
        for x in all_heads:
            linetext = ''
            if last_letter.lower() != x.replace(LEFT_ITALIC,'').replace(RIGHT_ITALIC,'')[0].lower():
                returnlist.append('')
                last_letter = x.replace(LEFT_ITALIC,'').replace(RIGHT_ITALIC,'')[0]
            skip_empty = True  # NOTE(review): assigned but never read
            def some_numeric(x):
                # True when the line carries page digits or a 'see ' ref.
                return 'see ' in x or len(set(y for y in x if y.isnumeric()))>0
            def correct (x):
                # Pull commas inside closing quotes per the instance flags.
                if self.comma_before_single_quotes and self.comma_before_double_quotes:
                    x = x.replace(self.single_quote+self.double_quote+',',','+self.single_quote+self.double_quote)
                if self.comma_before_single_quotes:
                    x = x.replace(self.single_quote+',',','+self.single_quote)
                if self.comma_before_double_quotes:
                    x = x.replace(self.double_quote+',',','+self.double_quote)
                return x
            if not exclude_not_matching or x not in self.unmatching_heads:
                for mode in [0,1]:
                    # NOTE(review): first_skipped is reset on every mode
                    # iteration, so the mode-1 'first_skipped' checks below
                    # always see False — confirm whether the reset was meant
                    # to sit outside this loop.
                    first_skipped = False
                    if mode == 0 or (mode == 1 and self.headings[x]['works']):
                        # mode=0 for the MAIN HEADING of CONCEPTS, NAMES
                        # mode=1 for the WORKS listed under AUTHORS
                        if mode == 0:
                            # Pages already shown under titles/subheadings
                            # are subtracted from the main-head range.
                            if self.headings[x]['pages']-self.headings[x]['pages_in_titles']-self.headings[x]['pages_in_subheadings']:
                                linetext += x
                                if self.headings[x]['descriptor']:
                                    linetext += '('+self.headings[x]['descriptor']+')'
                                linetext += ', '
                                linetext += format_range(self.headings[x]['pages']-self.headings[x]['pages_in_titles']-self.headings[x]['pages_in_subheadings']).replace(',',', ')
                                linetext += '; '
                            else:
                                linetext += x
                                linetext += '; '
                            if self.headings[x]['subheadings']:
                                for sub_head in sorted(self.headings[x]['subheadings'],key=lambda x:sort_function(x)):
                                    fr = format_range(self.headings[x]['subheadings'][sub_head]['pages']).replace(',',', ')
                                    if fr:
                                        linetext += sub_head
                                        linetext += ', '
                                        linetext += fr
                                        linetext += '; '
                                    elif not exclude_empty:
                                        linetext += sub_head + ' EMPTY; '
                                    else:
                                        print('NO PAGES FOR '+sub_head)
                            if linetext.endswith('; '):
                                linetext = linetext[:-2]
                            if linetext and some_numeric(linetext):
                                returnlist.append(correct(linetext))
                            else:
                                first_skipped = True
                            linetext = ''
                        else:
                            linetext += x
                            if self.headings[x]['descriptor']:
                                linetext += '('+self.headings[x]['descriptor']+')'
                            linetext += ', works by: '
                            work_found = False
                            total_works = 0
                            for work in sorted(self.headings[x]['works'],key=lambda x:sort_function(x)):
                                total_works += 1
                                fr = format_range(self.headings[x]['works'][work]['pages']).replace(',',', ')
                                if fr:
                                    linetext += work
                                    linetext += ', '
                                    linetext += fr
                                    linetext += '; '
                                    work_found = True
                                elif not exclude_empty:
                                    # NOTE(review): no space before 'EMPTY'
                                    # here, unlike the subheading branch.
                                    linetext += work + 'EMPTY; '
                                else:
                                    print('NO PAGES FOR '+work)
                            if linetext.endswith('; '):
                                linetext = linetext[:-2]
                            if linetext:
                                returnlist.append(correct(linetext))
                            linetext = ''
                            if first_skipped and not work_found:
                                print('NO ENTRIES OR WORKS FOUND FOR ',x)
                            elif not work_found and self.headings[x]['works']:
                                print('NO WORKS FOUND FOR',x,' in ',total_works,'WORKS')
                    elif mode == 1:
                        if first_skipped:
                            print('SKIPPED ',x)
                        else:
                            print('NO CROSS REFERENCE FOR',x)
        return '\n'.join(returnlist)
acde59b3cc0fc17d8c742c8c4faa70cf5a41213a | 2,292 | bzl | Python | layer/mariadb/version.bzl | y0psolo/YAD | 0f1f9c5140687345dee591667793d6f8ed6e29e5 | [
"Apache-2.0"
] | 1 | 2021-11-05T09:13:57.000Z | 2021-11-05T09:13:57.000Z | layer/mariadb/version.bzl | y0psolo/YAD | 0f1f9c5140687345dee591667793d6f8ed6e29e5 | [
"Apache-2.0"
] | 9 | 2021-12-02T13:25:52.000Z | 2022-01-26T14:24:05.000Z | layer/mariadb/version.bzl | y0psolo/YAD | 0f1f9c5140687345dee591667793d6f8ed6e29e5 | [
"Apache-2.0"
] | null | null | null | load("@mariadb_10_5_focal_amd64//debs:deb_packages.bzl", "mariadb_10_5_focal_amd64")
load("@mariadb_10_5_focal_arm64//debs:deb_packages.bzl", "mariadb_10_5_focal_arm64")
load("@mariadb_10_5_bionic_amd64//debs:deb_packages.bzl", "mariadb_10_5_bionic_amd64")
load("@mariadb_10_5_bionic_arm64//debs:deb_packages.bzl", "mariadb_10_5_bionic_arm64")
load("@mariadb_10_6_focal_amd64//debs:deb_packages.bzl", "mariadb_10_6_focal_amd64")
load("@mariadb_10_6_focal_arm64//debs:deb_packages.bzl", "mariadb_10_6_focal_arm64")
load("@mariadb_10_6_bionic_amd64//debs:deb_packages.bzl", "mariadb_10_6_bionic_amd64")
load("@mariadb_10_6_bionic_arm64//debs:deb_packages.bzl", "mariadb_10_6_bionic_arm64")
load("@mariadb_10_7_focal_amd64//debs:deb_packages.bzl", "mariadb_10_7_focal_amd64")
load("@mariadb_10_7_focal_arm64//debs:deb_packages.bzl", "mariadb_10_7_focal_arm64")
load("@mariadb_10_7_bionic_amd64//debs:deb_packages.bzl", "mariadb_10_7_bionic_amd64")
load("@mariadb_10_7_bionic_arm64//debs:deb_packages.bzl", "mariadb_10_7_bionic_arm64")
def mariadb_package(version, package):
    """Resolve the pinned deb label for a mariadb version/package pair.

    Returns a `select()` choosing the right pinned package repository for
    the current platform (focal/bionic x amd64/arm64). An unsupported
    version resolves to an empty string, matching the previous fallback.
    """
    version_tables = {
        "//platforms:k8_cpu_focal": {
            "10.5": mariadb_10_5_focal_amd64,
            "10.6": mariadb_10_6_focal_amd64,
            "10.7": mariadb_10_7_focal_amd64,
        },
        "//platforms:aarch64_cpu_focal": {
            "10.5": mariadb_10_5_focal_arm64,
            "10.6": mariadb_10_6_focal_arm64,
            "10.7": mariadb_10_7_focal_arm64,
        },
        "//platforms:k8_cpu_bionic": {
            "10.5": mariadb_10_5_bionic_amd64,
            "10.6": mariadb_10_6_bionic_amd64,
            "10.7": mariadb_10_7_bionic_amd64,
        },
        "//platforms:aarch64_cpu_bionic": {
            "10.5": mariadb_10_5_bionic_arm64,
            "10.6": mariadb_10_6_bionic_arm64,
            "10.7": mariadb_10_7_bionic_arm64,
        },
    }
    selection = {}
    for config, by_version in version_tables.items():
        # Preserve the original fallback: unknown versions yield "".
        selection[config] = [by_version[version][package] if version in by_version else ""]
    return select(selection)
acde59f2d7033a542946ec8078b153858ee79de9 | 4,216 | py | Python | domato/webgl/generator.py | BOB-Jour/Domino_Fuzzer | 82afb7b6e9b74235819e6d170bf85bde5d6d4213 | [
"Apache-2.0"
] | 4 | 2021-12-21T23:52:44.000Z | 2021-12-23T19:19:21.000Z | domato/webgl/generator.py | BOB-Jour/Domino_Fuzzer | 82afb7b6e9b74235819e6d170bf85bde5d6d4213 | [
"Apache-2.0"
] | null | null | null | domato/webgl/generator.py | BOB-Jour/Domino_Fuzzer | 82afb7b6e9b74235819e6d170bf85bde5d6d4213 | [
"Apache-2.0"
] | 1 | 2022-01-05T09:09:39.000Z | 2022-01-05T09:09:39.000Z | # Domato - main generator script
# -------------------------------
#
# Written and maintained by Ivan Fratric <ifratric@google.com>
#
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import random
import sys
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(parent_dir)
from grammar import Grammar
# Number of generated code lines: a full-sized main body, and a much
# smaller body for each subsequent (event-handler) placeholder.
_N_MAIN_LINES = 100
_N_EVENTHANDLER_LINES = 1
def generate_function_body(jsgrammar, num_lines):
    """Generate `num_lines` lines of code from the given grammar.

    Args:
        jsgrammar: Grammar object used for generating JS/WebGL code.
        num_lines: Number of code lines to generate.

    Returns:
        The generated code as a string.
    """
    # The previous empty-string accumulator (`js = ''; js += ...`) added
    # nothing; return the generated code directly.
    return jsgrammar._generate_code(num_lines)
def GenerateNewSample(template, jsgrammar):
    """Generate a fuzzing sample by filling in a template.

    Each `<glfuzz>` placeholder in the template is replaced with freshly
    generated JS code. The first placeholder gets a full-sized main body
    (_N_MAIN_LINES); all later ones are treated as event handlers and get
    _N_EVENTHANDLER_LINES lines each.

    Args:
        template: A template string containing `<glfuzz>` placeholders.
        jsgrammar: Grammar for generating JS code.

    Returns:
        A string containing sample data.
    """
    result = template
    handlers = False
    while '<glfuzz>' in result:
        num_lines = _N_MAIN_LINES
        if handlers:
            num_lines = _N_EVENTHANDLER_LINES
        else:
            handlers = True
        # Replace only the first occurrence so every placeholder receives
        # its own generated code.
        result = result.replace(
            '<glfuzz>',
            generate_function_body(jsgrammar, num_lines),
            1
        )
    return result
def generate_samples(grammar_dir, outfiles):
    """Generates a set of samples and writes them to the output files.

    Args:
        grammar_dir: directory to load grammar files from.
        outfiles: A list of output filenames.
    """
    # Use context managers so file handles are closed even on error.
    with open(os.path.join(grammar_dir, 'template.html')) as f:
        template = f.read()
    jsgrammar = Grammar()
    err = jsgrammar.parse_from_file(os.path.join(grammar_dir, 'webgl.txt'))
    if err > 0:
        print('There were errors parsing grammar')
        return
    for outfile in outfiles:
        result = GenerateNewSample(template, jsgrammar)
        if result is not None:
            print('Writing a sample to ' + outfile)
            try:
                with open(outfile, 'w') as f:
                    f.write(result)
            except IOError:
                print('Error writing to output')
def get_option(option_name):
    """Return the value of a CLI option from sys.argv, or None.

    Supports both the "--opt value" and the "--opt=value" spellings; the
    first matching token wins.
    """
    argv = sys.argv
    prefix = option_name + '='
    for idx, token in enumerate(argv):
        if token == option_name and idx + 1 < len(argv):
            return argv[idx + 1]
        if token.startswith(prefix):
            return token[len(prefix):]
    return None
def main():
    """Command-line entry point for the sample generator."""
    fuzzer_dir = os.path.dirname(__file__)

    # ClusterFuzz passes --output_dir either as "--output_dir=X" or as
    # "--output_dir X"; accept both spellings.
    multiple_samples = any(
        arg == '--output_dir' or arg.startswith('--output_dir=')
        for arg in sys.argv
    )

    if multiple_samples:
        print('Running on ClusterFuzz')
        out_dir = get_option('--output_dir')
        nsamples = int(get_option('--no_of_files'))
        print('Output directory: ' + out_dir)
        print('Number of samples: ' + str(nsamples))
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        outfiles = [
            os.path.join(out_dir, 'fuzz-' + str(i).zfill(5) + '.html')
            for i in range(nsamples)
        ]
        generate_samples(fuzzer_dir, outfiles)
    elif len(sys.argv) > 1:
        generate_samples(fuzzer_dir, [sys.argv[1]])
    else:
        print('Arguments missing')
        print("Usage:")
        print("\tpython generator.py <output file>")
        print("\tpython generator.py --output_dir <output directory> --no_of_files <number of output files>")
# Script entry point: allow running this module directly.
if __name__ == '__main__':
    main()
| 28.295302 | 109 | 0.628795 |
acde5b52b017f138df085fa60cbf6122ba571beb | 1,949 | py | Python | klustakwik2/tests/test_compute_cluster_masks.py | kwikteam/klustakwik2 | 415c945fa795f62f6dad4d017ccd59323a719d51 | [
"BSD-3-Clause"
] | 15 | 2015-07-04T05:38:48.000Z | 2021-05-28T14:01:56.000Z | klustakwik2/tests/test_compute_cluster_masks.py | kwikteam/klustakwik2 | 415c945fa795f62f6dad4d017ccd59323a719d51 | [
"BSD-3-Clause"
] | 45 | 2015-05-28T15:50:16.000Z | 2022-02-23T11:43:21.000Z | klustakwik2/tests/test_compute_cluster_masks.py | kwikteam/klustakwik2 | 415c945fa795f62f6dad4d017ccd59323a719d51 | [
"BSD-3-Clause"
] | 17 | 2015-05-29T16:16:35.000Z | 2020-11-16T06:52:23.000Z | from numpy import *
from klustakwik2 import *
from numpy.testing import assert_raises, assert_array_almost_equal, assert_array_equal
from nose import with_setup
from nose.tools import nottest
from numpy.random import randint, rand
from six.moves import range
# we use the version that is used in klustakwik2 rather than separately testing the numba/cython
# versions
from klustakwik2.clustering import accumulate_cluster_mask_sum
from .test_io import generate_simple_test_raw_data
@nottest
def generate_simple_test_kk(**params):
    """Build a KK clustering object from the shared simple test fixture.

    The dense originals (features, masks, correction terms) are stashed on
    the KK instance so the tests below can assert against them, and the
    initial cluster assignment is fixed to [2, 3, 3, 4].
    """
    raw_data, fet, fmask, features, correction_terms = generate_simple_test_raw_data()
    data = raw_data.to_sparse_data()
    kk = KK(data, **params)
    # Keep the dense arrays around for later comparisons in the tests.
    kk.orig_fet = fet
    kk.orig_fmask = fmask
    kk.orig_features = features
    kk.orig_correction_terms = correction_terms
    clusters = array([2, 3, 3, 4])
    kk.initialise_clusters(clusters)
    return kk
def test_accumulate_cluster_mask_sum():
    """Check that accumulate_cluster_mask_sum sums fmask rows per cluster."""
    kk = generate_simple_test_kk()
    fet = kk.orig_fet
    fmask = kk.orig_fmask
    # Fixture yields clusters [2, 3, 3, 4] plus two special clusters (0, 1).
    assert kk.num_clusters_alive==5
    num_clusters = kk.num_clusters_alive
    num_features = kk.num_features
    cluster_mask_sum = zeros((num_clusters, num_features))
    cluster_mask_sum[:2, :] = -1 # ensure that clusters 0 and 1 are masked
    for cluster in range(2, num_clusters):
        accumulate_cluster_mask_sum(kk, cluster_mask_sum[cluster, :], kk.get_spikes_in_cluster(cluster))
    # cluster 2 has only point 0 in it, so the cluster_mask sum should be just the corresponding
    # fmask line
    assert_array_almost_equal(cluster_mask_sum[2, :], fmask[0, :])
    # similarly for the others
    assert_array_almost_equal(cluster_mask_sum[3, :], fmask[1, :]+fmask[2, :])
    assert_array_almost_equal(cluster_mask_sum[4, :], fmask[3, :])
    # The masked rows must not have been touched by the accumulation.
    assert (cluster_mask_sum[0, :]==-1).all()
    assert (cluster_mask_sum[1, :]==-1).all()
# Allow running this test module directly, without nose/pytest.
if __name__=='__main__':
    test_accumulate_cluster_mask_sum()
| 34.803571 | 104 | 0.74038 |
acde5d2ff14e25d95c5eac4d0cd1d2d51d763532 | 1,524 | py | Python | LossNetwork.py | orhunguley/unsupervised_object_learning | bae764a7ff3fb77f0050617f19c37fa2d44ed3e2 | [
"MIT"
] | null | null | null | LossNetwork.py | orhunguley/unsupervised_object_learning | bae764a7ff3fb77f0050617f19c37fa2d44ed3e2 | [
"MIT"
] | null | null | null | LossNetwork.py | orhunguley/unsupervised_object_learning | bae764a7ff3fb77f0050617f19c37fa2d44ed3e2 | [
"MIT"
] | null | null | null | import argparse
import os
import time
import torch
import numpy as np
from torch.utils.data import DataLoader
import torch.optim
from torch.nn.utils import clip_grad_norm_
from data import TrainStation
from motsynth import MOTSynth, MOTSynthBlackBG
from log_utils import log_summary
from utils import save_ckpt, load_ckpt, print_scalor
from common import *
import parse
import pickle
from collections import namedtuple
# https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3
# https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3
class LossNetwork(torch.nn.Module):
    """Expose intermediate VGG activations for perceptual-loss computation.

    Only the feature extractor (``vgg_model.features``) is used. The input
    is pushed through its sub-modules in order and the activations after
    the modules named in ``layer_name_mapping`` are collected.
    """

    # Built once at class level instead of on every forward() call.
    # (Earlier experiments also tapped relu3_3/relu4_3; if more taps are
    # added, extend both this namedtuple and layer_name_mapping.)
    LossOutput = namedtuple("LossOutput", ["relu1_2", "relu2_2"])

    def __init__(self, vgg_model):
        super(LossNetwork, self).__init__()
        self.vgg_layers = vgg_model.features
        # Indices of the VGG feature sub-modules whose outputs are tapped.
        self.layer_name_mapping = {
            '3': "relu1_2",
            '8': "relu2_2"
        }

    def forward(self, x):
        """Run ``x`` through the VGG features and return the tapped layers.

        Returns:
            LossOutput namedtuple with fields ``relu1_2`` and ``relu2_2``.
        """
        output = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            if name in self.layer_name_mapping:
                output[self.layer_name_mapping[name]] = x
        return self.LossOutput(**output)
acde5dffd16052698549e471ad735173c20b1f14 | 2,834 | py | Python | tests/matrix_conv2d/test_matrix_conv2d_int16_1x1_stride1_par_och2_concur_och4_axi64.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 207 | 2019-11-12T11:42:25.000Z | 2022-03-20T20:32:17.000Z | tests/matrix_conv2d/test_matrix_conv2d_int16_1x1_stride1_par_och2_concur_och4_axi64.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 31 | 2019-11-25T07:33:30.000Z | 2022-03-17T12:34:34.000Z | tests/matrix_conv2d/test_matrix_conv2d_int16_1x1_stride1_par_och2_concur_och4_axi64.py | RyusukeYamano/nngen | 9ed1f7fb83908794aa94d70287d89545d45fe875 | [
"Apache-2.0"
] | 29 | 2019-11-07T02:25:48.000Z | 2022-03-12T16:22:57.000Z | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import matrix_conv2d
# Tensor shapes for this conv2d variant (1x1 kernel); no bias/scale tensors.
act_shape = (1, 7, 7, 15)
weight_shape = (7, 1, 1, 15)
bias_shape = None
scale_shape = None
# All operands use nngen's 16-bit integer dtype.
act_dtype = ng.int16
weight_dtype = ng.int16
bias_dtype = ng.int16
scale_dtype = ng.int16
out_dtype = ng.int16
stride = (1, 1, 1, 1)
# No right-shift (requantization) stages and no activation function.
rshift_mul = None
rshift_sum = None
rshift_out = None
act_func = None
# Hardware parallelism: 2-way parallel over output channels, with up to
# 4 output channels processed concurrently; filter-stationary dataflow.
par_ich = 1
par_och = 2
par_col = 1
par_row = 1
concur_och = 4
stationary = 'filter'
# RAM sizes left as backend defaults.
input_ram_size = None
filter_ram_size = None
bias_ram_size = None
scale_ram_size = None
out_ram_size = None
# AXI data bus width in bits (this variant exercises the 64-bit bus).
axi_datawidth = 64
def test(request, silent=True):
    """pytest entry: run the conv2d testbench and check the verify verdict."""
    veriloggen.reset()
    # Simulator choice comes from pytest's --sim command-line option.
    simtype = request.config.getoption('--sim')

    rslt = matrix_conv2d.run(act_shape, weight_shape,
                             bias_shape, scale_shape,
                             act_dtype, weight_dtype,
                             bias_dtype, scale_dtype,
                             out_dtype,
                             stride,
                             rshift_mul, rshift_sum, rshift_out,
                             act_func,
                             par_ich, par_och, par_col, par_row,
                             concur_och, stationary,
                             input_ram_size, filter_ram_size,
                             bias_ram_size, scale_ram_size,
                             out_ram_size,
                             axi_datawidth, silent,
                             filename=None, simtype=simtype,
                             outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')

    # The simulation log reports the verification verdict on its last line.
    verify_rslt = rslt.splitlines()[-1]
    assert(verify_rslt == '# verify: PASSED')
# Direct invocation: run the same testbench non-silently and also emit
# the generated Verilog to tmp.v.
if __name__ == '__main__':
    rslt = matrix_conv2d.run(act_shape, weight_shape,
                             bias_shape, scale_shape,
                             act_dtype, weight_dtype,
                             bias_dtype, scale_dtype,
                             out_dtype,
                             stride,
                             rshift_mul, rshift_sum, rshift_out,
                             act_func,
                             par_ich, par_och, par_col, par_row,
                             concur_och, stationary,
                             input_ram_size, filter_ram_size,
                             bias_ram_size, scale_ram_size,
                             out_ram_size,
                             axi_datawidth, silent=False,
                             filename='tmp.v',
                             outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
    print(rslt)
| 31.842697 | 97 | 0.529993 |
acde5e9542d0c3a8a8669ee25a6e85933e7de0c1 | 3,898 | py | Python | debile/master/incoming_dud.py | tcc-unb-fga/debile | 84accc5fe14e9f8d0c1df1c3c32afaab19b0dcbc | [
"MIT"
] | 3 | 2015-11-21T15:28:15.000Z | 2020-02-05T18:51:01.000Z | debile/master/incoming_dud.py | tcc-unb-fga/debile | 84accc5fe14e9f8d0c1df1c3c32afaab19b0dcbc | [
"MIT"
] | null | null | null | debile/master/incoming_dud.py | tcc-unb-fga/debile | 84accc5fe14e9f8d0c1df1c3c32afaab19b0dcbc | [
"MIT"
] | 2 | 2015-07-14T07:14:33.000Z | 2019-09-14T03:34:15.000Z | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from firewoes.lib.hash import idify
from firewoes.lib.uniquify import uniquify
from sqlalchemy.orm.exc import NoResultFound
from debile.master.utils import emit
from debile.master.dud import Dud, DudFileException
from debile.master.filerepo import FileRepo, FilesAlreadyRegistered
from debile.master.orm import Builder, Job
def process_dud(config, session, path):
    """Validate an incoming .dud upload and accept or reject it.

    Checks, in order: the file parses, its contents validate, its PGP
    signature matches a known builder, it names an existing job, it
    carries a failure notice, and the job belongs to that builder. Any
    failed check rejects the upload; otherwise it is accepted.
    """
    # Parse errors here are expected for junk files, so log and skip
    # rather than reject (nothing to clean up yet).
    try:
        dud = Dud(path)
    except Exception as e:
        print('SKIP: Error loading dud file {tag} - ({exception}: {args})'.format(
            tag=path,
            exception=type(e),
            args=e.args))
        return
    try:
        dud.validate()
    except Exception as e:
        print('SKIP: Invalid dud file {tag} ({exception}: {args})'.format(
            tag=path,
            exception=type(e),
            args=e.args))
        return
    # Signature check yields the uploader's PGP fingerprint on success.
    try:
        fingerprint = dud.validate_signature(config['keyrings']['pgp'])
    except DudFileException:
        return reject_dud(session, dud, "invalid-signature")
    try:
        builder = session.query(Builder).filter_by(pgp=fingerprint).one()
    except NoResultFound:
        return reject_dud(session, dud, "invalid-dud-builder")
    jid = dud.get("X-Debile-Job", None)
    if jid is None:
        return reject_dud(session, dud, "missing-dud-job")
    job = session.query(Job).get(jid)
    if job is None:
        return reject_dud(session, dud, "invalid-dud-job")
    if dud.get("X-Debile-Failed", None) is None:
        return reject_dud(session, dud, "no-failure-notice")
    # The signing builder must be the builder assigned to the job.
    if job.builder != builder:
        return reject_dud(session, dud, "invalid-dud-uploader")
    accept_dud(config, session, dud, builder)
def reject_dud(session, dud, tag):
    """Reject a dud upload: roll back, emit a reject event, delete files.

    Args:
        session: Active SQLAlchemy session (any pending work is rolled back).
        dud: The offending Dud upload.
        tag: Short machine-readable rejection reason.
    """
    session.rollback()
    # Use the print() call form for consistency with the rest of this
    # module (and Python 3 compatibility); the old `print "..."` statement
    # form was Python-2-only.
    print("REJECT: {source} because {tag}".format(
        tag=tag, source=dud['Source']))
    emit('reject', 'result', {
        "tag": tag,
        "source": dud['Source'],
    })
    # Remove the .dud control file and every file it referenced.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
def accept_dud(config, session, dud, builder):
    """Accept a validated dud: store its firehose result and clean up.

    The firehose payload is deduplicated, recorded as a new result on the
    job, and the files are archived into the file repository. If the files
    were already registered, the upload is rejected instead.
    """
    fire = dud.get_firehose()
    # Simplified from `True if ... == "Yes" else False`; the comparison
    # already yields the boolean.
    failed = dud.get('X-Debile-Failed', None) == "Yes"
    job = session.query(Job).get(dud['X-Debile-Job'])
    fire, _ = idify(fire)
    fire = uniquify(session.bind, fire)
    result = job.new_result(fire, failed)
    session.add(result)
    try:
        repo = FileRepo()
        repo.add_dud(result.path, dud, config['filerepo_chmod_mode'])
    except FilesAlreadyRegistered:
        return reject_dud(session, dud, "dud-files-already-registered")
    emit('receive', 'result', result.debilize())
    # OK. It's safely in the database and repo. Let's cleanup.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
| 32.756303 | 82 | 0.677783 |
acde5ed2ee2461c6cd56230551f2a0d68846b724 | 4,989 | py | Python | obfsproxy/pyobfsproxy.py | aallai/pyobfsproxy | 9c88a40f0475a0c510f14d92a10437321ac14da5 | [
"BSD-3-Clause"
] | 1 | 2016-12-17T11:26:46.000Z | 2016-12-17T11:26:46.000Z | obfsproxy/pyobfsproxy.py | aallai/pyobfsproxy | 9c88a40f0475a0c510f14d92a10437321ac14da5 | [
"BSD-3-Clause"
] | null | null | null | obfsproxy/pyobfsproxy.py | aallai/pyobfsproxy | 9c88a40f0475a0c510f14d92a10437321ac14da5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is the command line interface to py-obfsproxy.
It is designed to be a drop-in replacement for the obfsproxy executable.
Currently, not all of the obfsproxy command line options have been implemented.
"""
import sys
import argparse
import obfsproxy.network.launch_transport as launch_transport
import obfsproxy.transports.transports as transports
import obfsproxy.common.log as logging
import obfsproxy.common.heartbeat as heartbeat
import obfsproxy.managed.server as managed_server
import obfsproxy.managed.client as managed_client
from obfsproxy import __version__
from pyptlib.util import checkClientMode
from twisted.internet import task # for LoopingCall
log = logging.get_obfslogger()
def set_up_cli_parsing():
    """Set up our CLI parser. Register our arguments and options and
    query individual transports to register their own external-mode
    arguments."""
    parser = argparse.ArgumentParser(
        description='py-obfsproxy: A pluggable transports proxy written in Python')
    # Each transport (and managed mode) is exposed as a subcommand; the
    # chosen subcommand ends up in args.name.
    subparsers = parser.add_subparsers(title='supported transports', dest='name')
    parser.add_argument('--log-file', help='set logfile')
    parser.add_argument('--log-min-severity',
                        choices=['error', 'warning', 'info', 'debug'],
                        help='set minimum logging severity (default: %(default)s)')
    parser.add_argument('--no-log', action='store_true', default=False,
                        help='disable logging')
    parser.add_argument('--no-safe-logging', action='store_true',
                        default=False,
                        help='disable safe (scrubbed address) logging')
    # Managed mode is a subparser for now because there are no
    # optional subparsers: bugs.python.org/issue9253
    subparsers.add_parser("managed", help="managed mode")
    # Add a subparser for each transport. Also add a
    # transport-specific function to later validate the parsed
    # arguments.
    for transport, transport_class in transports.transports.items():
        subparser = subparsers.add_parser(transport, help='%s help' % transport)
        transport_class['base'].register_external_mode_cli(subparser)
        subparser.set_defaults(validation_function=transport_class['base'].validate_external_mode_cli)
    return parser
def do_managed_mode():
    """This function starts obfsproxy's managed-mode functionality."""
    if checkClientMode():
        log.info('Entering client managed-mode.')
        managed_client.do_managed_client()
        return
    log.info('Entering server managed-mode.')
    managed_server.do_managed_server()
def do_external_mode(args):
    """This function starts obfsproxy's external-mode functionality."""
    # Internal invariants: the caller (pyobfsproxy) only dispatches here
    # for a recognized transport subcommand.
    assert(args)
    assert(args.name)
    assert(args.name in transports.transports)
    # Imported here, right before starting the listener; presumably the
    # reactor should not be pulled in for managed mode — TODO confirm.
    from twisted.internet import reactor
    launch_transport.launch_transport_listener(args.name, args.listen_addr, args.mode, args.dest, args.ext_cookie_file)
    log.info("Launched '%s' listener at '%s:%s' for transport '%s'." % \
        (args.mode, log.safe_addr_str(args.listen_addr[0]), args.listen_addr[1], args.name))
    reactor.run()
def consider_cli_args(args):
    """Check out parsed CLI arguments and take the appropriate actions."""
    if args.log_file:
        log.set_log_file(args.log_file)
    if args.log_min_severity:
        log.set_log_severity(args.log_min_severity)
    if args.no_log:
        log.disable_logs()
    if args.no_safe_logging:
        log.set_no_safe_logging()

    # validate:
    managed_without_logfile = (args.name == 'managed') and not args.log_file
    if managed_without_logfile:
        if args.log_min_severity:
            log.error("obfsproxy in managed-proxy mode can only log to a file!")
            sys.exit(1)
        # managed proxies without a logfile must not log at all.
        log.disable_logs()
def pyobfsproxy():
    """Actual pyobfsproxy entry-point."""
    parser = set_up_cli_parsing()
    args = parser.parse_args()
    consider_cli_args(args)
    log.warning('Pyobfsproxy (version: %s) starting up.' % (__version__))
    log.debug('argv: ' + str(sys.argv))
    log.debug('args: ' + str(args))
    # Fire up our heartbeat.
    l = task.LoopingCall(heartbeat.heartbeat.talk)
    l.start(3600.0, now=False) # do heartbeat every hour
    # Initiate obfsproxy.
    if (args.name == 'managed'):
        do_managed_mode()
    else:
        # Pass parsed arguments to the appropriate transports so that
        # they can initialize and setup themselves. Exit if the
        # provided arguments were corrupted.
        # XXX use exceptions
        if (args.validation_function(args) == False):
            sys.exit(1)
        do_external_mode(args)
def run():
    """Fake entry-point so that we can log unhandled exceptions."""
    try:
        pyobfsproxy()
    except Exception as e:
        # `except ... as` works on Python 2.6+ and Python 3, unlike the
        # old comma form; log the exception before re-raising.
        log.exception(e)
        raise
# Script entry point: allow running this module directly.
if __name__ == '__main__':
    run()
| 34.406897 | 119 | 0.687513 |
acde5ee9059e0299c23ec85ce58d256be8f952b8 | 549 | py | Python | Web/WrestlerNameGenerator/solve.py | davidjmaria/ctf-challenges | 8877dc8355128637886ccd72fefc39cad368258b | [
"Unlicense"
] | 9 | 2019-04-14T01:47:27.000Z | 2020-09-12T19:57:37.000Z | Web/WrestlerNameGenerator/solve.py | davidjmaria/ctf-challenges | 8877dc8355128637886ccd72fefc39cad368258b | [
"Unlicense"
] | null | null | null | Web/WrestlerNameGenerator/solve.py | davidjmaria/ctf-challenges | 8877dc8355128637886ccd72fefc39cad368258b | [
"Unlicense"
] | 4 | 2019-04-15T22:08:33.000Z | 2020-10-13T12:27:24.000Z | #!/usr/bin/env python
import requests
import re
URL = "http://archive.sunshinectf.org:19007"
r = requests.get(URL+"/generate.php?input=PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iSVNPLTg4NTktMSI%2FPgogPCFET0NUWVBFIGZvbyBbIDwhRUxFTUVOVCBmb28gQU5ZID4KICAgPCFFTlRJVFkgeHhlIFNZU1RFTSAiaHR0cDovLzEyNy4wLjAuMS9nZW5lcmF0ZS5waHAiID5dPgogICAgPGlucHV0PgogICAgICAgPGZpcnN0TmFtZT4meHhlOzwvZmlyc3ROYW1lPgogICAgICAgPGxhc3ROYW1lPndldzwvbGFzdE5hbWU%2BCiAgICA8L2lucHV0Pg%3D%3D")
flag = re.findall("sun{.+?}", r.text)
if flag:
print(flag[0])
else:
print("RIP")
| 42.230769 | 364 | 0.848816 |
acde5f279ead9f298779b3e7b8be87439c0cf1aa | 4,089 | py | Python | audio_signal_processing_Rostock/plot_direction.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | 54 | 2016-01-14T11:47:41.000Z | 2021-10-14T10:15:09.000Z | audio_signal_processing_Rostock/plot_direction.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | audio_signal_processing_Rostock/plot_direction.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | 18 | 2016-10-20T13:54:51.000Z | 2021-11-18T18:53:01.000Z | #!/usr/bin/env python3
"""Plot the live microphone signal(s) with matplotlib."""
import argparse
from queue import Queue, Empty
def int_or_str(text):
    """Helper function for argument parsing."""
    try:
        value = int(text)
    except ValueError:
        # Not numeric: keep the raw string (e.g. a device-name substring).
        return text
    return value
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    '-l', '--list-devices', action='store_true',
    help='show list of audio devices and exit')
parser.add_argument(
    '-d', '--device', type=int_or_str,
    help='input device (numeric ID or substring)')
parser.add_argument(
    '-w', '--window', type=float, default=200, metavar='DURATION',
    help='visible time slot (default: %(default)s ms)')
parser.add_argument(
    '-i', '--interval', type=float, default=30,
    help='minimum time between plot updates (default: %(default)s ms)')
parser.add_argument(
    '-b', '--blocksize', type=int, help='block size (in samples)')
parser.add_argument(
    '-r', '--samplerate', type=float, help='sampling rate of audio device')
parser.add_argument(
    '-n', '--downsample', type=int, default=1, metavar='N',
    help='display every Nth sample (default: %(default)s)')
parser.add_argument(
    'channels', type=int, default=[1,2], nargs='*', metavar='CHANNEL',
    help='input channels for estimation of direction (default: the first two)')
parser.add_argument(
    '-D', '--distance', type=float, default=0.2, metavar='DISTANCE',
    # Fixed: argparse interpolates %(default)s — the old "$(default)" was
    # printed literally in the help text.
    help='distance of the two microphones channels (default: %(default)s m)')
args = parser.parse_args()
if any(c < 1 for c in args.channels):
    parser.error('argument CHANNEL: must be >= 1')
mapping = [c - 1 for c in args.channels] # Channel numbers start with 1
queue = Queue()
def audio_callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block."""
    if status:
        # Over/underruns and other stream problems are reported here.
        print(status, flush=True)
    # Fancy indexing with mapping creates a (necessary!) copy:
    # the downsampled, channel-selected block is handed to the queue for
    # the plotting thread to consume.
    queue.put(indata[::args.downsample, mapping])
def update_plot(frame):
    """This is called by matplotlib for each plot update.
    Typically, audio callbacks happen more frequently than plot updates,
    therefore the queue tends to contain multiple blocks of audio data.
    """
    global newdata
    global corr
    block = True # The first read from the queue is blocking ...
    while True:
        try:
            data = queue.get(block=block)
        except Empty:
            break
        shift = len(data)
        # Shift the ring buffer left and append the newest block at the end.
        newdata = np.roll(newdata, -shift, axis=0)
        newdata[-shift:,:] = data
        block=False # ... all further reads are non-blocking
    # Normalized magnitude of the full cross-correlation between the two
    # selected channels; the peak's lag indicates the arrival direction.
    corr = np.correlate(newdata[:,0], newdata[:,1], mode='full')
    corr = np.abs(corr)/np.max(np.max(corr))
    lines[0].set_ydata(corr)
    return lines
try:
    # Imports inside the try so any missing dependency is reported via
    # parser.exit() below instead of a raw traceback.
    from matplotlib.animation import FuncAnimation
    import matplotlib.pyplot as plt
    import numpy as np
    import sounddevice as sd
    if args.list_devices:
        print(sd.query_devices())
        parser.exit()
    if args.samplerate is None:
        device_info = sd.query_devices(args.device, 'input')
        args.samplerate = device_info['default_samplerate']
    # Ring-buffer length in (downsampled) samples for the visible window.
    length = int(args.window * args.samplerate/ (1000 * args.downsample))
    # Maximum inter-microphone lag in samples: distance divided by the
    # speed of sound (343 m/s), times the effective sample rate.
    ran = args.distance/343*args.samplerate/args.downsample
    newdata = np.zeros((length, len(args.channels)))
    corr = np.zeros((2*length-1))
    # X-axis ticks: map arrival angles (0..180 degrees, plus 90) onto
    # cross-correlation lag positions.
    phi = np.linspace(0, 180, 6);
    phi = np.append(phi, 90)
    xtics = ran*np.cos(phi*np.pi/180) +length-1
    xlabels = phi
    fig, ax = plt.subplots()
    lines = ax.plot(corr)
    ax.axis((-ran+length-1, ran+length-1, 0, 1))
    ax.set_xticks(xtics)
    ax.set_xticklabels(xlabels)
    ax.yaxis.grid(True)
    fig.tight_layout(pad=0)
    stream = sd.InputStream(
        device=args.device, channels=max(args.channels),
        samplerate=args.samplerate, callback=audio_callback)
    # Keep a reference to the animation so it is not garbage-collected.
    ani = FuncAnimation(fig, update_plot, interval=args.interval, blit=True)
    with stream:
        plt.show()
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))
| 32.975806 | 79 | 0.657618 |
acde5fbe6da53d4db6afc072ef36861bbe5c121e | 68 | py | Python | fs/_version.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | fs/_version.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | fs/_version.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | """Version, used in module and setup.py.
"""
__version__ = "2.0.17"
| 17 | 40 | 0.647059 |
acde5ff50ba857ec32f57df8193c81787943cc74 | 4,934 | py | Python | kubernetes/client/models/v1_rolling_update_daemon_set.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/client/models/v1_rolling_update_daemon_set.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_rolling_update_daemon_set.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1RollingUpdateDaemonSet(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'max_unavailable': 'object'
    }

    attribute_map = {
        'max_unavailable': 'maxUnavailable'
    }

    def __init__(self, max_unavailable=None):
        """
        V1RollingUpdateDaemonSet - a model defined in Swagger
        """
        self._max_unavailable = None
        self.discriminator = None

        if max_unavailable is not None:
            self.max_unavailable = max_unavailable

    @property
    def max_unavailable(self):
        """
        Gets the max_unavailable of this V1RollingUpdateDaemonSet.
        The maximum number of DaemonSet pods that can be unavailable during
        the update. Value can be an absolute number (ex: 5) or a percentage
        of total number of DaemonSet pods at the start of the update
        (ex: 10%). Absolute number is calculated from percentage by
        rounding up. This cannot be 0. Default value is 1.

        :return: The max_unavailable of this V1RollingUpdateDaemonSet.
        :rtype: object
        """
        return self._max_unavailable

    @max_unavailable.setter
    def max_unavailable(self, max_unavailable):
        """
        Sets the max_unavailable of this V1RollingUpdateDaemonSet.

        :param max_unavailable: The max_unavailable of this
            V1RollingUpdateDaemonSet.
        :type: object
        """
        self._max_unavailable = max_unavailable

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Iterating the dict keys directly behaves identically on
        # Python 2 and 3, so six.iteritems is not needed here.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1RollingUpdateDaemonSet):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 38.850394 | 838 | 0.641873 |
acde6078188fb23f820b5db76c529b384f587631 | 3,527 | py | Python | examples/example4.py | foamliu/Neural_Renderer | ab6e4b2adb4dad77b8b122d89a74ed4ec78f6e75 | [
"MIT"
] | null | null | null | examples/example4.py | foamliu/Neural_Renderer | ab6e4b2adb4dad77b8b122d89a74ed4ec78f6e75 | [
"MIT"
] | null | null | null | examples/example4.py | foamliu/Neural_Renderer | ab6e4b2adb4dad77b8b122d89a74ed4ec78f6e75 | [
"MIT"
] | null | null | null | """
Example 4. Finding camera parameters.
"""
import argparse
import glob
import os
import imageio
import numpy as np
import torch
import torch.nn as nn
import tqdm
from skimage.io import imread, imsave
import neural_renderer as nr
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, 'data')
class Model(nn.Module):
    """Camera-pose optimization model.

    Renders a fixed mesh as a silhouette and compares it against a
    reference silhouette image; the camera position is the only
    trainable parameter.
    """

    def __init__(self, filename_obj, filename_ref=None):
        """
        :param filename_obj: path to the .obj mesh to render.
        :param filename_ref: path to the reference silhouette image, or
            None when the model is only used for rendering (as done by
            make_reference_image). Previously a None value crashed inside
            imread(); it is now guarded.
        """
        super(Model, self).__init__()

        # load .obj; buffers follow .cuda() moves but are not trained
        vertices, faces = nr.load_obj(filename_obj)
        self.register_buffer('vertices', vertices[None, :, :])
        self.register_buffer('faces', faces[None, :, :])

        # create constant textures (only used when rendering RGB images)
        texture_size = 2
        textures = torch.ones(1, self.faces.shape[1], texture_size,
                              texture_size, texture_size, 3,
                              dtype=torch.float32)
        self.register_buffer('textures', textures)

        # load reference image as a binary {0, 1} silhouette mask.
        # Bug fix: only attempt loading when a path was actually given.
        if filename_ref is not None:
            image_ref = torch.from_numpy(
                (imread(filename_ref).max(-1) != 0).astype(np.float32))
            self.register_buffer('image_ref', image_ref)
        else:
            self.image_ref = None

        # camera position -- the only trainable parameter of the model
        self.camera_position = nn.Parameter(
            torch.from_numpy(np.array([6, 10, -14], dtype=np.float32)))

        # setup renderer; renderer.eye aliases the Parameter, so optimizer
        # steps move the camera directly
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer

    def forward(self):
        """Render the silhouette from the current camera position and
        return the squared-error loss against the reference image."""
        image = self.renderer(self.vertices, self.faces, mode='silhouettes')
        loss = torch.sum((image - self.image_ref[None, :, :]) ** 2)
        return loss
def make_gif(filename):
    """Assemble the frames written to /tmp/_tmp_*.png into an animated GIF.

    :param filename: output path of the GIF.

    Side effect: each temporary frame file is deleted after being appended.
    """
    with imageio.get_writer(filename, mode='I') as writer:
        # Iterate a sorted snapshot so frames are appended in order.
        # Bug fix: the loop variable no longer shadows the 'filename'
        # parameter (the output path).
        for frame_path in sorted(glob.glob('/tmp/_tmp_*.png')):
            writer.append_data(imread(frame_path))
            os.remove(frame_path)
        # The context manager closes the writer; the previous explicit
        # writer.close() was redundant.
def make_reference_image(filename_ref, filename_obj):
    """Render the mesh from a fixed viewpoint and save the result as the
    reference image used by the camera optimization.

    :param filename_ref: path where the rendered reference image is saved.
    :param filename_obj: path of the .obj mesh to render.
    """
    ref_model = Model(filename_obj)
    ref_model.cuda()
    # fixed viewpoint: distance 2.732, elevation 30, azimuth -15
    ref_model.renderer.eye = nr.get_points_from_angles(2.732, 30, -15)
    rendered, _, _ = ref_model.renderer.render(
        ref_model.vertices, ref_model.faces, torch.tanh(ref_model.textures))
    imsave(filename_ref, rendered.detach().cpu().numpy()[0])
def main():
    """Optimize the camera position so the rendered silhouette matches the
    reference image, writing one frame per step and a final GIF.

    Stops early once the silhouette loss drops below 70.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'teapot.obj'))
    parser.add_argument('-ir', '--filename_ref', type=str, default=os.path.join(data_dir, 'example4_ref.png'))
    parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(data_dir, 'example4_result.gif'))
    parser.add_argument('-mr', '--make_reference_image', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()

    if args.make_reference_image:
        make_reference_image(args.filename_ref, args.filename_obj)

    model = Model(args.filename_obj, args.filename_ref)
    model.cuda()

    # (removed dead commented-out chainer optimizer setup)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    loop = tqdm.tqdm(range(1000))
    for i in loop:
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()
        images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures))
        image = images.detach().cpu().numpy()[0].transpose(1, 2, 0)
        imsave('/tmp/_tmp_%04d.png' % i, image)
        # Read the scalar loss once via .item() (instead of the deprecated
        # .data attribute) and reuse it for both display and the stop test.
        loss_value = loss.item()
        loop.set_description('Optimizing (loss %.4f)' % loss_value)
        if loss_value < 70:
            break
    make_gif(args.filename_output)


if __name__ == '__main__':
    main()
| 33.913462 | 119 | 0.664304 |
acde608803a55ef8fd482239280409cc977a85cb | 19,558 | py | Python | plugins/modules/zhmc_cpc.py | vmorris/zhmc-ansible-modules | 66a956312706481dbb6a2cc4290ad74bdfc41e8c | [
"Apache-2.0"
] | null | null | null | plugins/modules/zhmc_cpc.py | vmorris/zhmc-ansible-modules | 66a956312706481dbb6a2cc4290ad74bdfc41e8c | [
"Apache-2.0"
] | null | null | null | plugins/modules/zhmc_cpc.py | vmorris/zhmc-ansible-modules | 66a956312706481dbb6a2cc4290ad74bdfc41e8c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2018-2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_cpc
version_added: "2.9.0"
short_description: Update CPCs
description:
- Gather facts about a CPC (Z system), including its adapters, partitions,
and storage groups.
- Update the properties of a CPC.
author:
- Andreas Maier (@andy-maier)
- Andreas Scheuring (@scheuran)
requirements:
- Access to the WS API of the HMC of the targeted Z system
(see :term:`HMC API`). The targeted Z system can be in any operational
mode (classic, DPM).
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
type: str
required: true
hmc_auth:
description:
- The authentication credentials for the HMC, as a dictionary of
C(userid), C(password).
type: dict
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
type: str
required: true
password:
description:
- The password for authenticating with the HMC.
type: str
required: true
name:
description:
- The name of the target CPC.
type: str
required: true
state:
description:
- "The desired state for the CPC. All states are fully idempotent
within the limits of the properties that can be changed:"
- "* C(set): Ensures that the CPC has the specified properties."
- "* C(facts): Returns the CPC properties including its child resources."
type: str
required: true
choices: ['set', 'facts']
properties:
description:
- "Only for C(state=set): New values for the properties of the CPC.
Properties omitted in this dictionary will remain unchanged.
This parameter will be ignored for C(state=facts)."
- "The parameter is a dictionary. The key of each dictionary item is the
property name as specified in the data model for CPC resources, with
underscores instead of hyphens. The value of each dictionary item is
the property value (in YAML syntax). Integer properties may also be
provided as decimal strings."
- "The possible properties in this dictionary are the properties
defined as writeable in the data model for CPC resources."
type: dict
required: false
default: null
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
type: str
required: false
default: null
_faked_session:
description:
- "An internal parameter used for testing the module."
required: false
type: raw
default: null
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the CPC
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: facts
register: cpc1
- name: Ensure the CPC has the desired property values
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: set
properties:
acceptable_status:
- active
description: "This is CPC {{ my_cpc_name }}"
"""
RETURN = """
changed:
description: Indicates if any change has been made by the module.
For C(state=facts), always will be false.
returned: always
type: bool
msg:
description: An error message that describes the failure.
returned: failure
type: str
cpc:
description: "The CPC and its adapters, partitions, and storage groups."
returned: success
type: dict
contains:
name:
description: "CPC name"
type: str
"{property}":
description: "Additional properties of the CPC, as described in the data
model of the 'CPC' object in the :term:`HMC API` book.
The property names have hyphens (-) as described in that book."
adapters:
description: "The adapters of the CPC, with a subset of their
properties. For details, see the :term:`HMC API` book."
type: list
elements: dict
contains:
name:
description: "Adapter name"
type: str
object-uri:
description: "Canonical URI of the adapter"
type: str
adapter-id:
description: "Adapter ID (PCHID)"
type: str
type:
description: "Adapter type"
type: str
adapter-family:
description: "Adapter family"
type: str
status:
description: "Status of the adapter"
type: str
partitions:
description: "The defined partitions of the CPC, with a subset of their
properties. For details, see the :term:`HMC API` book."
type: list
elements: dict
contains:
name:
description: "Partition name"
type: str
object-uri:
description: "Canonical URI of the partition"
type: str
type:
description: "Type of the partition"
type: str
status:
description: "Status of the partition"
type: str
storage-groups:
description: "The storage groups associated with the CPC, with a subset
of their properties. For details, see the :term:`HMC API` book."
type: list
elements: dict
contains:
name:
description: "Storage group name"
type: str
object-uri:
description: "Canonical URI of the storage group"
type: str
type:
description: "Storage group type"
type: str
fulfillment-status:
description: "Fulfillment status of the storage group"
type: str
cpc-uri:
description: "Canonical URI of the associated CPC"
type: str
sample:
{
"name": "CPCA",
"{property}": "... more properties ... ",
"adapters": [
{
"adapter-family": "ficon",
"adapter-id": "120",
"name": "FCP_120_SAN1_02",
"object-uri": "/api/adapters/dfb2147a-e578-11e8-a87c-00106f239c31",
"status": "active",
"type": "fcp"
},
{
"adapter-family": "osa",
"adapter-id": "10c",
"name": "OSM1",
"object-uri": "/api/adapters/ddde026c-e578-11e8-a87c-00106f239c31",
"status": "active",
"type": "osm"
},
],
"partitions": [
{
"name": "PART1",
"object-uri": "/api/partitions/c44338de-351b-11e9-9fbb-00106f239d19",
"status": "stopped",
"type": "linux"
},
{
"name": "PART2",
"object-uri": "/api/partitions/6a46d18a-cf79-11e9-b447-00106f239d19",
"status": "active",
"type": "ssc"
},
],
"storage-groups": [
{
"cpc-uri": "/api/cpcs/66942455-4a14-3f99-8904-3e7ed5ca28d7",
"fulfillment-state": "complete",
"name": "CPCA_SG_PART1",
"object-uri": "/api/storage-groups/58e41a42-20a6-11e9-8dfc-00106f239c31",
"type": "fcp"
},
{
"cpc-uri": "/api/cpcs/66942455-4a14-3f99-8904-3e7ed5ca28d7",
"fulfillment-state": "complete",
"name": "CPCA_SG_PART2",
"object-uri": "/api/storage-groups/4947c6d0-f433-11ea-8f73-00106f239d19",
"type": "fcp"
},
],
}
"""
import logging # noqa: E402
import traceback # noqa: E402
from ansible.module_utils.basic import AnsibleModule # noqa: E402
from ..module_utils.common import log_init, Error, ParameterError, \
get_hmc_auth, get_session, to_unicode, process_normal_property, \
missing_required_lib # noqa: E402
try:
import requests.packages.urllib3
IMP_URLLIB3 = True
except ImportError:
IMP_URLLIB3 = False
IMP_URLLIB3_ERR = traceback.format_exc()
try:
import zhmcclient
IMP_ZHMCCLIENT = True
except ImportError:
IMP_ZHMCCLIENT = False
IMP_ZHMCCLIENT_ERR = traceback.format_exc()
# Python logger name for this module
LOGGER_NAME = 'zhmc_cpc'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of CPC resources, in this format:
#   name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
#   name: Name of the property according to the data model, with hyphens
#     replaced by underscores (this is how it is or would be specified in
#     the 'properties' module parameter).
#   allowed: Indicates whether it is allowed in the 'properties' module
#     parameter.
#   create: Not applicable for CPCs.
#   update: Indicates whether it can be specified for the "Modify CPC
#     Properties" operation (at all).
#   update_while_active: Indicates whether it can be specified for the "Modify
#     CPC Properties" operation while the CPC is active. None means
#     "not applicable" (used for update=False).
#   eq_func: Equality test function for two values of the property; None means
#     to use Python equality.
#   type_cast: Type cast function for an input value of the property; None
#     means to use it directly. This can be used for example to convert
#     integers provided as strings by Ansible back into integers (that is a
#     current deficiency of Ansible).
ZHMC_CPC_PROPERTIES = {

    # update properties for any mode:
    'description': (True, None, True, True, None, to_unicode),
    'acceptable_status': (True, None, True, True, None, None),

    # update properties for classic mode:
    'next_activation_profile_name': (True, None, True, True, None, to_unicode),
    'processor_running_time_type': (True, None, True, True, None, to_unicode),
    'processor_running_time': (True, None, True, True, None, int),
    # Following property is read-only on z14 and higher:
    'does_wait_state_end_time_slice': (True, None, True, True, None, None),

    # read-only properties (subset):
    'name': (False, None, False, None, None, None),  # provided in 'name' parm
    'object_uri': (False, None, False, None, None, None),
    'object_id': (False, None, False, None, None, None),
    'parent': (False, None, False, None, None, None),
    'class': (False, None, False, None, None, None),

    # The properties not specified here default to allow=False.
}
def process_properties(cpc, params):
    """
    Process the properties specified in the 'properties' module parameter,
    and return a dictionary (update_props) that contains the properties that
    can be updated. The input property values are compared with the existing
    resource property values and the returned set of properties is the
    minimal set of properties that need to be changed.

    - Underscores in the property names are translated into hyphens.
    - The presence of properties that cannot be updated is surfaced by
      raising ParameterError.

    Parameters:
      cpc (zhmcclient.Cpc): CPC to be updated.
      params (dict): Module input parameters.

    Returns:
      update_props: dict of properties for zhmcclient.Cpc.update_properties()

    Raises:
      ParameterError: An issue with the module parameters.
    """
    input_props = params.get('properties', None)
    if input_props is None:
        input_props = {}

    update_props = {}
    for prop_name in input_props:
        # Only the 'allowed' flag is inspected here; the remaining tuple
        # fields are consumed by process_normal_property() itself.
        prop_entry = ZHMC_CPC_PROPERTIES.get(prop_name)
        allowed = prop_entry[0] if prop_entry else False
        if not allowed:
            raise ParameterError(
                "CPC property {0!r} specified in the 'properties' module "
                "parameter cannot be updated.".format(prop_name))

        # Process a normal (= non-artificial) property
        _create_props, _update_props, _stop = process_normal_property(
            prop_name, ZHMC_CPC_PROPERTIES, input_props, cpc)
        update_props.update(_update_props)
        # CPCs cannot be created or stopped through this path; getting
        # create/stop output here would indicate an internal error.
        if _create_props:
            raise AssertionError(
                "Unexpected create properties for CPC: "
                "{0!r}".format(_create_props))
        if _stop:
            raise AssertionError(
                "Unexpected stop request for CPC property "
                "{0!r}".format(prop_name))
    return update_props
def add_artificial_properties(cpc):
    """
    Extend the properties of the given CPC object with artificial
    properties describing its child resources:

    * 'partitions': List of property dicts of the CPC's partitions
      (list subset of properties).
    * 'adapters': List of property dicts of the CPC's adapters
      (list subset of properties).
    * 'storage-groups': List of property dicts of the storage groups
      associated with this CPC (list subset of properties).
    """
    cpc.properties['partitions'] = [
        partition.properties for partition in cpc.partitions.list()]

    cpc.properties['adapters'] = [
        adapter.properties for adapter in cpc.adapters.list()]

    console = cpc.manager.console
    associated_sgs = console.storage_groups.list(
        filter_args={'cpc-uri': cpc.uri})
    cpc.properties['storage-groups'] = [
        sg.properties for sg in associated_sgs]
def ensure_set(params, check_mode):
    """
    Identify the target CPC and ensure that the specified properties are set
    on the target CPC.

    Parameters:
      params (dict): Module input parameters.
      check_mode (bool): When True, only report whether a change would be
        made, without performing it.

    Returns:
      tuple(bool, dict): Changed flag and resulting CPC properties
      (including the artificial child-resource properties).

    Raises:
      ParameterError: An issue with the module parameters.
      Error: Other errors during processing.
      zhmcclient.Error: Any zhmcclient exception can happen.
    """
    # Note: Defaults specified in argument_spec will be set in params dict
    host = params['hmc_host']
    userid, password = get_hmc_auth(params['hmc_auth'])
    cpc_name = params['name']
    _faked_session = params.get('_faked_session', None)  # No default specified

    changed = False

    # Bug fix: create the session *outside* the try block so that the
    # finally clause never references an unbound 'session' variable when
    # session creation itself fails (which masked the original error with
    # an UnboundLocalError).
    session = get_session(_faked_session, host, userid, password)
    try:
        client = zhmcclient.Client(session)
        cpc = client.cpcs.find(name=cpc_name)
        # The default exception handling is sufficient for the above.

        cpc.pull_full_properties()

        update_props = process_properties(cpc, params)
        if update_props:
            if not check_mode:
                cpc.update_properties(update_props)
            # Some updates of CPC properties are not reflected in a new
            # retrieval of properties until after a few seconds (usually the
            # second retrieval).
            # Therefore, we construct the modified result based upon the
            # input changes, and not based upon newly retrieved properties.
            cpc.properties.update(update_props)
            changed = True

        add_artificial_properties(cpc)

        result = cpc.properties
        return changed, result

    finally:
        session.logoff()
def facts(params, check_mode):
    """
    Identify the target CPC and return facts about the target CPC and its
    child resources.

    Parameters:
      params (dict): Module input parameters.
      check_mode (bool): Unused; fact gathering never changes anything.

    Returns:
      tuple(bool, dict): Always-False changed flag and the CPC properties
      (including the artificial child-resource properties).

    Raises:
      ParameterError: An issue with the module parameters.
      zhmcclient.Error: Any zhmcclient exception can happen.
    """
    host = params['hmc_host']
    userid, password = get_hmc_auth(params['hmc_auth'])
    cpc_name = params['name']
    _faked_session = params.get('_faked_session', None)  # No default specified

    # Bug fix: create the session *outside* the try block so that the
    # finally clause never references an unbound 'session' variable when
    # session creation itself fails.
    session = get_session(_faked_session, host, userid, password)
    try:
        client = zhmcclient.Client(session)
        cpc = client.cpcs.find(name=cpc_name)
        # The default exception handling is sufficient for the above.

        cpc.pull_full_properties()

        add_artificial_properties(cpc)

        result = cpc.properties
        return False, result

    finally:
        session.logoff()
def perform_task(params, check_mode):
    """
    Perform the task for this module, dependent on the 'state' module
    parameter.

    If check_mode is True, check whether changes would occur, but don't
    actually perform any changes.

    Raises:
      ParameterError: An issue with the module parameters.
      zhmcclient.Error: Any zhmcclient exception can happen.
    """
    handlers = {
        "set": ensure_set,
        "facts": facts,
    }
    handler = handlers[params['state']]
    return handler(params, check_mode)
def main():
    """Module entry point: define the parameter spec, verify dependencies,
    run the requested action, and report the result to Ansible."""

    # The following definition of module input parameters must match the
    # description of the options in the DOCUMENTATION string.
    argument_spec = {
        'hmc_host': dict(required=True, type='str'),
        'hmc_auth': dict(required=True, type='dict', no_log=True),
        'name': dict(required=True, type='str'),
        'state': dict(required=True, type='str', choices=['set', 'facts']),
        'properties': dict(required=False, type='dict', default={}),
        'log_file': dict(required=False, type='str', default=None),
        '_faked_session': dict(required=False, type='raw'),
    }

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    # Fail early when required Python packages failed to import.
    if not IMP_URLLIB3:
        module.fail_json(msg=missing_required_lib("requests"),
                         exception=IMP_URLLIB3_ERR)
    requests.packages.urllib3.disable_warnings()
    if not IMP_ZHMCCLIENT:
        module.fail_json(msg=missing_required_lib("zhmcclient"),
                         exception=IMP_ZHMCCLIENT_ERR)

    log_init(LOGGER_NAME, module.params['log_file'])

    # Log the input parameters without the credentials.
    params_without_auth = dict(module.params)
    del params_without_auth['hmc_auth']
    LOGGER.debug("Module entry: params: %r", params_without_auth)

    try:
        changed, result = perform_task(module.params, module.check_mode)
    except (Error, zhmcclient.Error) as exc:
        # These exceptions are considered errors in the environment or in
        # user input. They carry a self-contained message, so pass that
        # message on without a traceback.
        msg = "{0}: {1}".format(exc.__class__.__name__, exc)
        LOGGER.debug("Module exit (failure): msg: %r", msg)
        module.fail_json(msg=msg)
    # Other exceptions are considered module errors and are handled by
    # Ansible by showing the traceback.

    LOGGER.debug("Module exit (success): changed: %s, cpc: %r",
                 changed, result)
    module.exit_json(
        changed=changed, cpc=result)


if __name__ == '__main__':
    main()
| 33.205433 | 89 | 0.639738 |
acde639244969a29a1511f019a567261f170dd37 | 883 | py | Python | setup.py | antacua/openexchangerates | 11348c753520496ee076b18b2ca105dd9851ede5 | [
"MIT"
] | 10 | 2015-07-12T17:08:56.000Z | 2021-08-02T05:36:17.000Z | setup.py | antacua/openexchangerates | 11348c753520496ee076b18b2ca105dd9851ede5 | [
"MIT"
] | 6 | 2015-09-07T12:35:59.000Z | 2021-08-03T02:45:30.000Z | setup.py | antacua/openexchangerates | 11348c753520496ee076b18b2ca105dd9851ede5 | [
"MIT"
] | 29 | 2015-01-06T01:33:37.000Z | 2022-01-19T07:30:17.000Z | from setuptools import setup
# Package metadata for the openexchangerates.org API client.
#
# Bug fix: read the long description through a context manager instead of
# open('README.rst').read(), which left the file handle open until garbage
# collection (ResourceWarning under CPython, leak under other runtimes).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='openexchangerates',
    version='0.1.1',
    description='openexchangerates.org python API client',
    long_description=long_description,
    url='https://github.com/metglobal/openexchangerates',
    license='MIT',
    author='Metglobal',
    author_email='kadir.pekel@metglobal.com',
    packages=['openexchangerates'],
    install_requires=[
        'requests',
    ],
    tests_require=[
        'httpretty',
        'mock'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
    ],
)
| 28.483871 | 58 | 0.609287 |
acde6505e62b433f18ed85df9565c6999cc94343 | 15,454 | py | Python | src/logic/azext_logic/vendored_sdks/logic/operations/_integration_account_batch_configuration_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/logic/azext_logic/vendored_sdks/logic/operations/_integration_account_batch_configuration_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/logic/azext_logic/vendored_sdks/logic/operations/_integration_account_batch_configuration_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IntegrationAccountBatchConfigurationOperations(object):
"""IntegrationAccountBatchConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~logic_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        integration_account_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.BatchConfigurationCollection"
        """List the batch configurations for an integration account.

        Returns a lazy pager; no HTTP request is made until the returned
        ItemPaged is iterated.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param integration_account_name: The integration account name.
        :type integration_account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BatchConfigurationCollection or the result of cls(response)
        :rtype: ~logic_management_client.models.BatchConfigurationCollection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.BatchConfigurationCollection"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
        api_version = "2019-05-01"

        def prepare_request(next_link=None):
            # First page: expand the operation's URL template; continuation
            # pages: the service-provided next_link is already a full URL.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
            else:
                url = next_link
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (continuation token, items) back
            # to ItemPaged. This collection exposes no next link, so the
            # token is always None (single-page result).
            deserialized = self._deserialize('BatchConfigurationCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; raise HttpResponseError carrying the service
            # error body for any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/batchConfigurations'}
def get(
self,
resource_group_name, # type: str
integration_account_name, # type: str
batch_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.BatchConfiguration"
"""Get a batch configuration for an integration account.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param batch_configuration_name: The batch configuration name.
:type batch_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchConfiguration or the result of cls(response)
:rtype: ~logic_management_client.models.BatchConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BatchConfiguration"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
api_version = "2019-05-01"
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
'batchConfigurationName': self._serialize.url("batch_configuration_name", batch_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BatchConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/batchConfigurations/{batchConfigurationName}'}
def create_or_update(
self,
resource_group_name, # type: str
integration_account_name, # type: str
batch_configuration_name, # type: str
properties, # type: "models.BatchConfigurationProperties"
location=None, # type: Optional[str]
tags=None, # type: Optional[Dict[str, str]]
**kwargs # type: Any
):
# type: (...) -> "models.BatchConfiguration"
"""Create or update a batch configuration for an integration account.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param batch_configuration_name: The batch configuration name.
:type batch_configuration_name: str
:param properties: The batch configuration properties.
:type properties: ~logic_management_client.models.BatchConfigurationProperties
:param location: The resource location.
:type location: str
:param tags: The resource tags.
:type tags: dict[str, str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchConfiguration or the result of cls(response)
:rtype: ~logic_management_client.models.BatchConfiguration or ~logic_management_client.models.BatchConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BatchConfiguration"]
error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
_batch_configuration = models.BatchConfiguration(location=location, tags=tags, properties=properties)
api_version = "2019-05-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
'batchConfigurationName': self._serialize.url("batch_configuration_name", batch_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_batch_configuration, 'BatchConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BatchConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BatchConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/batchConfigurations/{batchConfigurationName}'}
def delete(
    self,
    resource_group_name,  # type: str
    integration_account_name,  # type: str
    batch_configuration_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete a batch configuration for an integration account.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param integration_account_name: The integration account name.
    :type integration_account_name: str
    :param batch_configuration_name: The batch configuration name.
    :type batch_configuration_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known HTTP status codes to azure-core exceptions; callers may
    # override the mapping via the `error_map` kwarg.
    error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
    api_version = "2019-05-01"

    # Construct URL by filling the path template attached below as `delete.metadata`.
    url = self.delete.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
        'batchConfigurationName': self._serialize.url("batch_configuration_name", batch_configuration_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers (DELETE sends no body, so no Content-Type is set)
    header_parameters = {}  # type: Dict[str, Any]

    # Construct and send request through the client pipeline
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = deleted, 204 = did not exist; anything else is an error.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        # Hand the raw pipeline response to the caller-supplied callback.
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/batchConfigurations/{batchConfigurationName}'}
| 49.060317 | 225 | 0.679953 |
acde6656263190541c8c4a8dceab5a383c23725e | 6,764 | py | Python | graph_objs/treemap/marker/colorbar/_title.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/treemap/marker/colorbar/_title.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/treemap/marker/colorbar/_title.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    """Generated wrapper for the ``treemap.marker.colorbar.title`` node.

    Exposes the validated properties ``font``, ``side`` and ``text``.
    All reads and writes go through ``self[...]`` so the base-class
    validation machinery runs on every assignment.
    """

    # class properties
    # --------------------
    _parent_path_str = "treemap.marker.colorbar"
    _path_str = "treemap.marker.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`new_plotly.graph_objs.treemap.marker.colorbar.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        new_plotly.graph_objs.treemap.marker.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Determines the location of color bar's title with respect to
        the color bar. Note that the title's location used to be set by
        the now deprecated `titleside` attribute.

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['right', 'top', 'bottom']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the color bar. Note that before the existence
        of `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`new_plotly.graph_objs.treemap.marker
            .colorbar.Title`
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Note that the title's
            location used to be set by the now deprecated
            `titleside` attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal fast path: parent supplied directly, skip all validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the new_plotly.graph_objs.treemap.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.treemap.marker.colorbar.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 32.995122 | 92 | 0.540656 |
acde666580fdd247fe799f66f09ca5d9c3d0365c | 5,057 | py | Python | wellcomeml/ml/spacy_entity_linking.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 29 | 2020-01-31T17:05:38.000Z | 2021-12-14T14:17:55.000Z | wellcomeml/ml/spacy_entity_linking.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 342 | 2020-02-05T10:40:43.000Z | 2022-03-17T19:50:23.000Z | wellcomeml/ml/spacy_entity_linking.py | wellcometrust/WellcomeML | f7f5427f6dfdc6e5ee1342764263c6411e0f9bdf | [
"MIT"
] | 9 | 2020-06-07T17:01:00.000Z | 2021-11-24T16:03:38.000Z | """
TODO: Fill this
"""
from pathlib import Path
import random
from wellcomeml.utils import throw_extra_import_message
try:
from spacy.training import Example
from spacy.util import minibatch, compounding
import spacy
from spacy.kb import KnowledgeBase
except ImportError as e:
throw_extra_import_message(error=e, required_module='spacy', extra='spacy')
class SpacyEntityLinker(object):
    """Train and apply a spaCy entity-linking pipeline backed by a pre-built
    KnowledgeBase stored on disk.

    ``kb_path`` must contain a serialized ``vocab`` folder and a ``kb``
    folder (see ``train``).
    """

    def __init__(self, kb_path, n_iter=50, print_output=True):
        # Directory containing the serialized vocab/ and kb/ sub-folders.
        self.kb_path = kb_path
        # Number of training epochs used by `train`.
        self.n_iter = n_iter
        # Whether to print per-epoch losses during training.
        self.print_output = print_output

    def _format_examples(self, data):
        # Remove examples with unknown identifiers to the knowledge base
        # Convert text to spacy.tokens.doc.Doc format
        # Return list of Examples objects
        kb_ids = self.nlp.get_pipe("entity_linker").kb.get_entity_strings()
        examples = []
        for text, annotation in data:
            # Tokenize without the entity_linker so an untrained linker
            # component is not run during preprocessing.
            with self.nlp.select_pipes(disable="entity_linker"):
                doc = self.nlp(text)
            annotation_clean = annotation
            for offset, kb_id_dict in annotation["links"].items():
                new_dict = {}
                for kb_id, value in kb_id_dict.items():
                    if kb_id in kb_ids:
                        new_dict[kb_id] = value
                    else:
                        print(
                            "Removed",
                            kb_id,
                            "from training because it is not in the KB.",
                        )
                annotation_clean["links"][offset] = new_dict
            example = Example.from_dict(doc, annotation_clean)
            examples.append(example)
        return examples

    def train(self, data):
        """
        Args:
            data: list of training data in the form::

                [('A sentence about Farrar',
                {'links': {(17, 22): {'Q1': 1.0, 'Q2': 0.0}}})]

        See https://spacy.io/usage/linguistic-features#entity-linking
        for where I got this code from
        """
        n_iter = self.n_iter
        vocab_folder = self.kb_path + "/vocab"
        kb_folder = self.kb_path + "/kb"

        self.nlp = spacy.load("en_core_web_sm")
        # Load the vocab that the KB was built against.
        self.nlp.vocab.from_disk(vocab_folder)
        self.nlp.add_pipe("sentencizer", before="parser")

        def create_kb(vocab):
            # NOTE: entity_vector_length must match the vectors the KB was
            # built with (300 here).
            entity_vector_length = 300
            kb = KnowledgeBase(vocab=vocab, entity_vector_length=entity_vector_length)
            kb.from_disk(kb_folder)
            return kb

        entity_linker = self.nlp.add_pipe("entity_linker")
        entity_linker.set_kb(create_kb)

        examples = self._format_examples(data)
        optimizer = entity_linker.initialize(
            lambda: iter(examples), nlp=self.nlp, kb_loader=create_kb
        )

        # enable=[] freezes every other pipe so only the linker is updated.
        with self.nlp.select_pipes(enable=[]):
            for itn in range(n_iter):
                random.shuffle(examples)
                losses = {}
                batches = minibatch(examples, size=compounding(4.0, 32.0, 1.001))
                for batch in batches:
                    self.nlp.update(
                        batch,
                        drop=0.2,
                        losses=losses,
                        sgd=optimizer,
                    )
                if self.print_output:
                    print(itn, "Losses", losses)
        return self.nlp

    def _get_token_nums(self, doc, char_idx):
        """
        Convert a character index to a token index
        i.e. what number token is character number char_idx in ?
        """
        # Returns the index of the first token starting at or after char_idx.
        # NOTE(review): implicitly returns None when char_idx lies beyond the
        # last token's start — callers should be prepared for that.
        for i, token in enumerate(doc):
            if char_idx > token.idx:
                continue
            if char_idx == token.idx:
                return i
            if char_idx < token.idx:
                return i

    def predict(self, data):
        """
        See how well the model predicts which entity you are referring to in your data

        Args:
            data: list of test data in the form::

                [('A sentence about Farrar',
                {'links': {(17, 22): {'Q1': 1.0, 'Q2': 0.0}}})]

        Returns:
            list: pred_entities_ids: [['Q1'], ['Q1'], ['Q2']
        """
        pred_entities_ids = []
        for text, annotation in data:
            doc = self.nlp(text)
            # Surface strings of the annotated spans in this example.
            names = [text[s:e] for s, e in annotation["links"].keys()]
            doc_entities_ids = []
            for ent in doc.ents:
                # Only PERSON entities whose text matches an annotated span
                # contribute a predicted KB id.
                if (ent.label_ == "PERSON") and (ent.text in names):
                    doc_entities_ids.append(ent.kb_id_)
            pred_entities_ids.append(doc_entities_ids)
        return pred_entities_ids

    def save(self, output_dir):
        # Serialize the whole pipeline (including the trained linker) to disk.
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        self.nlp.to_disk(output_dir)
        print("Saved model to", output_dir)

    def load(self, output_dir):
        print("Loading from", output_dir)
        self.nlp = spacy.load(output_dir)
        return self.nlp
| 33.713333 | 86 | 0.545778 |
acde66e7dc975d52cca19ddb6d4f3d2cb62330b9 | 4,013 | py | Python | packages/syft/src/syft/lib/python/range.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 8,428 | 2017-08-10T09:17:49.000Z | 2022-03-31T08:20:14.000Z | packages/syft/src/syft/lib/python/range.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 4,779 | 2017-08-09T23:19:00.000Z | 2022-03-29T11:49:36.000Z | packages/syft/src/syft/lib/python/range.py | vishalbelsare/PySyft | fb04404fcfbef82fad1fb47407b35a24e9afb599 | [
"Apache-1.1"
] | 2,307 | 2017-08-10T08:52:12.000Z | 2022-03-30T05:36:07.000Z | # stdlib
from typing import Any
from typing import Optional
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
# syft absolute
import syft as sy
# relative
from ...core.common import UID
from ...core.common.serde.serializable import serializable
from ...proto.lib.python.range_pb2 import Range as Range_PB
from .iterator import Iterator
from .primitive_factory import PrimitiveFactory
from .primitive_interface import PyPrimitive
from .types import SyPrimitiveRet
@serializable()
class Range(PyPrimitive):
    """Syft primitive wrapping Python's built-in ``range`` with a UID and
    protobuf (de)serialization. Most dunders delegate to the wrapped
    ``range`` and re-wrap the result via ``PrimitiveFactory``.
    """

    __slots__ = ["_id", "_index"]

    def __init__(
        self,
        start: Any = None,
        stop: Union[Any] = None,
        step: Union[Any] = 1,
        id: Optional[UID] = None,
    ):
        # Mirror builtin semantics: range(stop) vs range(start, stop[, step]).
        # A single positional argument is interpreted as `stop`.
        if stop is None:
            stop = start
            start = 0

        self.value = range(start, stop, step)
        self._id: UID = id if id else UID()

    @property
    def id(self) -> UID:
        """We reveal PyPrimitive.id as a property to discourage users and
        developers of Syft from modifying .id attributes after an object
        has been initialized.

        :return: returns the unique id of the object
        :rtype: UID
        """
        return self._id

    def __contains__(self, other: Any) -> SyPrimitiveRet:
        res = self.value.__contains__(other)
        return PrimitiveFactory.generate_primitive(value=res)

    def __eq__(self, other: Any) -> SyPrimitiveRet:
        res = self.value.__eq__(other)
        return PrimitiveFactory.generate_primitive(value=res)

    def __ne__(self, other: Any) -> SyPrimitiveRet:
        res = self.value.__ne__(other)
        return PrimitiveFactory.generate_primitive(value=res)

    def __sizeof__(self) -> SyPrimitiveRet:
        res = self.value.__sizeof__()
        return PrimitiveFactory.generate_primitive(value=res)

    def __bool__(self) -> SyPrimitiveRet:
        # res = self.value.__bool__()
        # mypy error: "range" has no attribute "__bool__"
        # work around:
        try:
            res = bool(self.value.__len__())
        except OverflowError:
            # len() overflows for huge ranges; a range that long is non-empty.
            res = True

        return PrimitiveFactory.generate_primitive(value=res)

    def __len__(self) -> Any:
        res = self.value.__len__()
        return PrimitiveFactory.generate_primitive(value=res)

    def __getitem__(self, key: Union[int]) -> Any:
        res = self.value.__getitem__(key)
        return PrimitiveFactory.generate_primitive(value=res)

    def __iter__(self, max_len: Optional[int] = None) -> Iterator:
        # Syft's Iterator wrapper; max_len optionally bounds iteration.
        return Iterator(self.value, max_len=max_len)

    @property
    def start(self) -> SyPrimitiveRet:
        res = self.value.start
        return PrimitiveFactory.generate_primitive(value=res)

    @property
    def step(self) -> SyPrimitiveRet:
        res = self.value.step
        return PrimitiveFactory.generate_primitive(value=res)

    @property
    def stop(self) -> SyPrimitiveRet:
        res = self.value.stop
        return PrimitiveFactory.generate_primitive(value=res)

    def index(self, value: int) -> SyPrimitiveRet:
        res = self.value.index(value)
        return PrimitiveFactory.generate_primitive(value=res)

    def count(self, value: int) -> SyPrimitiveRet:
        res = self.value.count(value)
        return PrimitiveFactory.generate_primitive(value=res)

    def upcast(self) -> range:
        # Return the plain builtin range (drops the Syft wrapper).
        return self.value

    def _object2proto(self) -> Range_PB:
        # Serialize start/stop/step plus the UID into the protobuf message.
        range_pb = Range_PB()
        range_pb.start = self.start
        range_pb.stop = self.stop
        range_pb.step = self.step
        range_pb.id.CopyFrom(self._id._object2proto())
        return range_pb

    @staticmethod
    def _proto2object(proto: Range_PB) -> "Range":
        return Range(
            start=proto.start,
            stop=proto.stop,
            step=proto.step,
            id=sy.deserialize(blob=proto.id),
        )

    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        return Range_PB
| 29.507353 | 73 | 0.654124 |
acde67378931c5f8e283a3d253fe64a233e23730 | 2,512 | py | Python | bitbots_behavior/bitbots_body_behavior/src/bitbots_body_behavior/actions/search_ball.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | bitbots_behavior/bitbots_body_behavior/src/bitbots_body_behavior/actions/search_ball.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | bitbots_behavior/bitbots_body_behavior/src/bitbots_body_behavior/actions/search_ball.py | MosHumanoid/bitbots_thmos_meta | f45ccc362dc689b69027be5b0d000d2a08580de4 | [
"MIT"
] | null | null | null | from dynamic_stack_decider.abstract_action_element import AbstractActionElement
from humanoid_league_msgs.msg import HeadMode
from geometry_msgs.msg import PoseStamped
from tf.transformations import quaternion_from_euler
import rospy
import math
class SearchBall(AbstractActionElement):
    """Action that turns the robot in place (90 degree steps to the left)
    to scan for the ball when it has not been seen for a while.
    """

    def __init__(self, blackboard, dsd, parameters=None):
        super(SearchBall, self).__init__(blackboard, dsd, parameters)
        # Time of the last commanded turn; also refreshed whenever the ball
        # was seen more recently than the last turn.
        self.time_last_turn = rospy.Time.now()

    def perform(self, reevaluate=False):
        # TODO make parameter value
        # Reset the timer if the ball was seen since we last turned.
        if self.time_last_turn < self.blackboard.world_model.ball_last_seen():
            self.time_last_turn = rospy.Time.now()

        # Only command a new turn every 20 seconds.
        if rospy.Time.now() - self.time_last_turn > rospy.Duration(20):
            # remember that we turned around
            self.time_last_turn = rospy.Time.now()
            # goal to turn by 90 deg left
            pose_msg = PoseStamped()
            pose_msg.header.stamp = rospy.Time.now()
            pose_msg.header.frame_id = self.blackboard.base_footprint_frame
            quaternion = quaternion_from_euler(0, 0, - math.pi / 2.0)
            pose_msg.pose.orientation.x = quaternion[0]
            pose_msg.pose.orientation.y = quaternion[1]
            pose_msg.pose.orientation.z = quaternion[2]
            pose_msg.pose.orientation.w = quaternion[3]
            self.blackboard.pathfinding.publish(pose_msg)
class SearchBallPenalty(AbstractActionElement):
    """Action used during penalty shootout: walks forward in bursts while
    the head searches for the ball in penalty mode.
    """

    def __init__(self, blackboard, dsd, parameters=None):
        super(SearchBallPenalty, self).__init__(blackboard, dsd, parameters)
        # Time of the last commanded forward movement.
        self.time_last_movement = rospy.Time.now()

    def perform(self, reevaluate=False):
        # Switch the head behavior to penalty ball search every tick.
        self.blackboard.blackboard.set_head_duty(HeadMode.BALL_MODE_PENALTY)
        # TODO make parameter value
        # Only command a new movement every 3 seconds.
        if rospy.Time.now() - self.time_last_movement > rospy.Duration(3):
            self.time_last_movement = rospy.Time.now()
            # goal to go straight
            pose_msg = PoseStamped()
            pose_msg.header.stamp = rospy.Time.now()
            pose_msg.header.frame_id = self.blackboard.base_footprint_frame
            pose_msg.pose.position.x = 0.75
            quaternion = quaternion_from_euler(0, 0, 0)
            pose_msg.pose.orientation.x = quaternion[0]
            pose_msg.pose.orientation.y = quaternion[1]
            pose_msg.pose.orientation.z = quaternion[2]
            pose_msg.pose.orientation.w = quaternion[3]
            self.blackboard.pathfinding.publish(pose_msg)
| 39.873016 | 79 | 0.678344 |
acde679ad9e4d5c31f70669dff72eed8c9d414a7 | 4,454 | bzl | Python | base/base.bzl | streamsets/distroless | 01a151cc742d986ff8b288012d9f5d2e4f2140d1 | [
"Apache-2.0"
] | 1 | 2020-05-19T18:42:05.000Z | 2020-05-19T18:42:05.000Z | base/base.bzl | streamsets/distroless | 01a151cc742d986ff8b288012d9f5d2e4f2140d1 | [
"Apache-2.0"
] | null | null | null | base/base.bzl | streamsets/distroless | 01a151cc742d986ff8b288012d9f5d2e4f2140d1 | [
"Apache-2.0"
] | 1 | 2022-01-14T05:14:57.000Z | 2022-01-14T05:14:57.000Z | # defines a function to replicate the container images for different distributions
load("@io_bazel_rules_docker//container:container.bzl", "container_image")
load("@io_bazel_rules_docker//contrib:test.bzl", "container_test")
load("@package_bundle//file:packages.bzl", "packages")
load("@package_bundle_debian10//file:packages.bzl", packages_debian10 = "packages")
load("//cacerts:cacerts.bzl", "cacerts")
# UID/GID used for the conventional distroless non-root user.
NONROOT = 65532

# Debian package bundles keyed by distro suffix (consumed by distro_components).
DISTRO_PACKAGES = {
    "_debian9": packages,
    "_debian10": packages_debian10,
}

# External repository providing distro-specific files (e.g. os_release.tar),
# keyed by the same distro suffix.
DISTRO_REPOSITORY = {
    "_debian9": "@debian_stretch",
    "_debian10": "@debian10",
}
# Replicate everything for debian9 and debian10
def distro_components(distro_suffix):
    """Instantiate image targets and tests for one Debian distribution.

    Declares the cacerts/static/base/debug images (plus their nonroot
    variants) and the associated container_test targets, all suffixed with
    distro_suffix (e.g. "_debian9", "_debian10").

    Args:
      distro_suffix: distro discriminator used both to suffix target names
        and to index DISTRO_PACKAGES / DISTRO_REPOSITORY.
    """
    cacerts(
        name = "cacerts" + distro_suffix,
        deb = DISTRO_PACKAGES[distro_suffix]["ca-certificates"],
    )

    # Minimal image: base files, network config and tzdata, no libc.
    container_image(
        name = "static" + distro_suffix,
        debs = [
            DISTRO_PACKAGES[distro_suffix]["base-files"],
            DISTRO_PACKAGES[distro_suffix]["netbase"],
            DISTRO_PACKAGES[distro_suffix]["tzdata"],
        ],
        env = {
            "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            # allows openssl to find the certificates by default
            # TODO: We should run update-ca-certifaces, but that requires "openssl rehash"
            # which would probably need to be run inside the container
            "SSL_CERT_FILE": "/etc/ssl/certs/ca-certificates.crt",
        },
        tars = [
            ":passwd",
            ":group_tar",
            # Create /tmp, too many things assume it exists.
            # tmp.tar has a /tmp with the correct permissions 01777
            # A tar is needed because at the moment there is no way to create a
            # directory with specific permissions.
            ":tmp.tar",
            ":nsswitch.tar",
            DISTRO_REPOSITORY[distro_suffix] + "//file:os_release.tar",
            ":cacerts" + distro_suffix + ".tar",
        ],
    )

    # Adds glibc and OpenSSL on top of the static image.
    container_image(
        name = "base" + distro_suffix,
        base = ":static" + distro_suffix,
        debs = [
            DISTRO_PACKAGES[distro_suffix]["libc6"],
            DISTRO_PACKAGES[distro_suffix]["libssl1.1"],
            DISTRO_PACKAGES[distro_suffix]["openssl"],
        ],
    )

    # A debug image with busybox available.
    container_image(
        name = "debug" + distro_suffix,
        base = ":base" + distro_suffix,
        directory = "/",
        entrypoint = ["/busybox/sh"],
        env = {"PATH": "$$PATH:/busybox"},
        tars = ["//experimental/busybox:busybox.tar"],
    )

    # Non-root base images
    container_image(
        name = "static-nonroot" + distro_suffix,
        base = ":static" + distro_suffix,
        user = "%d" % NONROOT,
        workdir = "/home/nonroot",
    )

    container_image(
        name = "base-nonroot" + distro_suffix,
        base = ":base" + distro_suffix,
        user = "%d" % NONROOT,
        workdir = "/home/nonroot",
    )

    container_image(
        name = "debug-nonroot" + distro_suffix,
        base = ":debug" + distro_suffix,
        user = "%d" % NONROOT,
        workdir = "/home/nonroot",
    )

    # Structure tests for each image variant.
    container_test(
        name = "debug" + distro_suffix + "_test",
        configs = ["testdata/debug.yaml"],
        image = ":debug" + distro_suffix,
    )

    container_test(
        name = "base" + distro_suffix + "_test",
        configs = ["testdata/base.yaml"],
        image = ":base" + distro_suffix,
    )

    # Helper image embedding the cert-check binary, plus its test.
    container_image(
        name = "check_certs_image" + distro_suffix,
        base = "//base:base" + distro_suffix,
        files = [":check_certs"],
        visibility = ["//visibility:private"],
    )

    container_test(
        name = "certs" + distro_suffix + "_test",
        configs = ["testdata/certs.yaml"],
        image = ":check_certs_image" + distro_suffix,
    )

    # Release tests: verify /etc/os-release content per distro
    # (testdata/debian9.yaml, testdata/debian10.yaml).
    container_test(
        name = "base_release" + distro_suffix + "_test",
        configs = ["testdata/" + distro_suffix[1:] + ".yaml"],
        image = ":base" + distro_suffix,
    )

    container_test(
        name = "debug_release" + distro_suffix + "_test",
        configs = ["testdata/" + distro_suffix[1:] + ".yaml"],
        image = ":debug" + distro_suffix,
    )

    container_test(
        name = "static_release" + distro_suffix + "_test",
        configs = ["testdata/" + distro_suffix[1:] + ".yaml"],
        image = ":static" + distro_suffix,
    )
| 31.814286 | 90 | 0.583071 |
acde6825dbfc556fbf67d07e331c34c7551f6cf0 | 4,145 | py | Python | jc/parsers/csv_s.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | null | null | null | jc/parsers/csv_s.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | null | null | null | jc/parsers/csv_s.py | shaikustin/jc | b59e38cfd2c8a7f5868e05d5562557b1c27e5e56 | [
"MIT"
] | null | null | null | """jc - JSON CLI output utility `csv` file streaming parser
> This streaming parser outputs JSON Lines
The `csv` streaming parser will attempt to automatically detect the delimiter character. If the delimiter cannot be detected it will default to comma. The first row of the file must be a header row.
Note: The first 100 rows are read into memory to enable delimiter detection, then the rest of the rows are loaded lazily.
Usage (cli):
$ cat file.csv | jc --csv-s
Usage (module):
import jc.parsers.csv_s
result = jc.parsers.csv_s.parse(csv_output)
Schema:
csv file converted to a Dictionary: https://docs.python.org/3/library/csv.html
{
"column_name1": string,
"column_name2": string
}
Examples:
$ cat homes.csv
"Sell", "List", "Living", "Rooms", "Beds", "Baths", "Age", "Acres", "Taxes"
142, 160, 28, 10, 5, 3, 60, 0.28, 3167
175, 180, 18, 8, 4, 1, 12, 0.43, 4033
129, 132, 13, 6, 3, 1, 41, 0.33, 1471
...
$ cat homes.csv | jc --csv-s
{"Sell":"142","List":"160","Living":"28","Rooms":"10","Beds":"5","Baths":"3","Age":"60","Acres":"0.28","Taxes":"3167"}
{"Sell":"175","List":"180","Living":"18","Rooms":"8","Beds":"4","Baths":"1","Age":"12","Acres":"0.43","Taxes":"4033"}
{"Sell":"129","List":"132","Living":"13","Rooms":"6","Beds":"3","Baths":"1","Age":"41","Acres":"0.33","Taxes":"1471"}
...
"""
import itertools
import csv
import jc.utils
from jc.utils import stream_success, stream_error
from jc.exceptions import ParseError
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'CSV file streaming parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    details = 'Using the python standard csv library'
    # Platforms this parser is declared compatible with (checked by jc.utils).
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    # Marks this as a streaming parser (yields one JSON object per row).
    streaming = True


__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Each Dictionary represents a row in the csv file.
"""
# No further processing
return proc_data
def parse(data, raw=False, quiet=False, ignore_exceptions=False):
    """
    Main text parsing generator function. Returns an iterator object.

    Parameters:

        data:              (iterable) line-based text data to parse (e.g. sys.stdin or str.splitlines())
        raw:               (boolean)  output preprocessed JSON if True
        quiet:             (boolean)  suppress warning messages if True
        ignore_exceptions: (boolean)  ignore parsing exceptions if True

    Yields:

        Dictionary. Raw or processed structured data.

    Returns:

        Iterator object
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    # convert data to an iterable in case a sequence like a list is used as input.
    # this allows the exhaustion of the input so we don't double-process later.
    data = iter(data)
    temp_list = []

    # first, load the first 100 lines into a list to detect the CSV dialect
    for line in itertools.islice(data, 100):
        temp_list.append(line)

    # check for Python bug that does not split on `\r` newlines from sys.stdin correctly
    # https://bugs.python.org/issue45617
    if len(temp_list) == 1:
        raise ParseError('Unable to detect line endings. Please try the non-streaming CSV parser instead.')

    sniffdata = '\n'.join(temp_list)

    # Dialect detection is best-effort: if sniffing fails, dialect stays None
    # and DictReader falls back to the default (comma) dialect.
    dialect = None
    try:
        dialect = csv.Sniffer().sniff(sniffdata)
    except Exception:
        pass

    # chain `temp_list` and `data` together to lazy load the rest of the CSV data
    new_data = itertools.chain(temp_list, data)
    reader = csv.DictReader(new_data, dialect=dialect)

    for row in reader:
        try:
            yield stream_success(row, ignore_exceptions) if raw else stream_success(_process(row), ignore_exceptions)
        except Exception as e:
            yield stream_error(e, ignore_exceptions, row)
| 31.401515 | 198 | 0.646803 |
acde68a942b27047491124f3b4c463c2d0aa00ac | 1,736 | py | Python | mindhome_alpha/erpnext/regional/report/professional_tax_deductions/professional_tax_deductions.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/regional/report/professional_tax_deductions/professional_tax_deductions.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/regional/report/professional_tax_deductions/professional_tax_deductions.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.regional.report.provident_fund_deductions.provident_fund_deductions import get_conditions
def execute(filters=None):
    """Report entry point: return (columns, rows) for the given filters.

    Columns are only built when at least one row exists, matching the
    original behavior of returning an empty column list for empty data.
    """
    rows = get_data(filters)
    if rows:
        return get_columns(filters), rows
    return [], rows
def get_columns(filters):
    """Describe the three report columns: employee link, employee name, amount."""
    employee_column = {
        "label": _("Employee"),
        "options": "Employee",
        "fieldname": "employee",
        "fieldtype": "Link",
        "width": 200,
    }

    employee_name_column = {
        "label": _("Employee Name"),
        "options": "Employee",
        "fieldname": "employee_name",
        "fieldtype": "Link",
        "width": 160,
    }

    amount_column = {
        "label": _("Amount"),
        "fieldname": "amount",
        "fieldtype": "Currency",
        "width": 140,
    }

    return [employee_column, employee_name_column, amount_column]
def get_data(filters):
    """Collect Professional Tax deduction rows from submitted salary slips.

    Returns a list of dicts with keys employee, employee_name and amount —
    one entry per matching deduction row.
    """
    data = []

    # Salary components flagged as Professional Tax (name -> component_type).
    component_type_dict = frappe._dict(frappe.db.sql(""" select name, component_type from `tabSalary Component`
        where component_type = 'Professional Tax' """))

    if not len(component_type_dict):
        return []

    # NOTE(review): `conditions` is interpolated into the SQL string below;
    # get_conditions is expected to produce safe, pre-built SQL fragments.
    conditions = get_conditions(filters)

    # The in-clause placeholders are generated to match the number of
    # Professional Tax components; values are passed as query parameters.
    entry = frappe.db.sql(""" select sal.employee, sal.employee_name, ded.salary_component, ded.amount
        from `tabSalary Slip` sal, `tabSalary Detail` ded
        where sal.name = ded.parent
        and ded.parentfield = 'deductions'
        and ded.parenttype = 'Salary Slip'
        and sal.docstatus = 1 %s
        and ded.salary_component in (%s)
    """ % (conditions , ", ".join(['%s']*len(component_type_dict))), tuple(component_type_dict.keys()), as_dict=1)

    for d in entry:
        employee = {
            "employee": d.employee,
            "employee_name": d.employee_name,
            "amount": d.amount
        }
        data.append(employee)

    return data
acde68ed520dd9eb7e3c894e4befc8c9642ded81 | 733 | py | Python | src/job.py | amradk/pybackupper | 5ccbd583fddec5817555d54e5fd279ff61c1849a | [
"BSD-2-Clause"
] | null | null | null | src/job.py | amradk/pybackupper | 5ccbd583fddec5817555d54e5fd279ff61c1849a | [
"BSD-2-Clause"
] | null | null | null | src/job.py | amradk/pybackupper | 5ccbd583fddec5817555d54e5fd279ff61c1849a | [
"BSD-2-Clause"
] | null | null | null | from task import Task
class Job():
    """A named backup job: an ordered collection of tasks operated on as a unit.

    Every method simply fans out to the contained tasks in insertion order.
    """

    def __init__(self, name, tasks=None):
        """Create a job.

        name  -- job name; also passed to each task's transfer() as the
                 artifact namespace.
        tasks -- optional iterable of initial tasks. It is copied, so the
                 caller's list is never aliased. (The previous signature used
                 a mutable default ``tasks=[]`` and then silently discarded
                 the argument by assigning ``self.tasks = []``; passing tasks
                 at construction time now actually works.)
        """
        self.name = name
        self.tasks = list(tasks) if tasks else []

    def add_task(self, task):
        """Append a task to the job's execution list."""
        self.tasks.append(task)

    def prepare_tasks(self):
        """Ask every task to build its commands before execution."""
        for t in self.tasks:
            t.build_commands()

    def execute_tasks(self):
        """Run every task in order."""
        for t in self.tasks:
            t.execute()

    def set_connection(self, conn):
        """Propagate a connection object to all tasks."""
        for t in self.tasks:
            t.set_connection(conn)

    def set_storage(self, storage):
        """Propagate a storage backend to all tasks."""
        for t in self.tasks:
            t.set_storage(storage)

    def transfer_artifacts(self):
        """Transfer each task's artifacts, namespaced by this job's name."""
        for t in self.tasks:
            t.transfer(self.name)

    def clean(self):
        """Ask every task to clean up after itself."""
        for t in self.tasks:
            t.clean()
acde68ef945ce212125b8220924b2b82e4bdcd8e | 6,887 | py | Python | mak/libs/ircc/ir_grammar/lex.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | null | null | null | mak/libs/ircc/ir_grammar/lex.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | null | null | null | mak/libs/ircc/ir_grammar/lex.py | motor-dev/Motor | 98cb099fe1c2d31e455ed868cc2a25eae51e79f0 | [
"BSD-3-Clause"
] | null | null | null | from ply.lex import TOKEN
from motor_typing import TYPE_CHECKING
t_ignore = ' \t'
keywords = (
# header
'source_filename',
'target',
'datalayout',
'triple',
'attributes',
# structs
'type',
'opaque',
'addrspace',
'x',
# COMDAT
'comdat',
'any',
'exactmatch',
'largest',
'noduplicates',
'samesize',
# types
'i1',
'i8',
'i16',
'i32',
'i64',
'float',
'double',
'half',
'void',
'metadata',
# values
'null',
'None',
'undef',
'true',
'false',
'zeroinitializer',
'poison',
# methods
'declare',
'define',
# linkage
'private',
'linker_private',
'linker_private_weak',
'internal',
'available_externally',
'linkonce',
'weak',
'common',
'appending',
'extern_weak',
'linkonce_odr',
'weak_odr',
'external',
# DLL attributes
'dllimport',
'dllexport',
# visibility
'default',
'hidden',
'protected',
# variables
'global',
'constant',
# method tags
'section',
'gc',
'prefix',
'prologue',
'personality',
# calling convention
'spir_func',
'spir_kernel',
'ccc',
'fastcc',
'coldcc',
'webkit_jscc',
'anyregcc',
'preserve_mostcc',
'preserve_allcc',
'cxx_fast_tlscc',
'swiftcc',
'tailcc',
'cfguard_checkcc',
'cc',
# preemption
'dso_local',
'dso_preemptable',
# method attribute
'unnamed_addr',
'local_unnamed_addr',
'alignstack',
'allocsize',
'alwaysinline',
'builtin',
'cold',
'convergent',
'inaccessiblememonly',
'inaccessiblemem_or_argmemonly',
'inlinehint',
'jumptable',
'minsize',
'naked',
'no-inline-line-tables',
'no-jump-tables',
'nobuiltin',
'noduplicate',
'nofree',
'noimplicitfloat',
'noinline',
'nonlazybind',
'noredzone',
'indirect-tls-seg-refs',
'noreturn',
'norecurse',
'willreturn',
'nosync',
'nounwind',
'null-pointer-is-valid',
'optforfuzzing',
'optnone',
'optsize',
'patchable-function',
'probe-stack',
'readnone',
'readonly',
'stack-probe-size',
'no-stack-arg-probe',
'writeonly',
'argmemonly',
'returns_twice',
'safestack',
'sanitize_address',
'sanitize_memory',
'sanitize_memtag',
'speculative_load_hardening',
'speculatable',
'ssp',
'sspreq',
'sspstrong',
'strictfp',
'denormal-fp-math',
'denormal-fp-math-f32',
'thunk',
'uwtable',
'nocf_check',
'shadowcallstack',
'mustprogress',
# fp
'ieee',
'preserve-sign',
'positive-zero',
# parameter attributes
'zeroext',
'signext',
'inreg',
'byval',
'preallocated',
'inalloca',
'sret',
'align',
'noalias',
'nocapture',
'nest',
'returned',
'nonnull',
'dereferenceable',
'dereferenceable_or_null',
'swiftself',
'swifterror',
'immarg',
# debug metadata
'distinct',
# opcodes
'to',
'ret',
'br',
'label',
'switch',
'unreachable',
'fneg',
'nuw',
'nsw',
'add',
'fadd',
'sub',
'fsub',
'mul',
'fmul',
'udiv',
'sdiv',
'fdiv',
'urem',
'srem',
'frem',
'shl',
'lshr',
'ashr',
'and',
'or',
'xor',
'extractelement',
'insertelement',
'shufflevector',
'extractvalue',
'insertvalue',
'alloca',
'volatile',
'load',
'store',
'getelementptr',
'inbounds',
'inrange',
'trunc',
'zext',
'sext',
'fptrunc',
'fpext',
'fptoui',
'fptosi',
'uitofp',
'sitofp',
'ptrtoint',
'inttoptr',
'bitcast',
'addrspacecast',
'icmp',
'fcmp',
'eq',
'ne',
'ueq',
'une',
'ugt',
'uge',
'ult',
'ule',
'uno',
'sgt',
'sge',
'slt',
'sle',
'oeq',
'one',
'ogt',
'oge',
'olt',
'ole',
'ord',
'phi',
'select',
'tail',
'musttail',
'notail',
'call',
)
simple_escape = r"""([a-zA-Z\\?'"])"""
octal_escape = r"""([0-7]{1,3})"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
escape_sequence = r"""(\\(""" + simple_escape + '|' + octal_escape + '|' + hex_escape + '))'
string_char = r"""([^"\\\n]|%s)""" % escape_sequence
@TOKEN('"' + string_char + '*"')
def t_LITERAL_STRING(t):
# type: (LexToken) -> LexToken
t.value = t.value[1:-1]
return t
t_ID_COMDAT = '\\$[a-zA-Z\._][a-zA-Z\._0-9]*'
def t_ID_LABEL(t):
# type: (LexToken) -> LexToken
r'[a-zA-Z\._][a-zA-Z\._0-9]*'
if t.lexer.ir_lexer._keywords_enabled:
if t.value in keywords:
t.type = t.value.upper()
return t
t_LITERAL_DECIMAL = '[-+]?[0-9]+'
t_ATTRIBUTE_GROUP = '\\#[0-9]+'
t_METADATA_NAME = '![-a-zA-Z\$\._][-a-zA-Z\$\._0-9]+'
t_METADATA_REF = '![0-9]+'
t_METADATA_MARK = '!'
t_EQUAL = '='
t_LBRACE = '{'
t_RBRACE = '}'
t_COMMA = ','
t_COLON = ':'
t_LBRACKET = '\\['
t_RBRACKET = '\\]'
t_LPAREN = '\\('
t_RPAREN = '\\)'
t_LANGLE = '<'
t_RANGLE = '>'
t_STAR = '\\*'
t_PIPE = '\\|'
for kw in keywords:
globals()['t_%s' % kw.upper().replace('-', '_')] = kw
t_ID = '[%@]([-a-zA-Z\$\._0-9]+|("[^"]*"))'
def t_COMMENT(t):
# type: (LexToken) -> Optional[LexToken]
r';[^\n]*\n+'
t.lexer.ir_lexer._lineno += t.value.count("\n")
return None
def t_NEWLINE(t):
# type: (LexToken) -> Optional[LexToken]
r'\n+'
t.lexer.ir_lexer._lineno += t.value.count("\n")
return None
def t_error(t):
# type: (LexToken) -> Optional[LexToken]
t.value = t.value[0]
t.lexer.ir_lexer.logger.C0000(t.lexer.ir_lexer.position(t), t.value[0])
t.lexer.skip(1)
return None
tokens = (
'ID', 'ID_COMDAT', 'ID_LABEL', 'LITERAL_STRING', 'LITERAL_DECIMAL', 'ATTRIBUTE_GROUP', 'METADATA_NAME',
'METADATA_REF', 'METADATA_MARK', 'LBRACE', 'RBRACE', 'LPAREN', 'LPAREN_MARK', 'RPAREN', 'LBRACKET', 'RBRACKET',
'LANGLE', 'RANGLE', 'EQUAL', 'STAR', 'COMMA', 'COLON', 'PIPE'
) + tuple(k.upper().replace('-', '_') for k in keywords)
if TYPE_CHECKING:
from typing import Optional
from ply.lex import LexToken | 19.130556 | 115 | 0.474953 |
acde6b746b9140761bc9f6d9beacf3c2825974ae | 1,526 | py | Python | 2/2_4_1_linked_list.py | DingJunyao/aha-algorithms-py | 98912e569ed22bcbbe462c1a01b153ce99e030e5 | [
"CC0-1.0"
] | 2 | 2019-06-27T15:44:18.000Z | 2020-02-24T06:57:26.000Z | 2/2_4_1_linked_list.py | DingJunyao/aha-algorithms-py | 98912e569ed22bcbbe462c1a01b153ce99e030e5 | [
"CC0-1.0"
] | null | null | null | 2/2_4_1_linked_list.py | DingJunyao/aha-algorithms-py | 98912e569ed22bcbbe462c1a01b153ce99e030e5 | [
"CC0-1.0"
] | null | null | null | """
链表
输入数据,使用链表存储,追加数据,输出输入和追加的数据。
Python中没有指针,因此链表的结构也不一样。本代码使用对象实现链表及其节点。
"""
class Node:
def __init__(self, data=None, next_node=0):
"""
节点初始化
:param data=None: 数据,默认为None
:param next_node=0: 下一个节点,默认为0
"""
self.data = data
self.next = next_node
class LinkedList:
def __init__(self):
"""
链表初始化
"""
self.head = None
def initData(self, in_data):
"""
在链表中加入数据
:param in_data: 数据,以列表形式添加
"""
self.head = Node(in_data[0])
p = self.head
for i in in_data[1:]:
an = Node(i)
p.next = an
p = p.next
def iterate(self):
"""
遍历数据,输出列表
"""
ll = []
lp = self.head
while lp != 0:
ll.append(lp.data)
lp = lp.next
return ll
def ins(self, data):
"""
在链表中插入数据。如果链表和数据内容为整数,且已经按从小到大的顺序排列,数据会按顺序放到合适位置
:param data: 数据
"""
lp = self.head
while lp.next != 0 and lp.next.data < data:
lp = lp.next
an = Node(data)
if lp is self.head and lp.next.data > data:
an.next = lp
self.head = an
else:
an.next = lp.next
lp.next = an
ll = LinkedList()
ll.initData([int(i) for i in input("请输入从小到大已排好序的整数,以空格分隔: ").split(' ')])
ll.ins(int(input("请输入要添加的数,该数会按顺序放到合适位置: ")))
print(' '.join([str(i) for i in ll.iterate()]))
| 21.194444 | 73 | 0.484928 |
acde6f2fd3fce20a822e4dfe7cb13f4c0c0a7dd0 | 76 | py | Python | sentiment/__init__.py | tech-team/sentiment | 10e34409c8ff83b0778501370b9681dfc071f0dd | [
"MIT"
] | null | null | null | sentiment/__init__.py | tech-team/sentiment | 10e34409c8ff83b0778501370b9681dfc071f0dd | [
"MIT"
] | null | null | null | sentiment/__init__.py | tech-team/sentiment | 10e34409c8ff83b0778501370b9681dfc071f0dd | [
"MIT"
] | null | null | null |
class SentimentAnalysisModel(object):
def __init__(self):
pass
| 15.2 | 37 | 0.684211 |
acde6f3c534630364b3a9c17dff894b0f5b7c819 | 4,213 | py | Python | examples/0_full_pipeline_no_hydra.py | dumpmemory/weasel | c6bccc5cb919b9cc6f4296a0ac34fcd128f628d4 | [
"Apache-2.0"
] | 81 | 2021-11-03T11:55:16.000Z | 2022-03-19T18:41:07.000Z | examples/0_full_pipeline_no_hydra.py | dumpmemory/weasel | c6bccc5cb919b9cc6f4296a0ac34fcd128f628d4 | [
"Apache-2.0"
] | 4 | 2021-12-03T16:53:46.000Z | 2022-03-22T17:35:56.000Z | examples/0_full_pipeline_no_hydra.py | dumpmemory/weasel | c6bccc5cb919b9cc6f4296a0ac34fcd128f628d4 | [
"Apache-2.0"
] | 4 | 2021-11-29T02:12:58.000Z | 2022-01-16T07:25:07.000Z | import warnings
warnings.filterwarnings("ignore")
# %%
import os
import numpy as np
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
seed_everything(seed=7)
# Full pipeline for a new WeaSEL problem
# %% md
# %%
n, n_evaluation = 10_000, 1_000 # number of training and test samples
n_channels = 3 # e.g. could be RGB
height = width = 28 # grid resolution
X_train = np.random.randn(n, n_channels, height, width)
X_test = np.random.randn(n_evaluation, n_channels, height, width)
# %%
C = 3
possible_labels = list(range(C))
Y_test = np.random.choice(possible_labels, size=n_evaluation)
# %% md
# %%
m = 10
ABSTAIN = -1
possible_LF_outputs = [ABSTAIN] + list(range(C))
label_matrix = np.empty((n, m))
for LF in range(m):
label_matrix[:, LF] = np.random.choice(
possible_LF_outputs, size=n, p=[0.85] + [(1 - 0.85) * 1 / C for _ in range(C)]
)
# %% md
# From data to DataModule
# %%
from weasel.datamodules.base_datamodule import BasicWeaselDataModule
weasel_datamodule = BasicWeaselDataModule(
label_matrix=label_matrix,
X_train=X_train,
X_test=X_test,
Y_test=Y_test,
batch_size=256,
val_test_split=(200, 800) # 200 validation, 800 test points will be split from (X_test, Y_test)
)
# %% md
## Defining an End-model
# %%
from weasel.models.downstream_models.base_model import DownstreamBaseModel
class MyCNN(DownstreamBaseModel):
def __init__(self, in_channels,
hidden_dim,
conv_layers: int,
n_classes: int,
kernel_size=(3, 3),
*args, **kwargs):
super().__init__()
# Good practice:
self.out_dim = n_classes
self.example_input_array = torch.randn((1, in_channels, height, width))
cnn_modules = []
in_dim = in_channels
for layer in range(conv_layers):
cnn_modules += [
nn.Conv2d(in_dim, hidden_dim, kernel_size),
nn.GELU(),
nn.MaxPool2d(2, 2)
]
in_dim = hidden_dim
self.convs = nn.Sequential(*cnn_modules)
self.flattened_dim = torch.flatten(
self.convs(self.example_input_array), start_dim=1
).shape[1]
mlp_modules = [
nn.Linear(self.flattened_dim, int(self.flattened_dim / 2)),
nn.GELU()
]
mlp_modules += [nn.Linear(int(self.flattened_dim / 2), n_classes)]
self.readout = nn.Sequential(*mlp_modules)
def forward(self, X: torch.Tensor, readout=True):
conv_out = self.convs(X)
flattened = torch.flatten(conv_out, start_dim=1)
if not readout:
return flattened
logits = self.readout(flattened)
return logits # We predict the raw logits in forward!
# %%
cnn_end_model = MyCNN(in_channels=n_channels, hidden_dim=16, conv_layers=2, n_classes=C)
# %% md
# Coupling end-model into Weasel
#%%
from weasel.models import Weasel
weasel = Weasel(
end_model=cnn_end_model,
num_LFs=m,
n_classes=C,
encoder={'hidden_dims': [32, 10]},
optim_encoder={'name': 'adam', 'lr': 1e-4},
optim_end_model={'name': 'adam', 'lr': 1e-4} # different way of getting the same optim with Hydra
)
# %% md
## Training Weasel and end-model
# %%
from pytorch_lightning.callbacks import ModelCheckpoint
checkpoint_callback = ModelCheckpoint(monitor="Val/f1_macro", mode="max")
trainer = pl.Trainer(
gpus=0, # >= 1 to use GPU(s)
max_epochs=3, # since just for illustratory purposes
logger=False,
deterministic=True,
callbacks=[checkpoint_callback]
)
trainer.fit(model=weasel, datamodule=weasel_datamodule)
# %% md
## Evaluation
# The below will give the same test results
# test_stats = trainer.test(datamodule=weasel_datamodule, ckpt_path='best')
final_cnn_model = weasel.load_from_checkpoint(
trainer.checkpoint_callback.best_model_path
).end_model
# Test the stand-alone, fully-trained CNN model (the metrics have of course no meaning in this simulated example):
test_statd = pl.Trainer().test(model=final_cnn_model, test_dataloaders=weasel_datamodule.test_dataloader())
| 24.212644 | 114 | 0.663423 |
acde6f77bb83c1c77badc777673ca980411ae024 | 3,103 | py | Python | derrida/urls.py | making-books-ren-today/test_eval_4_derrmar | 615796efeb517cd12cfb1f8b67e0150f6aaaea66 | [
"Apache-2.0"
] | null | null | null | derrida/urls.py | making-books-ren-today/test_eval_4_derrmar | 615796efeb517cd12cfb1f8b67e0150f6aaaea66 | [
"Apache-2.0"
] | null | null | null | derrida/urls.py | making-books-ren-today/test_eval_4_derrmar | 615796efeb517cd12cfb1f8b67e0150f6aaaea66 | [
"Apache-2.0"
] | null | null | null | """derrida URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
"""
from annotator_store import views as annotator_views
# from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.sitemaps import views as sitemap_views
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
import mezzanine.urls
import mezzanine.pages.views
from derrida.books.views import SearchView
from derrida.books import sitemaps as book_sitemaps
from derrida.outwork import sitemaps as page_sitemaps
sitemaps = {
'pages': page_sitemaps.PageSitemap,
'books': book_sitemaps.InstanceSitemap,
'book-references': book_sitemaps.InstanceReferencesSitemap,
'book-gallery': book_sitemaps.InstanceGallerySitemap,
'book-pages': book_sitemaps.CanvasSitemap,
}
urlpatterns = [
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt',
content_type='text/plain')),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico',
permanent=True)),
url(r'^sitemap\.xml$', sitemap_views.index, {'sitemaps': sitemaps},
name='sitemap-index'),
url(r'^sitemap-(?P<section>.+)\.xml$',
sitemap_views.sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
# home page managed via mezzanine, but needs a named url
url(r'^$', mezzanine.pages.views.page, {"slug": "/"}, name="home"),
# alternate homepage named url needed for djiffy templates
url(r'^$', mezzanine.pages.views.page, {"slug": "/"}, name="site-index"),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('pucas.cas_urls')),
url(r'^', include('derrida.books.urls', namespace='books')),
url(r'^search/$', SearchView.as_view(), name='search'),
url(r'^people/', include('derrida.people.urls', namespace='people')),
url(r'^places/', include('derrida.places.urls', namespace='places')),
url(r'^interventions/', include('derrida.interventions.urls',
namespace='interventions')),
url(r'^viaf/', include(('viapy.urls', 'viapy'), namespace='viaf')),
url(r'^outwork/', include('derrida.outwork.urls', namespace='outwork')),
# local version of djiffy urls
url(r'^admin/iiif-books/',
include(('derrida.interventions.iiif_urls', 'djiffy'),
namespace='djiffy')),
# annotations API
url(r'^annotations/api/', include('annotator_store.urls',
namespace='annotation-api')),
# annotatorjs doesn't handle trailing slash in api prefix url
url(r'^annotations/api', annotator_views.AnnotationIndex.as_view(),
name='annotation-api-prefix'),
# content pages managed by mezzanine
url("^", include(mezzanine.urls))
]
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
| 40.298701 | 77 | 0.702546 |
acde7059b7a0eafee47ced2fe1a5dca33f9e9fc1 | 13,082 | py | Python | core/domain/question_domain_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | [
"Apache-2.0"
] | null | null | null | core/domain/question_domain_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | [
"Apache-2.0"
] | 5 | 2018-06-09T02:05:45.000Z | 2018-09-20T13:53:42.000Z | core/domain/question_domain_test.py | steve7158/oppia | e2cae72fa5d3503c64d195f09d3460507697730c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for question domain objects."""
import datetime
from core.domain import question_domain
from core.domain import state_domain
from core.tests import test_utils
import feconf
import utils
class QuestionChangeTest(test_utils.GenericTestBase):
    """Test for Question Change object."""

    def test_to_dict(self):
        """Test to verify to_dict method of the Question Change object."""
        expected_object_dict = {
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': 'new_value',
            'old_value': 'old_value',
        }

        change_dict = {
            'cmd': 'update_question_property',
            'property_name': 'question_state_data',
            'new_value': 'new_value',
            'old_value': 'old_value',
        }
        observed_object = question_domain.QuestionChange(
            change_dict=change_dict,
        )

        self.assertEqual(expected_object_dict, observed_object.to_dict())

    def test_change_dict_without_cmd(self):
        """Test to verify __init__ method of the Question Change object
        when change_dict is without cmd key.
        """
        # Fix: ``assertRaises(Exception, callableObj=...)`` relies on the
        # ``callableObj`` keyword that was removed in Python 3; the
        # context-manager form is the portable, idiomatic spelling.
        with self.assertRaises(Exception):
            question_domain.QuestionChange(change_dict={})

    def test_change_dict_with_wrong_cmd(self):
        """Test to verify __init__ method of the Question Change object
        when change_dict is with wrong cmd value.
        """
        with self.assertRaises(Exception):
            question_domain.QuestionChange(change_dict={'cmd': 'wrong', })

    def test_update_question_property_with_wrong_property_name(self):
        """Test to verify __init__ method of the Question Change object
        when cmd is update_question_property and wrong property_name is given.
        """
        with self.assertRaises(Exception):
            question_domain.QuestionChange(change_dict={
                'cmd': 'update_question_property',
                'property_name': 'wrong',
            })

    def test_create_new_fully_specified_question(self):
        """Test to verify __init__ method of the Question Change object
        when cmd is create_new_fully_specified_question.
        """
        change_dict = {
            'cmd': 'create_new_fully_specified_question',
            'question_dict': {},
            'skill_id': '10',
        }
        observed_object = question_domain.QuestionChange(
            change_dict=change_dict,
        )

        self.assertEqual('10', observed_object.skill_id)
        self.assertEqual({}, observed_object.question_dict)

    def test_migrate_state_schema_to_latest_version(self):
        """Test to verify __init__ method of the Question Change object
        when cmd is migrate_state_schema_to_latest_version.
        """
        change_dict = {
            'cmd': 'migrate_state_schema_to_latest_version',
            'from_version': 0,
            'to_version': 10,
        }
        observed_object = question_domain.QuestionChange(
            change_dict=change_dict,
        )

        self.assertEqual(0, observed_object.from_version)
        self.assertEqual(10, observed_object.to_version)
class QuestionDomainTest(test_utils.GenericTestBase):
    """Tests for Question domain object."""

    def setUp(self):
        """Before each individual test, create a question."""
        super(QuestionDomainTest, self).setUp()
        question_state_data = self._create_valid_question_data('ABC')
        self.question = question_domain.Question(
            'question_id', question_state_data,
            feconf.CURRENT_STATES_SCHEMA_VERSION, 'en', 1)

    def test_to_and_from_dict(self):
        """Test to verify to_dict and from_dict methods
        of Question domain object.
        """
        default_question_state_data = (
            question_domain.Question.create_default_question_state())
        question_dict = {
            'id': 'col1.random',
            'question_state_data': default_question_state_data.to_dict(),
            'question_state_data_schema_version': (
                feconf.CURRENT_STATES_SCHEMA_VERSION),
            'language_code': 'en',
            'version': 1
        }

        observed_object = question_domain.Question.from_dict(question_dict)
        self.assertEqual(question_dict, observed_object.to_dict())

    def _assert_validation_error(self, expected_error_substring):
        """Checks that validating ``self.question`` raises a ValidationError
        whose message contains ``expected_error_substring``.

        (Docstring corrected: it previously claimed "the skill passes strict
        validation", which was a copy-paste error — this helper asserts
        FAILURE of question validation.)
        """
        with self.assertRaisesRegexp(
            utils.ValidationError, expected_error_substring):
            self.question.validate()

    def test_strict_validation(self):
        """Test to verify validate method of Question domain object with
        strict as True.
        """
        state = self.question.question_state_data

        # Each mutation below breaks one invariant on the SAME question
        # instance; errors are checked cumulatively, in sequence.
        state.interaction.solution = None
        self._assert_validation_error(
            'Expected the question to have a solution')

        state.interaction.hints = []
        self._assert_validation_error(
            'Expected the question to have at least one hint')

        state.interaction.default_outcome.dest = 'abc'
        self._assert_validation_error(
            'Expected all answer groups to have destination as None.')

        state.interaction.default_outcome.labelled_as_correct = False
        self._assert_validation_error(
            'Expected at least one answer group to have a correct answer')

    def test_strict_validation_for_answer_groups(self):
        """Test to verify validate method of Question domain object with
        strict as True for interaction with answer group.
        """
        state = self.question.question_state_data
        state.interaction.default_outcome.labelled_as_correct = False
        # Answer group with a non-None destination: must be rejected.
        state.interaction.answer_groups = [
            state_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': 'abc',
                    'feedback': {
                        'content_id': 'feedback_1',
                        'html': 'Feedback'
                    },
                    'labelled_as_correct': True,
                    'param_changes': [],
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'rule_specs': [{
                    'inputs': {
                        'x': 'Test'
                    },
                    'rule_type': 'Contains'
                }],
                'training_data': [],
                'tagged_misconception_id': None
            })
        ]

        self._assert_validation_error(
            'Expected all answer groups to have destination as None.')

    def test_strict_validation_passes(self):
        """Test to verify validate method of a finalized Question domain object
        with correct input.
        """
        try:
            self.question.validate()
        except utils.ValidationError:
            self.fail(msg='validate() raised ValidationError unexpectedly!')

    def test_not_strict_validation(self):
        """Test to verify validate method of Question domain object with
        strict as False.
        """
        self.question.language_code = 'abc'
        self._assert_validation_error('Invalid language code')

        self.question.question_state_data = 'State data'
        self._assert_validation_error(
            'Expected question state data to be a State object')

        self.question.question_state_data_schema_version = 'abc'
        self._assert_validation_error(
            'Expected schema version to be an integer')

        self.question.language_code = 1
        self._assert_validation_error('Expected language_code to be a string')

        self.question.version = 'abc'
        self._assert_validation_error('Expected version to be an integer')

        self.question.id = 123
        self._assert_validation_error('Expected ID to be a string')

    def test_create_default_question(self):
        """Test to verify create_default_question method of the Question domain
        object.
        """
        question_id = 'col1.random'
        question = question_domain.Question.create_default_question(
            question_id)
        default_question_data = (
            question_domain.Question.create_default_question_state().to_dict())

        self.assertEqual(question.id, question_id)
        self.assertEqual(
            question.question_state_data.to_dict(), default_question_data)
        self.assertEqual(question.language_code, 'en')
        self.assertEqual(question.version, 0)

    def test_update_language_code(self):
        """Test to verify update_language_code method of the Question domain
        object.
        """
        self.question.update_language_code('pl')

        self.assertEqual('pl', self.question.language_code)

    def test_update_question_state_data(self):
        """Test to verify update_question_state_data method of the Question
        domain object.
        """
        question_state_data = self._create_valid_question_data('Test')

        self.question.update_question_state_data(question_state_data.to_dict())

        self.assertEqual(
            question_state_data.to_dict(),
            self.question.question_state_data.to_dict()
        )
class QuestionSummaryTest(test_utils.GenericTestBase):
    """Test for Question Summary object."""

    def test_to_dict(self):
        """Test to verify to_dict method of the Question Summary
        object.
        """
        created_on = datetime.datetime(2018, 11, 17, 20, 2, 45, 0)
        last_updated = datetime.datetime(2018, 11, 17, 20, 3, 14, 0)

        summary = question_domain.QuestionSummary(
            creator_id='user_1',
            question_id='question_1',
            question_content='question content',
            question_model_created_on=created_on,
            question_model_last_updated=last_updated,
        )

        self.assertEqual(
            {
                'id': 'question_1',
                'creator_id': 'user_1',
                'question_content': u'question content',
                'last_updated_msec': utils.get_time_in_millisecs(last_updated),
                'created_on_msec': utils.get_time_in_millisecs(created_on),
            },
            summary.to_dict())
class QuestionSkillLinkDomainTest(test_utils.GenericTestBase):
    """Test for Question Skill Link Domain object."""

    def test_to_dict(self):
        """Test to verify to_dict method of the Question Skill Link Domain
        object.
        """
        link = question_domain.QuestionSkillLink(
            'testquestion', 'testskill', 'testskilldescription', 0.5)

        self.assertEqual(
            {
                'question_id': 'testquestion',
                'skill_id': 'testskill',
                'skill_description': 'testskilldescription',
                'skill_difficulty': 0.5,
            },
            link.to_dict())
class QuestionRightsDomainTest(test_utils.GenericTestBase):
    """Test for Question Rights Domain object."""

    def setUp(self):
        """Before each individual test, create a question and user."""
        super(QuestionRightsDomainTest, self).setUp()
        self.question_id = 'question_id'
        self.signup('user@example.com', 'User')
        self.question = question_domain.Question.create_default_question(
            self.question_id)

        self.user_id = self.get_user_id_from_email('user@example.com')

    def test_to_dict(self):
        """Test to verify to_dict method of the Question Rights Domain
        object.
        """
        rights = question_domain.QuestionRights(
            self.question_id, self.user_id)

        self.assertEqual(
            {
                'question_id': self.question_id,
                'creator_id': self.user_id
            },
            rights.to_dict())

    def test_is_creator(self):
        """Test to verify is_creator method of the Question Rights Domain
        object.
        """
        rights = question_domain.QuestionRights(
            self.question_id, self.user_id)

        self.assertTrue(rights.is_creator(self.user_id))
        self.assertFalse(rights.is_creator('fakeuser'))
| 37.164773 | 80 | 0.640498 |
acde71535406cd5520f37855e7593219386bbb08 | 125 | py | Python | mod/test6.py | kinten108101/yolo-skiesnet | 0ea169e6a0423636537c3ee97bd82832eec865a5 | [
"Apache-2.0"
] | null | null | null | mod/test6.py | kinten108101/yolo-skiesnet | 0ea169e6a0423636537c3ee97bd82832eec865a5 | [
"Apache-2.0"
] | null | null | null | mod/test6.py | kinten108101/yolo-skiesnet | 0ea169e6a0423636537c3ee97bd82832eec865a5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
class Tearable:
m = 1
def __init__(self):
print(self)
self.n = 1
m = Tearable()
m.n = 2
print(m.n) | 11.363636 | 20 | 0.632 |
acde7206bf78ec14f92b21c95341359be7b4df74 | 48 | py | Python | ledger/__init__.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | [
"MIT"
] | 10 | 2018-05-13T18:01:57.000Z | 2018-12-23T17:11:14.000Z | ledger/__init__.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | [
"MIT"
] | 88 | 2018-05-04T15:33:46.000Z | 2022-03-08T21:09:21.000Z | ledger/__init__.py | joatuapp/joatu-django | 5626d03ba89c55650ff5bff2e706ca0883ae3b9c | [
"MIT"
] | 7 | 2018-05-08T16:05:06.000Z | 2018-09-13T05:49:05.000Z | default_app_config = 'ledger.apps.LedgerConfig'
| 24 | 47 | 0.833333 |
acde729a3eaa1dd5a80b3d9ecf2d199126e778c6 | 548 | gyp | Python | library/boost-integer/1.62.0.gyp | KjellSchubert/bru | dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | [
"MIT"
] | 3 | 2015-01-06T15:22:16.000Z | 2015-11-27T18:13:04.000Z | library/boost-integer/1.62.0.gyp | KjellSchubert/bru | dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | [
"MIT"
] | 7 | 2015-02-10T15:13:38.000Z | 2021-05-30T07:51:13.000Z | library/boost-integer/1.62.0.gyp | KjellSchubert/bru | dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | [
"MIT"
] | 3 | 2015-01-29T17:19:53.000Z | 2016-01-06T12:50:06.000Z | {
"targets": [
{
"target_name": "boost-integer",
"type": "none",
"include_dirs": [
"1.62.0/integer-boost-1.62.0/include"
],
"all_dependent_settings": {
"include_dirs": [
"1.62.0/integer-boost-1.62.0/include"
]
},
"dependencies": [
"../boost-config/boost-config.gyp:*",
"../boost-static_assert/boost-static_assert.gyp:*"
]
}
]
}
| 26.095238 | 66 | 0.390511 |
acde73aa8a72b3318a15090378af859c36a0149a | 13,709 | py | Python | lightautoml/dataset/base.py | tony20202021/LightAutoML | 2eaa05b27c63c613965d50cdb7d52da5d245d9af | [
"Apache-2.0"
] | 1 | 2021-11-06T20:08:37.000Z | 2021-11-06T20:08:37.000Z | lightautoml/dataset/base.py | DESimakov/LightAutoML | 2eaa05b27c63c613965d50cdb7d52da5d245d9af | [
"Apache-2.0"
] | null | null | null | lightautoml/dataset/base.py | DESimakov/LightAutoML | 2eaa05b27c63c613965d50cdb7d52da5d245d9af | [
"Apache-2.0"
] | null | null | null | """Contains base classes for internal dataset interface."""
from copy import copy # , deepcopy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TypeVar
from typing import Union
from ..tasks.base import Task
from .roles import ColumnRole
valid_array_attributes = ("target", "group", "folds", "weights")
array_attr_roles = ("Target", "Group", "Folds", "Weights")
# valid_tasks = ('reg', 'binary', 'multiclass') # TODO: Add multiclass and multilabel. Refactor for some dataset and pipes needed
# valid_tasks = ('reg', 'binary')
RolesDict = Dict[str, ColumnRole]
IntIdx = Union[Sequence[int], int]
RowSlice = Optional[Union[Sequence[int], Sequence[bool]]]
ColSlice = Optional[Union[Sequence[str], str]]
class LAMLColumn:
    """Container pairing a single column of data with its role."""

    def __init__(self, data: Any, role: ColumnRole):
        """Store the column values together with the role describing them.

        Args:
            data: 1d array like.
            role: Column role.
        """
        self.data = data
        self.role = role

    def __repr__(self) -> str:
        """Delegate the representation to the underlying data.

        Returns:
            String with data representation.
        """
        return repr(self.data)
class LAMLDataset:
"""Basic class to create dataset."""
# TODO: Create checks here
_init_checks = () # list of functions that checks that _array_like_attrs are valid
_data_checks = () # list of functions that checks that data in .set_data is valid for _array_like_attrs
_concat_checks = () # list of functions that checks that datasets for concatenation are valid
_dataset_type = "LAMLDataset"
def __init__(
self,
data: Any,
features: Optional[list],
roles: Optional[RolesDict],
task: Optional[Task] = None,
**kwargs: Any
):
"""Create dataset with given data, features, roles and special attributes.
Args:
data: 2d array of data of special type for each dataset type.
features: Feature names or None for empty data.
roles: Features roles or None for empty data.
task: Task for dataset if train/valid.
**kwargs: Special named array of attributes (target, group etc..).
"""
if features is None:
features = []
if roles is None:
roles = {}
self._initialize(task, **kwargs)
if data is not None:
self.set_data(data, features, roles)
def __len__(self):
"""Get count of rows in dataset.
Returns:
Number of rows in dataset.
"""
return self.shape[0]
def __repr__(self):
"""Get str representation.
Returns:
String with data representation.
"""
# TODO: View for empty
return self.data.__repr__()
# default behavior and abstract methods
    def __getitem__(
        self, k: Tuple[RowSlice, ColSlice]
    ) -> Union["LAMLDataset", LAMLColumn]:
        """Select a subset of dataset.

        Define how to slice a dataset
        in way ``dataset[[1, 2, 3...], ['feat_0', 'feat_1'...]]``.
        Default behavior based on ``._get_cols``, ``._get_rows``, ``._get_2d``.

        Args:
            k: First element optional integer columns indexes,
                second - optional feature name or list of features names.

        Returns:
            :class:`LAMLColumn` when a single feature name is requested,
            otherwise a dataset of the same type with the selected subset.
        """
        # TODO: Maybe refactor this part?
        # A bare (non-tuple) key means row selection only, all columns kept.
        if type(k) is tuple:
            rows, cols = k
        else:
            rows = k
            cols = None

        # case when columns are defined
        if cols is not None:
            idx = self._get_cols_idx(cols)
            data = self._get_2d(self.data, (rows, idx))

            # case of single column - return LAMLColumn
            if isinstance(cols, str):
                dataset = LAMLColumn(
                    self._get_2d(self.data, (rows, idx)), role=self.roles[cols]
                )

                return dataset
            # case of multiple columns - return LAMLDataset
            # Iterating self.roles preserves the dataset's feature ordering.
            roles = dict(((x, self.roles[x]) for x in self.roles if x in cols))
            features = [x for x in cols if x in set(self.features)]
        else:
            data, roles, features = self.data, self.roles, self.features

        # case when rows are defined
        if rows is None:
            dataset = self.empty()
        else:
            dataset = copy(self)
            # Slice every special array attribute (target, folds, ...) with
            # the same row selector before re-initializing the copy.
            params = dict(
                (
                    (x, self._get_rows(self.__dict__[x], rows))
                    for x in self._array_like_attrs
                )
            )
            dataset._initialize(self.task, **params)
            data = self._get_rows(data, rows)

        dataset.set_data(data, features, roles)

        return dataset
def __setitem__(self, k: str, val: Any):
"""Inplace set values for single column (in default implementation).
Args:
k: Feature name.
val: :class:`~lightautoml.dataset.base.LAMLColumn`
or 1d array like.
"""
assert (
k in self.features
), "Can only replace existed columns in default implementations."
idx = self._get_cols_idx(k)
# for case when setting col and change role
if type(val) is LAMLColumn:
assert (
val.role.dtype == self.roles[k].dtype
), "Inplace changing types unavaliable."
self._set_col(self.data, idx, val.data)
self.roles[k] = val.role
# for case only changing column values
else:
self._set_col(self.data, idx, val)
def __getattr__(self, item: str) -> Any:
"""Get item for key features as target/folds/weights etc.
Args:
item: Attribute name.
Returns:
Attribute value.
"""
if item in valid_array_attributes:
return None
raise AttributeError
    @property
    def features(self) -> list:
        """Define how to get features names list.

        Returns a new list on every access, so mutating the result does not
        affect the dataset.

        Returns:
            Features names.
        """
        return list(self._features)
    @features.setter
    def features(self, val: list):
        """Define how to set features list.

        Stores a shallow copy so later mutation of ``val`` by the caller
        does not leak into the dataset.

        Args:
            val: Features names.
        """
        self._features = copy(val)
    @property
    def data(self) -> Any:
        """Get data attribute (the raw 2d feature container).

        Returns:
            Any, array like or ``None`` for an empty dataset.
        """
        return self._data
    @data.setter
    def data(self, val: Any):
        """Set data array or ``None``. Stored by reference, no copy is made.

        Args:
            val: Some data or ``None``.
        """
        self._data = val
    @property
    def roles(self) -> RolesDict:
        """Get roles dict.

        Returns a shallow copy - mutating the returned dict does NOT
        change the dataset's roles.

        Returns:
            Dict of feature roles.
        """
        return copy(self._roles)
    @roles.setter
    def roles(self, val: RolesDict):
        """Set roles dict.

        Rebuilds the mapping in current ``features`` order; roles for names
        not present in ``features`` are dropped (KeyError if one is missing).

        Args:
            val: Roles dict.
        """
        self._roles = dict(((x, val[x]) for x in self.features))
@property
def inverse_roles(self) -> Dict[ColumnRole, List[str]]:
"""Get inverse dict of feature roles.
Returns:
dict, keys - roles, values - features names.
"""
inv_roles = {}
roles = self.roles
for k in roles:
r = roles[k]
if r in inv_roles:
inv_roles[r].append(k)
else:
inv_roles[r] = [k]
return inv_roles
    def _initialize(self, task: Optional[Task], **kwargs: Any):
        """Initialize empty dataset with task and array like attributes.

        Resets data/features/roles to empty; callers pass the actual
        feature matrix via :meth:`set_data` afterwards.

        Args:
            task: Task name for dataset.
            **kwargs: 1d arrays like attrs like target, group etc.
                Keys must be members of ``valid_array_attributes``.
        """
        assert all(
            [x in valid_array_attributes for x in kwargs]
        ), "Unknown array attribute. Valid are {0}".format(valid_array_attributes)
        self.task = task
        # here we set target and group and so ...
        self._array_like_attrs = []
        for k in kwargs:
            self._array_like_attrs.append(k)
            self.__dict__[k] = kwargs[k]
        # checks for valid values in target, groups ...
        for check in self._init_checks:
            check(self)
        # set empty attributes
        self._data = None
        self._features = []
        self._roles = {}
    def set_data(self, data: Any, features: Any, roles: Any):
        """Inplace set data, features, roles for empty dataset.

        Runs the class-level ``_data_checks`` after assignment, so invalid
        combinations raise here rather than at first use.

        Args:
            data: 2d array like or ``None``.
            features: List of features names.
            roles: Roles dict.
        """
        self.data = data
        self.features = features
        self.roles = roles
        # data checks
        for check in self._data_checks:
            check(self)
    def empty(self) -> "LAMLDataset":
        """Get new dataset for same task and targets, groups, without features.

        Array-like attributes (target etc.) are carried over BY REFERENCE,
        not copied.

        Returns:
            New empty dataset.
        """
        dataset = copy(self)
        params = dict(((x, self.__dict__[x]) for x in self._array_like_attrs))
        dataset._initialize(self.task, **params)
        return dataset
def _get_cols_idx(self, columns: Sequence) -> Union[List[int], int]:
"""Get numeric index of columns by column names.
Args:
columns: Features names.
Returns:
List of integer indexes of single int.
"""
if isinstance(columns, str):
idx = self.features.index(columns)
else:
idx = [self.features.index(x) for x in columns]
return idx
    # default calculated properties
    @property
    def shape(self) -> Tuple[Optional[int], Optional[int]]:
        """Get size of 2d feature matrix.

        Returns:
            Tuple of 2 elements: ``(n_rows, n_cols)``. For an empty dataset
            (``data is None``) cols is ``None`` and rows falls back to the
            length of the first array-like attribute, if any.
        """
        rows, cols = None, None
        try:
            # len(None) raises TypeError when the dataset holds no data yet.
            rows, cols = len(self.data), len(self.features)
        except TypeError:
            if len(self._array_like_attrs) > 0:
                rows = len(self.__dict__[self._array_like_attrs[0]])
        return rows, cols
    # static methods - how to make 1d slice, 2s slice, concat of feature matrix etc ...
    @staticmethod
    def _hstack(datasets: Sequence[Any]) -> Any:
        """Abstract method - define horizontal stack of feature arrays.

        Args:
            datasets: Sequence of feature arrays.

        Returns:
            Single feature array.

        Raises:
            NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError("Horizontal Stack not implemented.")
    @staticmethod
    def _get_rows(data, k: IntIdx) -> Any:
        """Abstract - define how to make rows slice of feature array.

        Args:
            data: 2d feature array.
            k: Sequence of int indexes or int.

        Returns:
            2d feature array.

        Raises:
            NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError("Row Slice not Implemented.")
    @staticmethod
    def _get_cols(data, k: IntIdx) -> Any:
        """Abstract - define how to make columns slice of feature array.

        Args:
            data: 2d feature array.
            k: Sequence indexes or single index.

        Returns:
            2d feature array.

        Raises:
            NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError("Column Slice not Implemented.")
    # TODO: remove classmethod here ?
    @classmethod
    def _get_2d(cls, data: Any, k: Tuple[IntIdx, IntIdx]) -> Any:
        """Default implementation of 2d slice based on rows slice and columns slice.

        Columns are sliced first, then rows, so subclasses only need to
        implement the two 1d primitives.

        Args:
            data: 2d feature array.
            k: Tuple of integer sequences or 2 int.

        Returns:
            2d feature array.
        """
        rows, cols = k
        return cls._get_rows(cls._get_cols(data, cols), rows)
    @staticmethod
    def _set_col(data: Any, k: int, val: Any):
        """Abstract - set a value of single column by column index inplace.

        Args:
            data: 2d feature array.
            k: Column idx.
            val: 1d column value.

        Raises:
            NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError("Column setting inplace not implemented.")
@classmethod
def concat(cls, datasets: Sequence["LAMLDataset"]) -> "LAMLDataset":
"""Concat multiple dataset.
Default behavior - takes empty dataset from datasets[0]
and concat all features from others.
Args:
datasets: Sequence of datasets.
Returns:
Concated dataset.
"""
for check in cls._concat_checks:
check(datasets)
dataset = datasets[0].empty()
data = []
features = []
roles = {}
for ds in datasets:
data.append(ds.data)
features.extend(ds.features)
roles = {**roles, **ds.roles}
data = cls._hstack(data)
dataset.set_data(data, features, roles)
return dataset
def drop_features(self, droplist: Sequence[str]):
"""Inplace drop columns from dataset.
Args:
droplist: Feature names.
Returns:
Dataset without columns.
"""
if len(droplist) == 0:
return self
return self[:, [x for x in self.features if x not in droplist]]
    @staticmethod
    def from_dataset(dataset: "LAMLDataset") -> "LAMLDataset":
        """Abstract method - how to create this type of dataset from others.

        Args:
            dataset: Original type dataset.

        Returns:
            Converted type dataset.

        Raises:
            NotImplementedError: Always, in this base implementation.
        """
        raise NotImplementedError
    @property
    def dataset_type(self):
        """Get dataset type identifier (class-level ``_dataset_type``)."""
        return self._dataset_type
| 26.775391 | 129 | 0.561529 |
acde74c98093267f9468ef8ad80c6f2530cb5dbb | 4,057 | py | Python | sandbox/lib/jumpscale/JumpScale9Lib/clients/zero_os/sal/grafana/grafana.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 2 | 2017-06-07T08:11:47.000Z | 2017-11-10T02:19:48.000Z | JumpScale9Lib/clients/zero_os/sal/grafana/grafana.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 188 | 2017-06-21T06:16:13.000Z | 2020-06-17T14:20:24.000Z | sandbox/lib/jumpscale/JumpScale9Lib/clients/zero_os/sal/grafana/grafana.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 3 | 2018-06-12T05:18:28.000Z | 2019-09-24T06:49:17.000Z | import signal
import time
import requests
from js9 import j
class Grafana():
def __init__(self, container, ip, port, url):
self.container = container
self.ip = ip
self.port = port
self.url = url
self._client = None
@property
def client(self):
if not self._client:
self._client = j.clients.grafana.get(url='http://%s:%d' % (
self.ip, self.port), username='admin', password='admin')
return self._client
def apply_config(self):
f = self.container.client.filesystem.open('/opt/grafana/conf/defaults.ini')
try:
template = self.container.client.filesystem.read(f)
finally:
self.container.client.filesystem.close(f)
template = template.replace(b'3000', str(self.port).encode())
if self.url:
template = template.replace(b'root_url = %(protocol)s://%(domain)s:%(http_port)s/', b'root_url = %s' % self.url.encode())
self.container.client.filesystem.mkdir('/etc/grafana/')
self.container.upload_content('/etc/grafana/grafana.ini', template)
@property
def PID(self):
for process in self.container.client.process.list():
if 'grafana-server' in process['cmdline']:
return process['pid']
return None
def is_running(self):
if self.client.ping():
return True
return False
def stop(self, timeout=30):
if not self.is_running():
return
self.container.client.process.kill(self.PID, signal.SIGTERM)
start = time.time()
end = start + timeout
is_running = self.is_running()
while is_running and time.time() < end:
time.sleep(1)
is_running = self.is_running()
if is_running:
raise RuntimeError('Failed to stop grafana.')
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
def start(self, timeout=45):
is_running = self.is_running()
if is_running:
return
self.apply_config()
if not self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.open_port(self.port)
self.container.client.system(
'grafana-server -config /etc/grafana/grafana.ini -homepath /opt/grafana')
time.sleep(1)
start = time.time()
end = start + timeout
is_running = self.is_running()
while not is_running and time.time() < end:
time.sleep(1)
is_running = self.is_running()
if not is_running:
if self.container.node.client.nft.rule_exists(self.port):
self.container.node.client.nft.drop_port(self.port)
raise RuntimeError('Failed to start grafana.')
def add_data_source(self, database, name, ip, port, count):
data = {
'type': 'influxdb',
'access': 'proxy',
'database': database,
'name': name,
'url': 'http://%s:%u' % (ip, port),
'user': 'admin',
'password': 'passwd',
'default': True,
}
now = time.time()
while time.time() - now < 10:
try:
self.client.addDataSource(data)
if len(self.client.listDataSources()) == count + 1:
continue
break
except requests.exceptions.ConnectionError:
time.sleep(1)
pass
def delete_data_source(self, name):
count = len(self.client.listDataSources())
now = time.time()
while time.time() - now < 10:
try:
self.client.deleteDataSource(name)
if len(self.client.listDataSources()) == count - 1:
continue
break
except requests.exceptions.ConnectionError:
time.sleep(1)
pass
| 30.969466 | 133 | 0.559034 |
acde76eeed500368246f9dfcdf01f7682caf163b | 3,690 | py | Python | src/classes/dino.py | lauramorillo/pyRex | 193692a0d9f9e589599b4714416995c21d6fb1d1 | [
"MIT"
] | null | null | null | src/classes/dino.py | lauramorillo/pyRex | 193692a0d9f9e589599b4714416995c21d6fb1d1 | [
"MIT"
] | 8 | 2020-01-28T22:32:05.000Z | 2022-02-10T00:09:55.000Z | src/classes/dino.py | lauramorillo/pyRex | 193692a0d9f9e589599b4714416995c21d6fb1d1 | [
"MIT"
] | 2 | 2018-11-24T00:58:22.000Z | 2019-07-09T22:11:53.000Z | import os
import sys
import pygame
import random
import pygame
from pygame import *
from src.utils.images import load_image, load_sprite_sheet
from src.utils.numeric import extractDigits
from src.utils.sounds import *
class Dino():
    """The player-controlled dinosaur sprite for the pyRex runner game.

    Tracks jump/duck/blink/dead states, animates via sprite-sheet frame
    indexes driven by a frame counter, and scores one point every 7 frames.
    """

    def __init__(self, screen, sizex=-1,sizey=-1, scr_size=(600,150)):
        self.screen = screen
        self.scr_width = scr_size[0]
        self.scr_height = scr_size[1]
        self.gravity = 0.6  # per-frame downward acceleration
        # 5-frame standing sheet and 2-frame ducking sheet.
        self.images,self.rect = load_sprite_sheet('dino.png',5,1,sizex,sizey,-1)
        self.images1,self.rect1 = load_sprite_sheet('dino_ducking.png',2,1,59,sizey,-1)
        # Ground line sits at 98% of screen height.
        self.rect.bottom = int(0.98*self.scr_height)
        self.rect.left = self.scr_width/15
        self.image = self.images[0]
        self.index = 0      # current animation frame index
        self.counter = 0    # frame counter driving animation/score
        self.score = 0
        self.isJumping = False
        self.isDead = False
        self.isDucking = False
        self.isBlinking = False
        self.movement = [0,0]       # per-frame (dx, dy)
        self.jumpSpeed = 11.5       # initial upward speed on jump
        self.last_instruction = 0   # last autopilot action, to edge-trigger
        self.JUMP_ORDER = 2
        self.DUCK_ORDER = 1
        self.stand_pos_width = self.rect.width
        self.duck_pos_width = self.rect1.width
    def draw(self):
        """Blit the current frame at the current rect."""
        self.screen.blit(self.image,self.rect)
    def checkbounds(self):
        """Clamp the dino to ground level and end the jump on landing."""
        if self.rect.bottom > int(0.98*self.scr_height):
            self.rect.bottom = int(0.98*self.scr_height)
            self.isJumping = False
    def jump(self):
        """Start a jump (only when standing on the ground)."""
        if self.rect.bottom == int(0.98 * self.scr_height):
            self.isJumping = True
            if pygame.mixer.get_init() != None:
                jump_sound.play()
            self.movement[1] = - self.jumpSpeed
    def duck(self):
        """Enter ducking state unless mid-jump-and-dead."""
        if not (self.isJumping and self.isDead):
            self.isDucking = True
    def stand_up(self):
        """Leave ducking state."""
        self.isDucking = False
    def autopilot(self, action):
        """Apply an external controller action (edge-triggered on change).

        Args:
            action: JUMP_ORDER (2), DUCK_ORDER (1) or anything else (stand).
        """
        if action != self.last_instruction:
            if action == self.JUMP_ORDER:
                self.jump()
            elif action == self.DUCK_ORDER:
                self.duck()
            else:
                self.stand_up()
            self.last_instruction = action
    def check_collision(self, obj):
        """Pixel-perfect collision test; marks the dino dead on hit."""
        if pygame.sprite.collide_mask(self, obj):
            self.isDead = True
            if pygame.mixer.get_init() != None:
                die_sound.play()
    def update(self):
        """Advance one frame: physics, animation frame, position, score."""
        if self.isJumping:
            self.movement[1] = self.movement[1] + self.gravity
        # Select the animation frame for the current state.
        if self.isJumping:
            self.index = 0
        elif self.isBlinking:
            # Blink: frame 0 held ~400 frames, frame 1 held ~20 frames.
            if self.index == 0:
                if self.counter % 400 == 399:
                    self.index = (self.index + 1)%2
            else:
                if self.counter % 20 == 19:
                    self.index = (self.index + 1)%2
        elif self.isDucking:
            if self.counter % 5 == 0:
                self.index = (self.index + 1)%2
        else:
            # Running uses frames 2-3 of the standing sheet.
            if self.counter % 5 == 0:
                self.index = (self.index + 1)%2 + 2
        if self.isDead:
            self.index = 4
        if not self.isDucking:
            self.image = self.images[self.index]
            self.rect.width = self.stand_pos_width
        else:
            self.image = self.images1[(self.index)%2]
            self.rect.width = self.duck_pos_width
        self.rect = self.rect.move(self.movement)
        self.checkbounds()
        # One point every 7 frames while alive; checkpoint sound every 100.
        if not self.isDead and self.counter % 7 == 6 and self.isBlinking == False:
            self.score += 1
            if self.score % 100 == 0 and self.score != 0:
                if pygame.mixer.get_init() != None:
                    checkPoint_sound.play()
        self.counter = (self.counter + 1)
acde7742a46725d35bfe1278ffbe738989f5c244 | 6,796 | py | Python | sto/ethereum/issuance.py | SiddharthMalhotra/sto | 7f3bab889b49e730b2fdd550546fd1e22d0d63d2 | [
"Apache-2.0"
] | null | null | null | sto/ethereum/issuance.py | SiddharthMalhotra/sto | 7f3bab889b49e730b2fdd550546fd1e22d0d63d2 | [
"Apache-2.0"
] | null | null | null | sto/ethereum/issuance.py | SiddharthMalhotra/sto | 7f3bab889b49e730b2fdd550546fd1e22d0d63d2 | [
"Apache-2.0"
] | null | null | null | """Issuing out tokenised shares."""
from decimal import Decimal
from logging import Logger
import colorama
import requests
from tqdm import tqdm
from sto.ethereum.txservice import EthereumStoredTXService, verify_on_etherscan
from sto.ethereum.utils import get_abi, check_good_private_key, create_web3
from sto.ethereum.exceptions import BadContractException
from sto.models.implementation import BroadcastAccount, PreparedTransaction
from sqlalchemy.orm import Session
from typing import Union, Optional, List, Iterable
from web3 import Web3
from web3.exceptions import BadFunctionCallOutput
class NeedAPIKey(RuntimeError):
    """Raised when an EtherScan API key is required but missing from config."""
    pass
def deploy_token_contracts(logger: Logger,
                           dbsession: Session,
                           network: str,
                           ethereum_node_url: Union[str, Web3],
                           ethereum_abi_file: Optional[str],
                           ethereum_private_key: Optional[str],
                           ethereum_gas_limit: Optional[int],
                           ethereum_gas_price: Optional[int],
                           name: str,
                           symbol: str,
                           url: str,
                           amount: int,
                           transfer_restriction: str):
    """Issue out a new Ethereum token.

    Prepares (does not broadcast) four transactions: deploy the security
    token, deploy the transfer policy, wire the policy into the token, and
    mint the initial share supply.

    Args:
        amount: Whole-share count; scaled by 10**18 on chain.
        transfer_restriction: Only "unrestricted" is supported for now.

    Returns:
        List of the four prepared transactions, in execution order.
    """
    assert type(amount) == int
    decimals = 18  # Everything else is bad idea
    check_good_private_key(ethereum_private_key)
    abi = get_abi(ethereum_abi_file)
    web3 = create_web3(ethereum_node_url)
    # We do not have anything else implemented yet
    assert transfer_restriction == "unrestricted"
    service = EthereumStoredTXService(network, dbsession, web3, ethereum_private_key, ethereum_gas_price, ethereum_gas_limit, BroadcastAccount, PreparedTransaction)
    # Deploy security token
    note = "Deploying token contract for {}".format(name)
    deploy_tx1 = service.deploy_contract("SecurityToken", abi, note, constructor_args={"_name": name, "_symbol": symbol, "_url": url}) # See SecurityToken.sol
    # Deploy transfer agent
    note = "Deploying unrestricted transfer policy for {}".format(name)
    deploy_tx2 = service.deploy_contract("UnrestrictedTransferAgent", abi, note)
    # Set transfer agent
    note = "Making transfer restriction policy for {} effective".format(name)
    contract_address = deploy_tx1.contract_address
    update_tx1 = service.interact_with_contract("SecurityToken", abi, contract_address, note, "setTransactionVerifier", {"newVerifier": deploy_tx2.contract_address})
    # Issue out initial shares
    note = "Creating {} initial shares for {}".format(amount, name)
    contract_address = deploy_tx1.contract_address
    amount_18 = int(amount * 10**decimals)
    update_tx2 = service.interact_with_contract("SecurityToken", abi, contract_address, note, "issueTokens", {"value": amount_18})
    logger.info("Prepared transactions for broadcasting for network %s", network)
    logger.info("STO token contract address will be %s%s%s", colorama.Fore.LIGHTGREEN_EX, deploy_tx1.contract_address, colorama.Fore.RESET)
    return [deploy_tx1, deploy_tx2, update_tx1, update_tx2]
def contract_status(logger: Logger,
                    dbsession: Session,
                    network: str,
                    ethereum_node_url: str,
                    ethereum_abi_file: str,
                    ethereum_private_key: str,
                    ethereum_gas_limit: str,
                    ethereum_gas_price: str,
                    token_contract: str):
    """Poll STO contract status and log a human-readable summary.

    Args:
        token_contract: Address of the deployed SecurityToken contract.

    Returns:
        Dict with name, symbol, total supply and broadcast account balance.

    Raises:
        BadContractException: If the address does not respond like a token
            contract (bad function call output).
    """
    abi = get_abi(ethereum_abi_file)
    web3 = create_web3(ethereum_node_url)
    service = EthereumStoredTXService(network, dbsession, web3, ethereum_private_key, ethereum_gas_price, ethereum_gas_limit, BroadcastAccount, PreparedTransaction)
    contract = service.get_contract_proxy("SecurityToken", abi, token_contract)
    try:
        logger.info("Name: %s", contract.functions.name().call())
        logger.info("Symbol: %s", contract.functions.symbol().call())
        supply = contract.functions.totalSupply().call()
        human_supply = Decimal(supply) / Decimal(10 ** contract.functions.decimals().call())
        raw_balance = contract.functions.balanceOf(service.get_or_create_broadcast_account().address).call()
        normal_balance = Decimal(raw_balance) / Decimal(10 ** contract.functions.decimals().call())
        logger.info("Total supply: %s", human_supply)
        logger.info("Decimals: %d", contract.functions.decimals().call())
        logger.info("Owner: %s", contract.functions.owner().call())
        logger.info("Broadcast account token balance: %f", normal_balance)
        logger.info("Transfer verified: %s", contract.functions.transferVerifier().call())
    except BadFunctionCallOutput as e:
        # Chain the original web3 error explicitly so debugging keeps the cause.
        raise BadContractException("Looks like this is not a token contract address. Please check on EtherScan that the address presents the token contract") from e
    return {
        "name": contract.functions.name().call(),
        "symbol": contract.functions.symbol().call(),
        "totalSupply": contract.functions.totalSupply().call(),
        "broadcastBalance": raw_balance,
    }
def verify_source_code(logger: Logger,
                       dbsession: Session,
                       network: str,
                       etherscan_api_key: str,
                       ):
    """Verify source code of all unverified deployment transactions.

    Args:
        etherscan_api_key: Required; obtained from https://etherscan.io.

    Returns:
        List of the transactions that were processed (possibly empty).

    Raises:
        NeedAPIKey: If no API key was configured.
    """
    if not etherscan_api_key:
        raise NeedAPIKey("You need to give EtherScan API key in the configuration file. Get one from https://etherscan.io")
    unverified_txs = dbsession.query(PreparedTransaction).filter_by(verified_at=None, result_transaction_success=True, contract_deployment=True)
    # COUNT once - each .count() call executes a database query.
    num_unverified = unverified_txs.count()
    logger.info("Found %d unverified contract deployments on %s", num_unverified, network)
    if num_unverified == 0:
        logger.info("No transactions to verify.")
        return []
    unverified_txs = list(unverified_txs)
    # HTTP keep-alive
    session = requests.Session()
    for tx in unverified_txs:  # type: PreparedTransaction
        logger.info("Verifying %s for %s", tx.contract_address, tx.human_readable_description)
        verify_on_etherscan(logger, network, tx, etherscan_api_key, session)
        dbsession.commit()  # Try to minimise file system sync issues
    return unverified_txs
def past_issuances(logger: Logger, dbsession: Session) -> Iterable[PreparedTransaction]:
    """Yield past token contract deployment transactions.

    Args:
        logger: Kept for interface parity with sibling functions (unused).
        dbsession: Active SQLAlchemy session.

    Yields:
        Prepared transactions that deployed a token contract.
    """
    deployments = dbsession.query(PreparedTransaction).filter_by(contract_deployment=True)
    yield from (tx for tx in deployments if tx.is_token_contract_deployment())
acde780aa05115b2bb4a7a211d8c22602f8cd44c | 3,625 | py | Python | VirtualBox-5.0.0/src/VBox/GuestHost/OpenGL/packer/opcodes.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | 1 | 2015-04-30T14:18:45.000Z | 2015-04-30T14:18:45.000Z | VirtualBox-5.0.0/src/VBox/GuestHost/OpenGL/packer/opcodes.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | VirtualBox-5.0.0/src/VBox/GuestHost/OpenGL/packer/opcodes.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This script generates include/cr_opcodes.h from the gl_header.parsed file.
import sys;
import cPickle;
import string;
import re;
import apiutil
apiutil.CopyrightC()
print ""
print "/* DO NOT EDIT - THIS FILE GENERATED BY THE opcodes.py SCRIPT */"
print ""
print "#ifndef CR_OPCODES_H"
print "#define CR_OPCODES_H"
print ""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
assert len(keys) > 0
# --- First enum: sequential opcodes for all "pack"-able GL entry points. ---
print "/* Functions with no return value and input-only parameters */"
print "typedef enum {"
enum_index = 0
for func in keys:
	if "pack" in apiutil.ChromiumProps(func):
		print "\t%s = %d," % ( apiutil.OpcodeName(func), enum_index )
		enum_index = enum_index + 1
# Reserved control opcodes follow the last function opcode.
print "\tCR_EXTEND_OPCODE=%d," % enum_index
enum_index = enum_index + 1
print "\tCR_CMDBLOCKBEGIN_OPCODE=%d," % enum_index
enum_index = enum_index + 1
print "\tCR_CMDBLOCKEND_OPCODE=%d," % enum_index
enum_index = enum_index + 1
print "\tCR_CMDBLOCKFLUSH_OPCODE=%d," % enum_index
print "\tCR_NOP_OPCODE=255"
# Opcodes are a single byte; overflow means a function must move to extpack.
if enum_index > 254:
	# This would have saved Mike some grief if it had been here earlier.
	print >> sys.stderr, "You have more than 255 opcodes! You've been adding functions to"
	print >> sys.stderr, "glapi_parser/APIspec! Each new function you add"
	print >> sys.stderr, "gets an opcode assigned to it. Fortunately for you, we have"
	print >> sys.stderr, "an ``extend'' opcode. Please mark the function as"
	print >> sys.stderr, "'extpack' in APIspec so as to keep the main opcode pool"
	print >> sys.stderr, "less than 255! THIS IS A CATASTROPHIC FAILURE, and I WILL NOT CONTINUE!"
	print >> sys.stderr, "I'm putting an error in the generated header file so you won't miss"
	print >> sys.stderr, "this even if you're doing a 'make -k.'"
	print "#error -- more than 255 opcodes!"
	sys.exit(-1)
print "} CROpcode;\n"
# count up number of extended opcode commands
num_extends = 0
num_auto_codes = 0
for func in keys:
	if "extpack" in apiutil.ChromiumProps(func):
		num_extends += 1
		if apiutil.ChromiumRelOpCode(func) < 0:
			num_auto_codes += 1
# sanity check for compatibility breakage
# we currently have 304
if num_auto_codes != 304:
	print >> sys.stderr, "number of auto-generated op-codes should be 304, but is " + str(num_auto_codes)
	print >> sys.stderr, "which breaks backwards compatibility"
	print >> sys.stderr, "if this is really what you want to do, please adjust this script"
	print >> sys.stderr, "to handle a new auto-generated opcodes count"
	print "#error -- num_auto_codes should be 304, but is " + str(num_auto_codes)
	sys.exit(-1)
# --- Second enum: extended opcodes. Auto-assigned codes come first
# (0..num_auto_codes-1); explicit relative codes are offset past them. ---
print "/* Functions with a return value or output parameters */"
print "typedef enum {"
opcode_index = 0
enum_index = 0
chrelopcodes = {}
for func in keys:
	if "extpack" in apiutil.ChromiumProps(func):
		opcodeName = apiutil.ExtendedOpcodeName(func)
		chrelopcode = apiutil.ChromiumRelOpCode(func)
		opcode = -1
		if chrelopcode >= 0:
			# Explicit relative opcode: must be unique across all extends.
			if not chrelopcode in chrelopcodes.keys():
				chrelopcodes[chrelopcode] = chrelopcode
			else:
				print >> sys.stderr, "non-unique chrelopcode: " + str(chrelopcode)
				print "#error -- non-unique chrelopcode: " + str(num_auto_codes)
				sys.exit(-1)
			opcode = num_auto_codes + chrelopcode
		else:
			opcode = opcode_index
			opcode_index = opcode_index + 1
		# Omit the trailing comma on the last enumerator for strict C.
		if enum_index != num_extends-1:
			print "\t%s = %d," % (opcodeName, opcode )
		else:
			print "\t%s = %d" % (opcodeName, opcode )
		enum_index = enum_index + 1
print "} CRExtendOpcode;\n"
print "#endif /* CR_OPCODES_H */"
acde78288979b62b77976f83dada62f4422b14c6 | 7,405 | py | Python | parse_file.py | ames0k0/TestWork | 76bfa7bbe8fd6ec076704bbd5407269f4f87f5c4 | [
"MIT"
] | null | null | null | parse_file.py | ames0k0/TestWork | 76bfa7bbe8fd6ec076704bbd5407269f4f87f5c4 | [
"MIT"
] | null | null | null | parse_file.py | ames0k0/TestWork | 76bfa7bbe8fd6ec076704bbd5407269f4f87f5c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __author__ = 'kira@-築城院 真鍳'
import re #----------------------------#
from string import ascii_letters #-----#
from random import randint, choice #---#
from os.path import exists #-----------#
from collections import Counter, deque #
"""
@args@ -> _file : path_to_file,
name : name_of_variable
in_place: open_current_file_to_parse or find_file
_sort : sort words
beautify: symbols in one line
@input@
word_list.txt:
A
AA
AAAA
~~~~~~~
@call_func@
convert file in place word_list.txt -> word_list.py:
from parse_file import convert_to_list
convert_to_list(__file__, 'list_of_words', in_place=True, _sort=False)
'''
A
AA
AAAA
~~~~~~~
'''
@call_func@
find file and convert:
convert_to_list(path_to_file, 'list_of_words', in_place=False, _sort=False)
@LIST output@
# len(list_of_words) -> ??
list_of_words = [
A, AA, AAA, ~~~~~~~
]
@DICT output@
# len(dict_of_words) -> ??
dict_of_words = {
'A': [
'A', 'AA', 'AAA'
],
'~~~~~~~': [
...
]
}
"""
def generate_random_words(flag, in_place=False, wlen=50):
    # http://members.optusnet.com.au/charles57/Creative/Techniques/random_words.html
    """Generate a file of random pseudo-words, one per line.

    Args:
        flag: 'list' or 'dict' - which converter the generated .py file calls.
        in_place: if True write a runnable ``word_list.py`` that converts
            itself on import; otherwise write a plain ``word_list.txt``.
        wlen: number of random words to generate.
    """
    # Each word is 5-9 random ASCII letters, preceded by a newline (so the
    # output starts with a blank first line, matching the historical format).
    # str.join avoids the quadratic += string building of the old version.
    result = "".join(
        "\n" + "".join(choice(ascii_letters) for _ in range(randint(5, 9)))
        for _ in range(wlen)
    )
    if in_place:
        _c = """convert_to_{0}(__file__, '{0}_of_words', in_place=True)\n'''""".format(flag)
        _i = "from parse_file import convert_to_{}\n".format(flag)
        result = "{}{}{}\n'''".format(_i, _c, result)
        _file = 'word_list.py'
    else:
        _file = 'word_list.txt'
    with open(_file, 'w') as random_f:
        random_f.write(result)
def convert_to_list(_file, name="list_of_words", in_place=False, _sort=True, beautify=100):
    """Rewrite ``_file`` as a Python list literal of the words it contains.

    If the file does not exist, a random word_list.txt is generated instead.

    Args:
        _file: Path to the word file (or to the generated .py when in_place).
        name: Variable name for the emitted list.
        in_place: If True, strip the python wrapper and keep only the text
            between the first pair of triple-quote markers.
        _sort: Sort the words before emitting.
        beautify: Target line width used to compute words-per-line.

    NOTE(review): the 3-way unpack of the most-common word lengths crashes
    when the file has fewer than 3 distinct word lengths - confirm inputs.
    """
    if exists(_file):
        # i don't know what it calls, i'll call it @baeos: begin and end of string
        baeos = ["'''", '"""']
        source = ""
        with open(_file, 'r') as _f:
            in_file = _f.readlines()
        flag = False
        for get_line in in_file:
            if in_place:
                # remove python script: keep only lines after the opening
                # triple-quote and before the closing one
                get_flag = get_line.strip()
                if not flag:
                    if get_flag in baeos:
                        flag = True
                else:
                    if get_flag not in baeos:
                        source += get_line
            else:
                source += get_line
        result = ""
        # remove symbols
        word = re.compile(r'\w+')
        words = word.findall(source)
        wlen = len(words)
        # make limit words in line
        # len('word') \ (100 symbols|beautify)
        get_minimum = str(Counter([len(w) for w in words]).most_common(3))
        # [(6, 16), (5, 12), (4, 6)] -> '(6(5(4' -> 654
        lens_of_most_common = "".join(re.findall(r'\(\d', get_minimum))[1::2]
        # ignore words with len 1
        start, _, end = [int(lens) for lens in lens_of_most_common]
        leno = end if (end - start) > start else start
        # len(", ") == 2
        line = int(beautify / (leno * 2))
        if _sort:
            words.sort()
        # [['w1', 'w2', 'w3'], ['w4', 'w5', 'w6'], ... ]
        words = deque(words)
        lines = [
            ["'{}'".format(words.popleft()) for _ in range(line)] for _ in range(int(wlen/line))
        ]
        # create list with name
        tab = "\n    "
        result += "# len({0}) -> {1}\n{0} = [{2}".format(name, wlen, tab)
        for idx, lbl in enumerate(lines):
            result += ", ".join(lbl)
            if idx < (len(lines) - 1):
                result += tab
        # add tab if list not empty (leftover words that didn't fill a row)
        if words:
            result += tab
            result += ", ".join(["'{}'".format(wstr) for wstr in words])
        result += "\n]"
        # rewrite file
        with open(_file, 'w') as _f:
            _f.write(result)
        print("\n[+] Parsed, open file: {}\n".format(_file))
    else:
        # generate file of words
        # False: .txt file, True: .py file
        generate_random_words('list', False)
        print("\n[!] {0}: not exists! Generated file: {0}, try again\n".format(_file))
def convert_to_dict(_file, name="list_of_words", in_place=False, _sort=True, beautify=95):
    """Rewrite ``_file`` as a Python dict literal grouping words by first letter.

    Mirrors :func:`convert_to_list`; see its notes. Generates a random
    word_list.txt when ``_file`` does not exist.

    Args:
        _file: Path to the word file (or to the generated .py when in_place).
        name: Variable name for the emitted dict.
        in_place: Strip the python wrapper, keeping only the quoted word block.
        _sort: Sort the first-letter keys before emitting.
        beautify: Target line width used to compute words-per-line.
    """
    if exists(_file):
        baeos = ["'''", '"""']
        source = ""
        with open(_file, 'r') as _f:
            in_file = _f.readlines()
        flag = False
        for get_line in in_file:
            if in_place:
                get_flag = get_line.strip()
                if not flag:
                    if get_flag in baeos:
                        flag = True
                else:
                    if get_flag not in baeos:
                        source += get_line
            else:
                source += get_line
        result = ""
        word = re.compile(r'\w+')
        words = word.findall(source)
        wlen = len(words)
        get_minimum = str(Counter([len(w) for w in words]).most_common(3))
        lens_of_most_common = "".join(re.findall(r'\(\d', get_minimum))[1::2]
        # NOTE(review): crashes when fewer than 3 distinct word lengths exist.
        start, _, end = [int(lens) for lens in lens_of_most_common]
        if (end - start) > start:
            leno = end
        else:
            leno = start
        line = int(beautify / (leno * 2))
        # one key per distinct first letter
        keys = list(set("".join([w[0] for w in words])))
        if _sort:
            keys.sort()
        lines = []
        for key in keys:
            nword = []
            delidx = []
            for idx, w in enumerate(words[:]):
                if w[0] == key:
                    nword.append('{}'.format(w))
                    delidx.append(idx)
            delidx.sort()
            # delete from the end so earlier indexes stay valid
            for dx in reversed(delidx):
                del words[dx]
            lines.append(nword)
        del words
        tab = "\n    "
        result += "# len({0}) -> {1}\n{0} = %s{2}".format(name, wlen, tab) % '{'
        for idx, (k, lbl) in enumerate(zip(keys, lines)):
            result += "'{}': [{}".format(k, "%s    " % tab)
            in_line = 0
            llen = len(lbl)
            while in_line < llen:
                result += ", ".join(["'{}'".format(wstr) for wstr in lbl[in_line:in_line+line]])
                in_line += line
                if in_line < llen:
                    result += "%s    " % tab
            result += "{}],".format(tab)
            if idx < len(lines) - 1:
                result += tab
        result += "\n}"
        with open(_file, 'w') as _f:
            _f.write(result)
        print("\n[+] Parsed, open file: {}\n".format(_file))
    else:
        generate_random_words('dict', False)
        print("\n[!] {0}: not exists! Generated file: {0}, try again\n".format(_file))
if __name__ == "__main__":
    pass
    # This module is meant to be imported; nothing runs on direct execution.
    # Example invocations:
    # convert_to_dict('word_list.txt', 'girl_names', False, True)
    # convert_to_list('word_list.txt', 'girl_names', False, True)
acde7837429711390ce5827370e73e15c62952a9 | 476 | py | Python | intent_parser/setup.py | SD2E/experimental-intent-parser-mw | ebe0358f639505cb281a3e0c1c94e79a88d504e7 | [
"BSD-3-Clause"
] | null | null | null | intent_parser/setup.py | SD2E/experimental-intent-parser-mw | ebe0358f639505cb281a3e0c1c94e79a88d504e7 | [
"BSD-3-Clause"
] | null | null | null | intent_parser/setup.py | SD2E/experimental-intent-parser-mw | ebe0358f639505cb281a3e0c1c94e79a88d504e7 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
setuptools.setup(
name='intent-parser',
version='2.5.1',
packages=setuptools.find_packages(),
python_requires='>=3.6',
include_package_data=True, # include everything in source control
entry_points={
'console_scripts': [
'intent_parser_server = intent_parser.server.intent_parser_server:main',
'intent_parser.addons.ip_addon_script = intent_parser.addons.ip_addon_script:main',
],
},
) | 31.733333 | 95 | 0.678571 |
acde78395cd4b9e18514ca145ef74614beeb4d21 | 4,610 | py | Python | codegen/boolean_and_test.py | konkked/Underscore.cs | 02668b93aa59d44c103eb421f84e4c331b7f623f | [
"MIT"
] | 22 | 2015-01-29T05:42:49.000Z | 2021-04-30T17:49:31.000Z | codegen/boolean_and_test.py | konkked/Underscore.cs | 02668b93aa59d44c103eb421f84e4c331b7f623f | [
"MIT"
] | 47 | 2015-01-27T23:42:26.000Z | 2016-11-17T15:12:00.000Z | codegen/boolean_and_test.py | konkked/Underscore.cs | 02668b93aa59d44c103eb421f84e4c331b7f623f | [
"MIT"
] | 1 | 2017-03-03T04:45:13.000Z | 2017-03-03T04:45:13.000Z | # script to generate the tests for Function.Boolean.Negate(),
# because there are ~40 of them
import sys
import os
def lowercase_letters():
    """Return the 26 lowercase ASCII letters 'a'..'z' as a list."""
    return [chr(code) for code in range(ord('a'), ord('z') + 1)]
def generate_all_test_cases():
    """Concatenate the generated test-case pairs for 0..16 parameters."""
    return "".join(generate_test_case_pair(count) for count in range(0, 17))
def generate_test_case_pair(num_params):
    """Return the true-input, false-input and mixed-input cases for one arity."""
    cases = (
        generate_test_case(True, num_params, False),
        generate_test_case(False, num_params, False),
        generate_test_case(True, num_params, True),
    )
    return ''.join(cases)
def generate_test_case(input_bool, num_params, mixed):
    """Build one C# [TestMethod] body exercising Function.Boolean.And.

    input_bool - the constant every generated lambda returns (unless mixed)
    num_params - arity of the Func<> objects being combined (0..16)
    mixed      - when True, the lambda at index 2 returns the opposite
                 value, so the combined result is asserted False
    """
    lambda_vars = lowercase_letters()[:num_params]
    cap_bool_name = 'True' if input_bool else 'False'
    lower_bool_name = 'true' if input_bool else 'false'
    opposite_bool = 'false' if input_bool else 'true'
    num_param_str = str(num_params) if num_params > 0 else 'No'
    argument_count_str = 'Arguments_' if num_params != 1 else 'Argument_'
    mixed_str = 'Mixed' if mixed else ''
    object_declaration = '\t\t\t// this is just used to fill params\n\t\t\tvar obj = new object();\n' if num_params > 0 else ''
    retval = '\t\t[TestMethod]\n'
    retval += '\t\tpublic void Function_Boolean_And_' + num_param_str + argument_count_str + cap_bool_name + 'Input' + mixed_str + '()\n'
    retval += '\t\t{\n'
    retval += object_declaration
    retval += '\t\t\tvar funcsToCombine = new List<Func<'
    # add an object to the func type signature for each param we're using
    for i in range(0, num_params):
        retval += 'object, '
    # add a bool for the return type
    retval += 'bool>>\n'
    retval += '\t\t\t{\n'
    # enter multiple lambdas
    for i in range(0, 4):
        retval += '\t\t\t\t('
        # put all the var names into arg declaration except the last one so we can have proper syntax
        for j in range(0, len(lambda_vars) - 1):
            retval += lambda_vars[j] + ', '
        lastArg = lambda_vars[-1] if num_params > 0 else ''
        # only the lambda at i == 2 returns the opposite boolean in a mixed case
        bool_val = lower_bool_name if not (mixed and i == 2) else opposite_bool
        retval += lastArg + ') => (wasCalled[' + str(i) + '] = true) && ' + bool_val + ',\n'
    retval += '\t\t\t};\n'
    retval += '\n'
    retval += '\t\t\tvar combined = component.And(funcsToCombine[0], funcsToCombine[1], funcsToCombine[2], funcsToCombine[3]);\n'
    retval += '\n'
    # a mixed input is always expected to make the combined And return False
    assert_val = cap_bool_name if not mixed else 'False'
    retval += '\t\t\tAssert.Is' + assert_val + '(combined('
    for i in range(0, num_params - 1):
        retval += 'obj, '
    lastArg = 'obj' if num_params > 0 else ''
    retval += lastArg + '));\n'
    # the first function always gets called
    retval += '\t\t\tAssert.IsTrue(wasCalled[0]);\n'
    if input_bool:
        for i in range(1, 3):
            retval += '\t\t\tAssert.IsTrue(wasCalled[' + str(i) + ']);\n'
    else:
        for i in range(1, 3):
            retval += '\t\t\tAssert.IsFalse(wasCalled[' + str(i) + ']);\n'
    # the last lambda only runs when nothing before it short-circuited
    last_was_called = 'True' if not mixed and input_bool else 'False'
    retval += '\t\t\tAssert.Is' + last_was_called + '(wasCalled[' + str(3) + ']);\n'
    # close function block
    retval += '\t\t}\n'
    retval += '\n'
    return retval
def test_file():
    """Path of the generated AndTest.cs inside the Underscore.Test tree."""
    here = os.path.dirname(__file__)
    return os.path.join(here, '..', 'Underscore.Test', 'Function', 'Boolean', 'AndTest.cs')
def write_to_file(output):
    """Overwrite the target test file with the generated C# source."""
    with open(test_file(), 'w') as handle:
        handle.write(output)
# so we avoid global scope
def main():
    """Assemble the AndTest.cs source and write it to the test project."""
    # Fixed preamble: usings, namespace, class skeleton and [TestInitialize].
    header = [
        'using System;\n',
        'using Microsoft.VisualStudio.TestTools.UnitTesting;\n',
        'using System.Collections.Generic;\n',
        'using Underscore.Function;\n',
        '\n',
        'namespace Underscore.Test.Boolean\n',
        '{\n',
        '\t// Generated using /codegen/boolean_and_test.py\n',
        '\t[TestClass]\n',
        '\tpublic class AndTest\n',
        '\t{\n',
        '\t\tprivate AndComponent component;\n',
        '\t\tprivate bool[] wasCalled;\n',
        '\n',
        '\t\t[TestInitialize]\n',
        '\t\tpublic void Initialize()\n',
        '\t\t{\n',
        '\t\t\tcomponent = new AndComponent();\n',
        '\t\t\twasCalled = new[] {false, false, false, false};\n',
        '\t\t}\n',
        '\n',
    ]
    # Generated test bodies, then the closing class and namespace braces.
    output = ''.join(header) + generate_all_test_cases() + '\t}\n' + '}'
    write_to_file(output)


# FIX: guard the entry point so importing this module does not
# immediately regenerate the test file.
if __name__ == '__main__':
    main()
| 32.928571 | 146 | 0.621909 |
acde78ddaf40b74856327f3322648c28bccacecb | 857 | py | Python | climatology/clim/setup.py | tloubrieu-jpl/incubator-sdap-nexus | 5bf903f04f12eb27f25ea2aa738c617ca404a87b | [
"Apache-2.0"
] | 17 | 2017-11-16T07:36:33.000Z | 2021-11-07T00:02:20.000Z | climatology/clim/setup.py | ifenty/incubator-sdap-nexus | 3059c66f53d3f3d24c74d557c7632bdcc7f1eeec | [
"Apache-2.0"
] | 35 | 2018-01-11T00:50:20.000Z | 2022-03-17T23:08:07.000Z | climatology/clim/setup.py | ifenty/incubator-sdap-nexus | 3059c66f53d3f3d24c74d557c7632bdcc7f1eeec | [
"Apache-2.0"
] | 25 | 2017-11-16T07:36:38.000Z | 2022-02-03T20:48:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup

# Minimal packaging stub: distribution 'clim' with no packages declared.
setup(name='clim',
      version='0.1dev0')
| 37.26087 | 74 | 0.767795 |
acde797d930285433d745f864ad499ab8a8f29d1 | 3,037 | py | Python | contrib/linearize/linearize-hashes.py | CaveSpectre11/GambleCoin | 845f13ee071be1c9834e742eef1658fba437dd61 | [
"MIT"
] | 6 | 2018-03-29T00:11:06.000Z | 2019-03-27T11:46:34.000Z | contrib/linearize/linearize-hashes.py | CaveSpectre11/GambleCoin | 845f13ee071be1c9834e742eef1658fba437dd61 | [
"MIT"
] | 26 | 2019-04-01T17:50:42.000Z | 2019-07-21T00:29:14.000Z | contrib/linearize/linearize-hashes.py | CaveSpectre11/GambleCoin | 845f13ee071be1c9834e742eef1658fba437dd61 | [
"MIT"
] | 8 | 2018-04-20T11:52:21.000Z | 2019-03-19T11:51:57.000Z | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
	"""Minimal JSON-RPC 1.1 client over HTTP Basic auth.

	NOTE(review): Python-2 era code (httplib, str-based b64encode).
	"""
	def __init__(self, host, port, username, password):
		# Pre-compute the Basic auth header once; reused on every request.
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# strict=False, 30 second timeout.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def execute(self, obj):
		"""POST a JSON-RPC request (single dict or batch list) and return
		the decoded reply, or None when no response arrives."""
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print("JSON-RPC: no response", file=sys.stderr)
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		return resp_obj
	@staticmethod
	def build_request(idx, method, params):
		"""Return a JSON-RPC 1.1 request dict; idx is echoed back as 'id'."""
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : idx }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		return obj
	@staticmethod
	def response_is_error(resp_obj):
		"""True when the reply carries a non-null 'error' member."""
		return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
	"""Print the hash of every block from min_height..max_height, in order.

	Requests are batched (up to max_blocks_per_call per JSON-RPC batch)
	to reduce round-trips; exits the process on any RPC error.
	"""
	rpc = BitcoinRPC(settings['host'], settings['port'],
			 settings['rpcuser'], settings['rpcpassword'])
	height = settings['min_height']
	while height < settings['max_height']+1:
		# Never request past max_height, even on the final partial batch.
		num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
		batch = []
		for x in range(num_blocks):
			batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
		reply = rpc.execute(batch)
		for x,resp_obj in enumerate(reply):
			if rpc.response_is_error(resp_obj):
				print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
				exit(1)
			assert(resp_obj['id'] == x) # assume replies are in-sequence
			print(resp_obj['result'])
		height += num_blocks
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print("Usage: linearize-hashes.py CONFIG-FILE")
		sys.exit(1)

	# Parse the key=value config file into the module-level `settings` dict.
	# FIX: the config file is now closed deterministically via `with`.
	with open(sys.argv[1]) as f:
		for line in f:
			# skip comment lines
			m = re.search(r'^\s*#', line)
			if m:
				continue
			# parse key=value lines
			m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
			if m is None:
				continue
			settings[m.group(1)] = m.group(2)

	# Defaults for anything the config did not specify.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 12010
	if 'min_height' not in settings:
		settings['min_height'] = 0
	if 'max_height' not in settings:
		settings['max_height'] = 313000
	if 'rpcuser' not in settings or 'rpcpassword' not in settings:
		# BUGFIX: `stderr` was undefined here (NameError); use sys.stderr.
		print("Missing username and/or password in cfg file", file=sys.stderr)
		sys.exit(1)

	settings['port'] = int(settings['port'])
	settings['min_height'] = int(settings['min_height'])
	settings['max_height'] = int(settings['max_height'])

	get_block_hashes(settings)
| 26.640351 | 90 | 0.682581 |
acde79b46902910e0ba4801f06e246369e347926 | 17,968 | py | Python | latency_reliability_test/CastThread.py | marcus-nystrom/share-gaze | 09fc693f53fca164ed8c0e5f08580beac1cb675d | [
"MIT"
] | 5 | 2016-03-25T14:15:14.000Z | 2021-11-02T13:02:08.000Z | latency_reliability_test/CastThread.py | marcus-nystrom/share-gaze | 09fc693f53fca164ed8c0e5f08580beac1cb675d | [
"MIT"
] | null | null | null | latency_reliability_test/CastThread.py | marcus-nystrom/share-gaze | 09fc693f53fca164ed8c0e5f08580beac1cb675d | [
"MIT"
] | 1 | 2021-05-27T05:33:42.000Z | 2021-05-27T05:33:42.000Z | # Class to multicast (send and receive) eye gaze or simulated gaze in real time in a separate thread
# and also to syncronize clocks and check time diffs against clients (initiated by teachers PC)
# Clients may connect and disconnect to a session ad hock.
# ==================================================================================================
#
# Usage Notes: See client_xyCastThread.py as a sample on how to use this class.
#
# Example Usage in short:
#
# import CastThread
# etThread = CastThread.xEyeTrack()
# dataMyET = etThread.receiveNoBlock()
# multicastThread = CastThread.MultiCast()
# multicastThread.send(data)
# dataOtherET = multicastThread.receiveNoBlock()
# #do something ...
# multicastThread.stop()
#
# DC Server stuff (run from Teacher PC):
# time_diff, ip = send_to_set_time(time2set)
# run_time_check_loop
#
# 2012-04-17: v0.1: initial implementation
# 2015-04-01: v1.0: full implementation tested
# 2015-06-20: v1.1: time stamping using G_dll.dll
# 2015-09-11: v1.2: removed G_dll.dll time stamping, using run method to read fast
#
# Started by Michael MacAskill <michael.macaskill@nzbri.org>
# Changed by Henrik Garde, Humanities Lab, LU <henrik.garde@humlab.lu.se>
# Marcus Nystrom, Humanities Lab, LU <marcus.nystrom@humlab.lu.se>
# Diederick Niehorster, Humanities Lab, LU <diederick_c.niehorster@humlab.lu.se>
# - incorporates code from IOhub written by Sol Simpson
import threading # this class is a thread sub-class
import socket
import time
import datetime
from psychopy import core, event, visual, monitors, misc
import numpy as np
import sys
import struct
import os
from collections import deque
# 26 colors from The Colour Alphabet Project suggested by Paul Green-Armytage
# designed for use with white background:
col = (240,163,255),(0,117,220),(153,63,0),(76,0,92),(25,25,25),\
(0,92,49),(43,206,72),(255,204,153),(128,128,128),(148,255,181),\
(143,124,0),(157,204,0),(194,0,136),(0,51,128),(255,164,5),\
(255,168,187),(66,102,0),(255,0,16),(94,241,242),(0,153,143),\
(224,255,102),(116,10,255),(153,0,0),(255,255,128),(255,255,0),(255,80,5)
def get26colors(i):
    """Return color i of the 26-color alphabet palette as a '#rrggbb' string.

    Indices above 25 fall back to the first palette entry.
    """
    idx = 0 if i > 25 else i
    r, g, b = col[idx]
    return '#%02x%02x%02x' % (r, g, b)
# for timestamping. Don't use Unix epoch but something
# recent so we have enough precision in the returned timestamp
epoch = datetime.datetime(2015, 9, 1, 2)
class xSmiEyeTrack(threading.Thread):
    """Listens for SMI iView gaze data on a local UDP port.

    On construction it binds a UDP socket and sends ET_FRM/ET_STR
    commands so iView starts streaming samples in the
    "%ET %TU %SX %SY" format to `port`.
    NOTE(review): Python-2 era class; `sendto` with a `str` payload
    assumes bytes == str semantics.
    """

    # initialise with defaults:
    def __init__(self, port=5555, iViewIP="192.168.0.1", iViewPort=4444):
        # UDP port to listen for iView data on.
        # Set iView software to duplicate its stream to this port number so
        # that we don't conflict with the listening/sending on the main port.
        self.port = port
        self.iViewPort = iViewPort
        # address to send command messages to iView
        self.iViewIP = iViewIP
        # Bind to all interfaces:
        self.host = '0.0.0.0'
        # Setup the socket:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # The size of the buffer we use for receiving:
        self.buffer = 4096
        # Bind to the local host and port
        self.sock.bind((self.host, self.port))
        # get iView to start streaming data
        self.send('ET_FRM "%ET %TU %SX %SY"')  # set the format of the datagram (see iView manual)
        self.send('ET_STR')  # start streaming (can also add optional integer to specify rate)
        # create self as a thread
        threading.Thread.__init__(self)
        self.__stop = False

    def send(self, command):
        """Send a command datagram to the iView host.

        BUGFIX: __init__ called self.send() but no such method existed,
        so instantiating this class raised AttributeError.
        """
        self.sock.sendto(command, (self.iViewIP, self.iViewPort))

    def getTimestamp(self):
        """Seconds elapsed since the module-level reference `epoch`."""
        return (datetime.datetime.now() - epoch).total_seconds()
        #HG: Maybe '.utcnow.' is needed for pos diff tz on clients unless 2 overwrites the time zone
        # total_seconds() is equivalent to
        # (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 computed with true division enabled.

    def disconnect(self):
        """No-op; kept for interface parity with the other tracker classes."""
        pass

    def stop_recording(self):
        """No-op; kept for interface parity with the other tracker classes."""
        pass
class iViewEyeTrack(threading.Thread):
    # initialise with own IP address and an optional standard calibration based on a PsychoPy win window:
    # mon = monitors.Monitor('default')
    # mon.setWidth(53.2) # Width of screen (cm)
    # mon.setDistance(65) # Distance eye monitor (cm)
    # screenSize = [1680, 1050] # 'debug';: 800,600'
    # win = visual.Window(screenSize, fullscr=False, allowGUI=False, color=(0, 0, 0), monitor=mon, units='deg', screen=0)
    def __init__(self, mon, screenSize=None, skip_ringbuf=True,
                 port=5555, myIP=None, iViewPort=4444,
                 pc_setup='one_PC',
                 calib_skip=False, calib_instruction_text='', calib_bg_color=128, calib_fg_color=64,
                 win=None):
        """Wrap the SMI iView SDK: connect, optionally calibrate, stream samples.

        FIX: `screenSize` previously used a shared mutable list default and
        `myIP` performed a DNS lookup at class-definition (import) time.
        Both now use None sentinels with identical effective defaults.
        `port`, `myIP` and `iViewPort` are accepted for interface
        compatibility but are not used by this class.
        """
        from iView import iview_SDK, iViewXAPI
        if screenSize is None:
            screenSize = [1680, 1050]
        self.mon = mon
        self.mon.setWidth(53.2)  # Width of screen (cm)
        self.mon.setDistance(65)  # Distance eye monitor (cm)
        self.screenSize = screenSize
        self.skip_ringbuf = skip_ringbuf
        # misc (Henrik)
        self.my_ip = socket.gethostbyname(socket.gethostname())
        self.i = 0
        self.msg_i = 0
        self.x = 0
        self.y = 0
        self.state = 0
        self.res = 0
        # Create an instance of the eye tracker class (connects automatically)
        self.et = iview_SDK.mySDK_class(computer_setup=pc_setup)  # Also initializes the eye tracker
        # Calibrate and validate the eye tracker
        if not calib_skip:
            # Create PsychoPy text stimulus shown during calibration
            # TODO: Consider moving more text properties to class parameters
            self.calib_text = visual.TextStim(win, text=calib_instruction_text, wrapWidth=20, height=0.5)
            self.et.setup_calibration_parameters(bg_color=calib_bg_color, fg_color=calib_fg_color)
            self.et.calibrate(win, self.calib_text)
        # create self as a thread
        threading.Thread.__init__(self)
        self.__stop = False

    def getTimestamp(self):
        """Seconds elapsed since the module-level reference `epoch`."""
        #return core.getTime()
        return (datetime.datetime.now() - epoch).total_seconds()

    def start_recording(self):
        """Start the iView recording."""
        self.et.start_recording()

    def save_data(self, filename='test.idf'):
        """Save recorded data to an iView .idf file."""
        self.et.save_data(filename)

    def disconnect(self):
        """Disconnect from the eye tracker."""
        self.et.disconnect()

    def stop_recording(self):
        """Stop the eye tracker after a 1 second grace period."""
        core.wait(1.0)
        self.et.stop_recording()

    def next(self):
        """Return (state, x, y) in degrees, or (0, 0, 0) without a valid sample."""
        # Get samples
        try:
            self.res, self.x, self.y = self.get_valid_data_sample()
            # FIX: was `self.res is not 1` - identity comparison with an int
            # literal only works by accident (CPython small-int caching) and
            # emits SyntaxWarning on Python >= 3.8.
            if self.res != 1:
                return 0, 0, 0
        except Exception:
            return 0, 0, 0
        else:
            return self.state, self.x, self.y

    def nextMsg(self):
        """Format the current sample as 'x,y,timestamp,counter,state,ip'."""
        self.msg_i += 1
        # call next() first to update x, y and state
        message = ','.join((str(self.x),
                            str(self.y),
                            str(self.getTimestamp()),
                            str(self.msg_i),
                            str(self.state),
                            self.my_ip))
        return message

    def get_valid_data_sample(self):
        """
        Get onscreen samples, and convert to degrees
        Coordinate system is centered in the middle of the screen (0,0)
        """
        # Get samples
        x = 0
        y = 0
        res, sampleData = self.et.get_sample()
        if res == 1:
            xt = sampleData.rightEye.gazeX
            yt = sampleData.rightEye.gazeY
            # Make sure the samples are on the screen and valid
            # NOTE(review): xt/yt appear to be scalars here (np.any wraps a
            # plain boolean `or` chain) - confirm against the SDK sample type.
            if np.any(xt <= 0 or xt > self.screenSize[0] or yt <= 0 or yt > self.screenSize[1]):
                pass
            else:
                x = xt - self.screenSize[0]/2
                y = -1 * (yt - self.screenSize[1]/2)
                x = misc.pix2deg(x, self.mon)
                y = misc.pix2deg(y, self.mon)
        return res, x, y
class simTrack(threading.Thread):
    """Thread-style source of a simulated gaze track.

    Produces (state, x, y) samples where x follows either a sine wave or
    a "pingpong" pendulum between -1 and +1 (the default, stepped for a
    60 Hz refresh); y stays constant.
    """

    def __init__(self, simType="pingpong", casting=True):
        """simType: "pingpong" (default) or "sinus"; casting: multicast flag."""
        # Waveform to generate and whether the caller multicasts the data.
        self.simType = simType
        self.casting = casting
        # Identity attached to every outgoing message.
        self.my_ip = socket.gethostbyname(socket.gethostname())
        # Phase accumulator and message counter.
        self.i = 0
        self.msg_i = 0
        # Current simulated gaze position.
        self.x = 0
        self.y = 0
        # Pendulum step per frame (2 units/second at 60 Hz) and direction.
        self.x_step = 1.0 / 60.0 * 2.0
        self.x_dir = 1
        self.turn_i = 0
        # State flag used to visualize e.g. a dot colour change.
        self.state = 0
        self.state_changed = False
        threading.Thread.__init__(self)
        self.__stop = False

    def getTimestamp(self):
        """Seconds elapsed since the module-level reference `epoch`."""
        elapsed = datetime.datetime.now() - epoch
        return elapsed.total_seconds()

    def start_recording(self):
        """Reset the outgoing message counter."""
        self.msg_i = 0

    def nextMsg(self):
        """Serialize the current sample as 'x,y,timestamp,counter,state,ip'."""
        self.msg_i += 1
        # call next() first so x, y and state are up to date
        fields = (str(self.x),
                  str(self.y),
                  str(self.getTimestamp()),
                  str(self.msg_i),
                  str(self.state),
                  self.my_ip)
        return ','.join(fields)

    def next(self):
        """Advance the simulation one frame and return (state, x, y)."""
        self.x, turn_count = self.nextX()
        # While the turn counter is divisible by 5 the state flag flips on
        # every frame (visual state-change marker).
        if self.turn_i % 5 == 0:
            self.state_changed = not self.state_changed
            self.state = -1 if self.state_changed else 1
        return self.state, self.x, self.y

    def nextX(self):
        """Step the x coordinate by one frame; returns (x, turn_count)."""
        if self.simType != "sinus":
            self.x, self.x_dir, self.turn_i = \
                self.pingpong(self.x, self.x_step, self.x_dir, self.turn_i)
        else:
            self.x = self.sinus(self.i * 0.1)
        self.i += 0.1
        return self.x, self.turn_i

    def sinus(self, i):
        """Half-amplitude sine wave."""
        return 0.5 * np.sin(i)

    def pingpong(self, x, x_step, x_dir, turn_i):
        """One pendulum step; reverses direction outside [-1, 1].

        The turn counter increments on the left (-1) reversal only.
        """
        x -= x_step * x_dir
        if x < -1.0:
            x_dir *= -1  # change direction
            turn_i += 1
        elif x > 1.0:
            x_dir *= -1  # change direction
        return x, x_dir, turn_i

    def disconnect(self):
        """No-op; kept for interface parity with the eye tracker classes."""
        pass

    def stop_recording(self):
        """No-op; kept for interface parity with the eye tracker classes."""
        pass
class MultiCast(threading.Thread):
    """UDP multicast sender/receiver running as a background thread.

    Joins the multicast group on construction. run() timestamps every
    received datagram and queues it on `outputBuffer` for the client to
    drain with consumeAll().
    """

    # initialise:
    def __init__(self, myIP="169.254.173.49", dcMultiPort=10000, dcMultiIP="224.0.0.9"):
        self.myIP = myIP
        # Ports that we send and receive on:
        self.dcMultiPort = dcMultiPort
        self.dcMultiIP = dcMultiIP
        self.multicastGroup = (dcMultiIP, dcMultiPort)
        # Create Socket object
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # BUGFIX: the bind port was hard-coded to 10000 and silently ignored
        # a non-default dcMultiPort argument.
        self.server_address = ('', dcMultiPort)
        self.sock.bind(self.server_address)
        # NOTE: binding to '' receives data from all multicast groups; binding
        # to the group IP would restrict reception to that group only.
        # Tell the operating system to add the socket to the multicast group
        # on all interfaces.
        group = socket.inet_aton(dcMultiIP)
        self.mreq = struct.pack('4sL', group, socket.INADDR_ANY)
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, self.mreq)
        # by default, don't receive own data:
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 0)
        # blocking, but with non-infinite timeout so run() can poll the stop flag
        self.sock.settimeout(1)
        self.buffer = 1024
        # deque filled by run() for the client to read from
        self.outputBuffer = deque()
        # create self as a thread
        threading.Thread.__init__(self)
        self.__stop = False

    def setReceiveOwn(self, receiveOwn):
        """Enable/disable loopback of our own multicast datagrams."""
        if receiveOwn:
            self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
        else:
            self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 0)

    def getTimestamp(self):
        """Seconds elapsed since the module-level reference `epoch`."""
        return (datetime.datetime.now() - epoch).total_seconds()

    def receiveNoBlock(self):
        """Return (data, addr) if a datagram is waiting, else (0, 0).

        Switches the socket to non-blocking mode permanently.
        """
        self.sock.setblocking(0)
        try:
            data, addr = self.sock.recvfrom(1024)
        except Exception:
            return 0, 0
        else:
            return data, addr

    def receiveBlock(self, timeout=None):
        """Block until a datagram arrives (or `timeout` seconds elapse).

        Returns (data, addr), or (0, 0) on timeout/error.
        """
        if timeout:
            self.sock.settimeout(timeout)
        else:
            self.sock.setblocking(1)
        try:
            data, addr = self.sock.recvfrom(self.buffer)
        except Exception:
            # Includes socket.timeout. FIX: a dedicated
            # `except self.sock.timeout` clause followed this one and was
            # unreachable (Exception matches first); it has been removed.
            return 0, 0
        else:
            return data, addr

    def consumeAll(self):
        """Drain and return everything queued by run(), oldest first."""
        ret = list()
        while self.outputBuffer:
            ret.append(self.outputBuffer.popleft())
        return ret

    def send(self, message):
        """Multicast one datagram (x,y,time,i,ip payload) to the group."""
        try:
            self.sock.sendto(message, self.multicastGroup)
        except Exception:
            # FIX: was a Python-2 print statement.
            print("Could not send UDP message")

    def send_to_client(self, message, ip, port=None):
        """Send a datagram to a specific client; default port is dcMultiPort.

        BUGFIX: the port logic was inverted - an explicit port was ignored
        and a missing port produced sendto((ip, None)).
        """
        try:
            if port:
                self.sock.sendto(message, (ip, port))
            else:
                self.sock.sendto(message, (ip, self.dcMultiPort))
        except Exception:
            print("Could not send UDP message")

    # method which will run when the thread is called:
    # this assumes socket's read mode is blocking and is not changed out from under us
    def run(self):
        while True:
            if self.__stop:
                break
            try:
                data, addr = self.sock.recvfrom(self.buffer)
                self.outputBuffer.append((data, addr, self.getTimestamp()))
            except Exception:
                # Socket timeout (1 s) or transient error: keep looping so
                # the stop flag is re-checked. FIX: removed the unreachable
                # `except self.sock.timeout` clause that followed.
                pass

    # so caller can ask for the thread to stop monitoring:
    def stop(self):
        self.__stop = True  # the run() method monitors this flag

    def clean_up(self):
        '''
        Cleans up and makes sure the socket is ready for use next time
        '''
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, self.mreq)
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
acde7b33d0610d1b3ee740bc0438a3ba46668ca2 | 1,558 | py | Python | DataStructures/Stack.py | nabiharaza/LabLearnings | 686f42d7bf4b0b339d42c3a34121f98a6c527f29 | [
"Apache-2.0"
] | 2 | 2018-10-17T19:11:04.000Z | 2018-10-21T14:49:11.000Z | DataStructures/Stack.py | nabiharaza/LabLearnings | 686f42d7bf4b0b339d42c3a34121f98a6c527f29 | [
"Apache-2.0"
] | null | null | null | DataStructures/Stack.py | nabiharaza/LabLearnings | 686f42d7bf4b0b339d42c3a34121f98a6c527f29 | [
"Apache-2.0"
] | null | null | null | """
stack.py
description: A linked stack (LIFO) implementation
"""
from node import LinkedNode
class Stack:
    """A linked (LIFO) stack built from LinkedNode cells."""

    __slots__ = "top"

    def __init__(self):
        """ Create a new empty stack.
        """
        self.top = None

    def __str__(self):
        """ Return a string representation of the contents of
            this stack, top value first.
        """
        result = "Stack["
        # FIX: idiomatic identity checks (`is None`) instead of `== None`.
        node = self.top
        while node is not None:
            result += " " + str(node.value)
            node = node.link
        result += " ]"
        return result

    def is_empty(self):
        """Return True when the stack contains no values."""
        return self.top is None

    def push(self, newValue):
        """Push newValue on top of the stack."""
        self.top = LinkedNode(newValue, self.top)

    def pop(self):
        """Remove the top node and return its value.

        FIX: previously returned None; returning the removed value is
        backward compatible and matches list.pop()/deque semantics.
        """
        assert not self.is_empty(), "Pop from empty stack"
        value = self.top.value
        self.top = self.top.link
        return value

    def peek(self):
        """Return the top value without removing it."""
        assert not self.is_empty(), "peek on empty stack"
        return self.top.value

    # Generic collection aliases used by some callers.
    insert = push
    remove = pop
def test():
    """Exercise Stack: pushes, pops, aliases, and the empty-pop error."""
    stack = Stack()
    print(stack)
    for item in (1, 2, 3):
        stack.push(item)
        print(stack)
    print("Popping:", stack.peek())
    stack.pop()
    print(stack)
    for item in (15, 16):
        stack.insert(item)
        print(stack)
    print("Removing:", stack.peek())
    stack.remove()
    print(stack)
    while not stack.is_empty():
        print("Popping:", stack.peek())
        stack.pop()
        print(stack)
    print("Trying one too many pops... ")
    try:
        stack.pop()
        print("Problem: it succeeded!")
    except Exception as err:
        print("Exception was '" + str(err) + "'")


if __name__ == "__main__":
    test()
| 20.5 | 61 | 0.525032 |
acde7b9626668c45bd8abefe7e7d30164977e2d5 | 5,675 | py | Python | assignments/assignment2/trainer.py | NikIshmametev/dlcourse_ai | 9b2233bd2befc54b17f30fe048a9d87758956140 | [
"MIT"
] | null | null | null | assignments/assignment2/trainer.py | NikIshmametev/dlcourse_ai | 9b2233bd2befc54b17f30fe048a9d87758956140 | [
"MIT"
] | null | null | null | assignments/assignment2/trainer.py | NikIshmametev/dlcourse_ai | 9b2233bd2befc54b17f30fe048a9d87758956140 | [
"MIT"
] | null | null | null | from copy import deepcopy
import numpy as np
from metrics import multiclass_accuracy
from dataset import load_svhn, random_split_train_val
from model import TwoLayerNet
from optim import SGD, MomentumSGD
import os
class Dataset:
    """Bundles the training and validation splits used by Trainer."""

    def __init__(self, train_X, train_y, val_X, val_y):
        """Store references to the four arrays (no copies are made)."""
        self.train_X, self.train_y = train_X, train_y
        self.val_X, self.val_y = val_X, val_y
class Trainer:
    """
    Trainer of the neural network models
    Perform mini-batch SGD with the specified data, model,
    training parameters and optimization rule
    """
    def __init__(self, model, dataset, optim,
                 num_epochs=20,
                 batch_size=20,
                 learning_rate=1e-2,
                 learning_rate_decay=1.0):
        """
        Initializes the trainer

        Arguments:
        model - neural network model
        dataset, instance of Dataset class - data to train on
        optim - optimization method (see optim.py)
        num_epochs, int - number of epochs to train
        batch_size, int - batch size
        learning_rate, float - initial learning rate
        learning_rate_decay, float - ratio for decaying learning rate
            every epoch
        """
        self.dataset = dataset
        self.model = model
        self.optim = optim
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.learning_rate_decay = learning_rate_decay
        # Created lazily on the first fit() call (one optimizer per param).
        self.optimizers = None

    def setup_optimizers(self):
        """Create one independent optimizer copy per model parameter."""
        params = self.model.params()
        self.optimizers = {}
        for param_name, _ in params.items():
            # deepcopy so each parameter keeps its own optimizer state
            # (e.g. momentum velocity).
            self.optimizers[param_name] = deepcopy(self.optim)

    def compute_accuracy(self, X, y):
        """
        Computes accuracy on provided data using mini-batches
        """
        indices = np.arange(X.shape[0])
        sections = np.arange(self.batch_size, X.shape[0], self.batch_size)
        batches_indices = np.array_split(indices, sections)
        pred = np.zeros_like(y)
        for batch_indices in batches_indices:
            batch_X = X[batch_indices]
            pred_batch = self.model.predict(batch_X)
            pred[batch_indices] = pred_batch
        return multiclass_accuracy(pred, y)

    def fit(self):
        """
        Trains a model

        Returns (loss_history, train_acc_history, val_acc_history),
        one entry per epoch.
        """
        if self.optimizers is None:
            self.setup_optimizers()
        num_train = self.dataset.train_X.shape[0]
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for _ in range(self.num_epochs):
            # Reshuffle the training set every epoch, then split it into
            # mini-batches of self.batch_size.
            shuffled_indices = np.arange(num_train)
            np.random.shuffle(shuffled_indices)
            sections = np.arange(self.batch_size, num_train, self.batch_size)
            batches_indices = np.array_split(shuffled_indices, sections)
            batch_losses = []
            for batch_indices in batches_indices:
                # Forward+backward pass accumulates gradients on the params.
                loss = self.model.compute_loss_and_gradients(self.dataset.train_X[batch_indices],
                                                             self.dataset.train_y[batch_indices])
                for param_name, param in self.model.params().items():
                    optimizer = self.optimizers[param_name]
                    param.value = optimizer.update(param.value, param.grad, self.learning_rate)
                batch_losses.append(loss)
            # FIX: was np.not_equal(...) on two Python scalars - a plain
            # comparison is the idiomatic (and equivalent) form.
            if self.learning_rate_decay != 1.0:
                self.learning_rate *= self.learning_rate_decay
            ave_loss = np.mean(batch_losses)
            train_accuracy = self.compute_accuracy(self.dataset.train_X,
                                                   self.dataset.train_y)
            val_accuracy = self.compute_accuracy(self.dataset.val_X,
                                                 self.dataset.val_y)
            print("Loss: %f, Train accuracy: %f, val accuracy: %f" %
                  (ave_loss, train_accuracy, val_accuracy))
            loss_history.append(ave_loss)
            train_acc_history.append(train_accuracy)
            val_acc_history.append(val_accuracy)
        return loss_history, train_acc_history, val_acc_history
if __name__=='__main__':
    # Smoke-test run: train a small TwoLayerNet on a 32-sample SVHN subset.

    def prepare_for_neural_network(train_X, test_X):
        """Flatten images, scale to [0, 1] and subtract the train mean."""
        # FIX: np.float was a deprecated alias of the builtin float and is
        # removed in NumPy >= 1.24; astype(float) yields the same float64.
        train_flat = train_X.reshape(train_X.shape[0], -1).astype(float) / 255.0
        test_flat = test_X.reshape(test_X.shape[0], -1).astype(float) / 255.0
        # Subtract mean
        mean_image = np.mean(train_flat, axis = 0)
        train_flat -= mean_image
        test_flat -= mean_image
        return train_flat, test_flat

    train_X, train_y, test_X, test_y = load_svhn("./assignments/assignment2/data", max_train=10000, max_test=1000)
    train_X, test_X = prepare_for_neural_network(train_X, test_X)
    # Split train into train and val
    train_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)

    data_size = 32
    model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 0)
    dataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size])
    trainer = Trainer(model, dataset, SGD(), num_epochs=10, batch_size=32, learning_rate=1e-0, learning_rate_decay=0.99)

    initial_learning_rate = trainer.learning_rate
    loss_history, train_history, val_history = trainer.fit()
| 35.691824 | 120 | 0.620969 |
acde7d5d24b07c21ef49cab37e085e1b28f1070e | 2,011 | py | Python | webapp/user/forms.py | Sitarko/VideoClips | d005d30987b16b959f81098da40e617b7e034605 | [
"MIT"
] | null | null | null | webapp/user/forms.py | Sitarko/VideoClips | d005d30987b16b959f81098da40e617b7e034605 | [
"MIT"
] | null | null | null | webapp/user/forms.py | Sitarko/VideoClips | d005d30987b16b959f81098da40e617b7e034605 | [
"MIT"
] | 2 | 2021-06-26T11:41:01.000Z | 2021-07-12T20:34:37.000Z | import email_validator
from flask_wtf import FlaskForm
from wtforms import BooleanField, StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from webapp.user.models import User
class LoginForm(FlaskForm):
username = StringField(
'Имя пользователя',
validators=[DataRequired()],
render_kw={"class": "form-control"}
)
password = PasswordField(
'Пароль',
validators=[DataRequired()],
render_kw={"class": "form-control"}
)
remember_me = BooleanField(
'Запомнить меня',
default=True,
render_kw={"class": "form-check-input"}
)
submit = SubmitField(
'Войти',
render_kw={"class": "btn btn-primary"}
)
class RegistrationForm(FlaskForm):
    """Sign-up form with server-side uniqueness checks.

    WTForms automatically invokes ``validate_<fieldname>`` hooks, so
    :meth:`validate_username` and :meth:`validate_email` run on submit and
    reject values that already exist in the ``User`` table.
    """

    username = StringField(
        'Имя пользователя', validators=[DataRequired()],
        render_kw={'class': 'form-control'})
    email = StringField(
        'Электронная почта', validators=[DataRequired(), Email()],
        render_kw={'class': 'form-control'})
    password1 = PasswordField(
        'Пароль', validators=[DataRequired()],
        render_kw={'class': 'form-control'})
    password2 = PasswordField(
        'Повторите пароль',
        validators=[DataRequired(), EqualTo('password1', message='Пароли не совпадают')],
        render_kw={'class': 'form-control'})
    submit = SubmitField(
        'Зарегистрироваться', render_kw={'class': 'btn btn-primary'})

    def validate_username(self, username):
        """Reject a username that is already registered."""
        if User.query.filter_by(username=username.data).count() > 0:
            raise ValidationError('Пользователь с таким именем уже существует')

    def validate_email(self, email):
        """Reject an email address that is already registered."""
        if User.query.filter_by(email=email.data).count() > 0:
            raise ValidationError('Пользователь с такой электронной почтой уже существует')
| 30.938462 | 92 | 0.636499 |
acde7e8451e4030a165e1b08728a51ddeb98fd37 | 422 | py | Python | examples/ndarray.py | Mclland45/Bitarray | b036a957d4321f4b370cce0413193dc7bd88e055 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | examples/ndarray.py | Mclland45/Bitarray | b036a957d4321f4b370cce0413193dc7bd88e055 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | examples/ndarray.py | Mclland45/Bitarray | b036a957d4321f4b370cce0413193dc7bd88e055 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | #
# This example illusatrates how binary data can be efficiently be passed
# between a bitarray object and an ndarray with dtype bool
#
from __future__ import print_function
import bitarray
import numpy
a = bitarray.bitarray('100011001001')
print(a)
# bitarray -> ndarray
b = numpy.frombuffer(a.unpack(), dtype=bool)
print(repr(b))
# ndarray -> bitarray
c = bitarray.bitarray()
c.pack(b.tostring())
assert a == c
| 19.181818 | 72 | 0.739336 |
acde7fc8e37f1b55c2f7cecc60cedb94ca9ca037 | 3,322 | py | Python | pypureclient/flasharray/FA_2_3/models/protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_3/models/protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_3/models/protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class ProtectionGroupResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    swagger_types = {
        'items': 'list[ProtectionGroup]'
    }

    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.ProtectionGroup]
    ):
        """
        Keyword args:
            items (list[ProtectionGroup]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Only attributes declared in the swagger model may be assigned.
        if key in self.attribute_map:
            self.__dict__[key] = value
        else:
            raise KeyError("Invalid key `{}` for `ProtectionGroupResponse`".format(key))

    def __getattribute__(self, item):
        attr = object.__getattribute__(self, item)
        # Unset swagger Property placeholders behave like missing attributes.
        if isinstance(attr, Property):
            raise AttributeError
        return attr

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _plain(element):
            # One-level conversion: nested models become dicts, leaves pass through.
            return element.to_dict() if hasattr(element, "to_dict") else element

        body = {}
        for name in self.swagger_types:
            if not hasattr(self, name):
                continue
            current = getattr(self, name)
            if isinstance(current, list):
                body[name] = [_plain(entry) for entry in current]
            elif hasattr(current, "to_dict"):
                body[name] = current.to_dict()
            elif isinstance(current, dict):
                body[name] = {key: _plain(val) for key, val in current.items()}
            else:
                body[name] = current
        if issubclass(ProtectionGroupResponse, dict):
            for key, value in self.items():
                body[key] = value
        return body

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, ProtectionGroupResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.660714 | 198 | 0.560506 |
acde80a0eb67de427225d7973ac0b2f94c3564d0 | 2,743 | py | Python | setup.py | alexdreptu/httpie | e2ba214ac02905ab0ada72c582c9e95023c49496 | [
"BSD-3-Clause"
] | null | null | null | setup.py | alexdreptu/httpie | e2ba214ac02905ab0ada72c582c9e95023c49496 | [
"BSD-3-Clause"
] | 1 | 2022-02-28T21:21:09.000Z | 2022-02-28T21:21:09.000Z | setup.py | alexdreptu/httpie | e2ba214ac02905ab0ada72c582c9e95023c49496 | [
"BSD-3-Clause"
] | null | null | null | # This is purely the result of trial and error.
import sys
from setuptools import setup, find_packages
import httpie
# Note: keep requirements here to ease distributions packaging
# Packages needed only to run the test suite.
tests_require = [
    'pytest',
    'pytest-httpbin>=0.0.6',
    'responses',
]
# Full development environment: the test deps plus lint/build/release tooling.
dev_require = [
    *tests_require,
    'flake8',
    'flake8-comprehensions',
    'flake8-deprecated',
    'flake8-mutable',
    'flake8-tuple',
    'pytest-cov',
    'twine',
    'wheel',
]
# Hard runtime dependencies, required on every platform.
install_requires = [
    'defusedxml>=0.6.0',
    'requests[socks]>=2.22.0',
    'Pygments>=2.5.2',
    'requests-toolbelt>=0.9.1',
    'setuptools',
]
# Runtime dependencies needed only on Windows (added conditionally below).
install_requires_win_only = [
    'colorama>=0.2.4',
]
# Conditional dependencies:
# sdist
if 'bdist_wheel' not in sys.argv:
    # sdist path: the platform check runs at install time on the target
    # machine, so the condition can be resolved here directly.
    if 'win32' in str(sys.platform).lower():
        # Terminal colors for Windows
        install_requires.extend(install_requires_win_only)
# bdist_wheel
# Wheel path: the build machine's platform is irrelevant, so the condition
# is encoded as an environment-marker extra that pip evaluates at install time.
extras_require = {
    'dev': dev_require,
    'test': tests_require,
    # https://wheel.readthedocs.io/en/latest/#defining-conditional-dependencies
    ':sys_platform == "win32"': install_requires_win_only,
}
def long_description():
    """Return the Markdown README for use as PyPI's long description."""
    with open('README.md', encoding='utf-8') as readme:
        return readme.read()
# Package metadata; most values are sourced from the httpie package itself
# (version, docstring, author, licence) so they are defined in one place.
setup(
    name='httpie',
    version=httpie.__version__,
    description=httpie.__doc__.strip(),
    long_description=long_description(),
    long_description_content_type='text/markdown',
    url='https://httpie.org/',
    download_url=f'https://github.com/httpie/httpie/archive/{httpie.__version__}.tar.gz',
    author=httpie.__author__,
    author_email='jakub@roztocil.co',
    license=httpie.__licence__,
    packages=find_packages(include=['httpie', 'httpie.*']),
    entry_points={
        # Both the `http` and `https` executables share one entry point;
        # the program distinguishes them by the invoked name.
        'console_scripts': [
            'http = httpie.__main__:main',
            'https = httpie.__main__:main',
        ],
    },
    python_requires='>=3.6',
    extras_require=extras_require,
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development',
        'Topic :: System :: Networking',
        'Topic :: Terminals',
        'Topic :: Text Processing',
        'Topic :: Utilities'
    ],
    project_urls={
        'GitHub': 'https://github.com/httpie/httpie',
        'Twitter': 'https://twitter.com/httpie',
        'Documentation': 'https://httpie.org/docs',
        'Online Demo': 'https://httpie.org/run',
    },
)
| 26.375 | 89 | 0.63179 |
acde80babface8dc9f3b5687fe3d5ffc10e1cd09 | 2,558 | py | Python | binary_centroid_compression/region_analysis/binary_image.py | sikendershahid91/DIP | feb8ef49a1aeca52ee5c6f8ee2e6b3de93a0ff97 | [
"MIT"
] | null | null | null | binary_centroid_compression/region_analysis/binary_image.py | sikendershahid91/DIP | feb8ef49a1aeca52ee5c6f8ee2e6b3de93a0ff97 | [
"MIT"
] | null | null | null | binary_centroid_compression/region_analysis/binary_image.py | sikendershahid91/DIP | feb8ef49a1aeca52ee5c6f8ee2e6b3de93a0ff97 | [
"MIT"
] | null | null | null | import numpy as np
class binary_image:
    """Histogram-based binarization of a grey-scale image.

    Builds a 256-bin intensity histogram, derives a threshold by iterative
    inter-means clustering on the (assumed bimodal) histogram, and produces
    a binary image where dark foreground pixels become white (255) blobs on
    a black (0) background.
    """

    def __init__(self):
        # hist: 256-bin intensity histogram; threshold: last computed cut.
        self.hist = [0] * 256
        self.threshold = 0

    def compute_histogram(self, image):
        """Computes the histogram of the input image
        takes as input:
        image: a grey scale image (2-D array of intensities in 0..255)
        returns a histogram (list of 256 counts)"""
        # Reset first so repeated calls do not accumulate counts from
        # previously processed images (the old code kept adding forever).
        self.hist = [0] * 256
        for r in range(0, image.shape[0]):
            for c in range(0, image.shape[1]):
                self.hist[image[r][c]] += 1
        return self.hist

    def expectation(self, hist):
        """
        takes a histogram,
        performs normalization
        returns the expected (mean) intensity, truncated to int
        """
        total = sum(hist)
        if total == 0:
            # Empty domain (e.g. no pixels below the threshold): the mean is
            # undefined; return 0 instead of raising ZeroDivisionError.
            return 0
        expectant = sum(value * count for value, count in enumerate(hist)) / total
        return int(expectant)

    def find_optimal_threshold(self, hist):
        """analyses a histogram to find the optimal threshold value assuming
        a bimodal histogram
        takes as input
        hist: a bimodal histogram
        returns: an optimal threshold value"""
        self.threshold = len(hist) // 2
        # Each pair holds [current, previous] cluster mean so convergence can
        # be detected.  Note: the original used `is not 0` (an identity test
        # on ints, a latent bug); `!=` is the correct value comparison.
        expectant1 = [0, 1]
        expectant2 = [0, 1]
        while expectant1[0] != expectant1[1] and expectant2[0] != expectant2[1]:
            lower_domain = hist[:self.threshold - 1]
            upper_domain = hist[self.threshold - 1:]
            # NOTE(review): expectation() interprets slice positions as
            # absolute intensities, so the upper-domain mean loses its
            # offset of (threshold - 1).  Preserved as-is; confirm intended.
            expectant1 = [self.expectation(lower_domain), expectant1[0]]
            expectant2 = [self.expectation(upper_domain), expectant2[0]]
            self.threshold = (expectant1[0] + expectant2[0]) // 2
        print('threshold', self.threshold)
        print(expectant1[0], expectant2[0])
        return self.threshold

    def binarize(self, image):
        """Compute the binary image of the input image based on histogram
        analysis and thresholding
        takes as input
        image: a grey scale image
        returns: a binary image
        'Assume that foreground objects are darker than background objects
        in the input gray-level image' — the cells are lower intensities in
        the original image:
        above threshold : set to 0   (black background)
        below/equal     : set to 255 (white blobs)
        """
        bin_img = image.copy()
        for r in range(0, image.shape[0]):
            for c in range(0, image.shape[1]):
                bin_img[r, c] = 0 if image[r][c] > self.threshold else 255
        return bin_img
| 32.379747 | 110 | 0.586005 |
acde80f6027fcc3b9b86d4b9c6d3d8f82b326f5e | 12,582 | py | Python | python/ccxt/lykke.py | chxxpeng/ccxt | ea7e8677ab074ec5e08130784b1747b3dc4e7e1f | [
"MIT"
] | null | null | null | python/ccxt/lykke.py | chxxpeng/ccxt | ea7e8677ab074ec5e08130784b1747b3dc4e7e1f | [
"MIT"
] | null | null | null | python/ccxt/lykke.py | chxxpeng/ccxt | ea7e8677ab074ec5e08130784b1747b3dc4e7e1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
class lykke (Exchange):
    """ccxt adapter for the Lykke exchange (Switzerland).

    Talks to two backends: the authenticated HFT REST API
    (hft-api.lykke.com) for trading, orders and balances, and the public
    mobile API (public-api.lykke.com) for ticker data.
    """

    def describe(self):
        """Return static exchange metadata (capabilities, URLs, API routes,
        fees) merged over the base Exchange description."""
        return self.deep_extend(super(lykke, self).describe(), {
            'id': 'lykke',
            'name': 'Lykke',
            'countries': ['CH'],
            'version': 'v1',
            'rateLimit': 200,
            'has': {
                'CORS': False,
                'fetchOHLCV': False,
                'fetchTrades': False,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchOrder': True,
                'fetchOrders': True,
            },
            'requiredCredentials': {
                # Only an api-key header is sent (see sign()); no HMAC secret.
                'apiKey': True,
                'secret': False,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/34487620-3139a7b0-efe6-11e7-90f5-e520cef74451.jpg',
                'api': {
                    'mobile': 'https://public-api.lykke.com/api',
                    'public': 'https://hft-api.lykke.com/api',
                    'private': 'https://hft-api.lykke.com/api',
                    'test': {
                        'mobile': 'https://public-api.lykke.com/api',
                        'public': 'https://hft-service-dev.lykkex.net/api',
                        'private': 'https://hft-service-dev.lykkex.net/api',
                    },
                },
                'www': 'https://www.lykke.com',
                'doc': [
                    'https://hft-api.lykke.com/swagger/ui/',
                    'https://www.lykke.com/lykke_api',
                ],
                'fees': 'https://www.lykke.com/trading-conditions',
            },
            'api': {
                'mobile': {
                    'get': [
                        'Market/{market}',
                    ],
                },
                'public': {
                    'get': [
                        'AssetPairs',
                        'AssetPairs/{id}',
                        'IsAlive',
                        'OrderBooks',
                        'OrderBooks/{AssetPairId}',
                    ],
                },
                'private': {
                    'get': [
                        'Orders',
                        'Orders/{id}',
                        'Wallets',
                    ],
                    'post': [
                        'Orders/limit',
                        'Orders/market',
                        'Orders/{id}/Cancel',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.0,  # as of 7 Feb 2018, see https://github.com/ccxt/ccxt/issues/1863
                    'taker': 0.0,  # https://www.lykke.com/cp/wallet-fees-and-limits
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BTC': 0.001,
                    },
                    'deposit': {
                        'BTC': 0,
                    },
                },
            },
        })

    def fetch_balance(self, params={}):
        """Fetch balances via private GET /Wallets and map each wallet
        (AssetId/Balance/Reserved) into the unified ccxt structure."""
        self.load_markets()
        balances = self.privateGetWallets()
        result = {'info': balances}
        for i in range(0, len(balances)):
            balance = balances[i]
            currency = balance['AssetId']
            total = balance['Balance']
            used = balance['Reserved']
            # Available funds = total minus the amount locked in open orders.
            free = total - used
            result[currency] = {
                'free': free,
                'used': used,
                'total': total,
            }
        return self.parse_balance(result)

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by exchange id via POST /Orders/{id}/Cancel."""
        return self.privatePostOrdersIdCancel({'id': id})

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit or market order.

        Market orders specify the asset the volume is denominated in
        (base when buying, quote when selling); limit orders carry a Price.
        The API response does not return an order id.
        """
        self.load_markets()
        market = self.market(symbol)
        query = {
            'AssetPairId': market['id'],
            'OrderAction': self.capitalize(side),
            'Volume': amount,
        }
        if type == 'market':
            query['Asset'] = market['base'] if (side == 'buy') else market['quote']
        elif type == 'limit':
            query['Price'] = price
        # Dispatch to privatePostOrdersLimit or privatePostOrdersMarket.
        method = 'privatePostOrders' + self.capitalize(type)
        result = getattr(self, method)(self.extend(query, params))
        return {
            'id': None,
            'info': result,
        }

    def fetch_markets(self):
        """Fetch tradable asset pairs via public GET /AssetPairs and build
        unified market structures (precision derived from Accuracy)."""
        markets = self.publicGetAssetPairs()
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = market['Id']
            base = market['BaseAssetId']
            quote = market['QuotingAssetId']
            base = self.common_currency_code(base)
            quote = self.common_currency_code(quote)
            symbol = market['Name']
            precision = {
                'amount': market['Accuracy'],
                'price': market['InvertedAccuracy'],
            }
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'active': True,
                'info': market,
                # Minimum tradable increment: one unit of the last decimal place.
                'lot': math.pow(10, -precision['amount']),
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision['amount']),
                        'max': math.pow(10, precision['amount']),
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': math.pow(10, precision['price']),
                    },
                },
            })
        return result

    def parse_ticker(self, ticker, market=None):
        """Convert a mobile-API ticker into the unified ticker structure.

        The endpoint supplies only bid/ask/lastPrice/volume24H, so OHLC
        fields are left as None; the timestamp is local receipt time."""
        timestamp = self.milliseconds()
        symbol = None
        if market:
            symbol = market['symbol']
        close = float(ticker['lastPrice'])
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': float(ticker['bid']),
            'bidVolume': None,
            'ask': float(ticker['ask']),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': close,
            'last': close,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': float(ticker['volume24H']),
            'info': ticker,
        }

    def fetch_ticker(self, symbol, params={}):
        """Fetch a single ticker via the mobile GET /Market/{market} endpoint."""
        self.load_markets()
        market = self.market(symbol)
        ticker = self.mobileGetMarketMarket(self.extend({
            'market': market['id'],
        }, params))
        return self.parse_ticker(ticker, market)

    def parse_order_status(self, status):
        """Map a Lykke order status onto a unified ccxt status.

        Error-like statuses (NotEnoughFunds, NoLiquidity, UnknownAsset,
        LeadToNegativeSpread) are passed through unchanged, as is any
        unrecognized value."""
        if status == 'Pending':
            return 'open'
        elif status == 'InOrderBook':
            return 'open'
        elif status == 'Processing':
            return 'open'
        elif status == 'Matched':
            return 'closed'
        elif status == 'Cancelled':
            return 'canceled'
        elif status == 'NotEnoughFunds':
            return 'NotEnoughFunds'
        elif status == 'NoLiquidity':
            return 'NoLiquidity'
        elif status == 'UnknownAsset':
            return 'UnknownAsset'
        elif status == 'LeadToNegativeSpread':
            return 'LeadToNegativeSpread'
        return status

    def parse_order(self, order, market=None):
        """Convert a raw Lykke order into the unified order structure.

        The market is resolved from AssetPairId when not supplied; the
        timestamp is taken from the first non-empty of LastMatchTime,
        Registered, CreatedAt."""
        status = self.parse_order_status(order['Status'])
        symbol = None
        if not market:
            if 'AssetPairId' in order:
                if order['AssetPairId'] in self.markets_by_id:
                    market = self.markets_by_id[order['AssetPairId']]
        if market:
            symbol = market['symbol']
        timestamp = None
        if ('LastMatchTime' in list(order.keys())) and(order['LastMatchTime']):
            timestamp = self.parse8601(order['LastMatchTime'])
        elif ('Registered' in list(order.keys())) and(order['Registered']):
            timestamp = self.parse8601(order['Registered'])
        elif ('CreatedAt' in list(order.keys())) and(order['CreatedAt']):
            timestamp = self.parse8601(order['CreatedAt'])
        price = self.safe_float(order, 'Price')
        amount = self.safe_float(order, 'Volume')
        remaining = self.safe_float(order, 'RemainingVolume')
        filled = amount - remaining
        cost = filled * price
        result = {
            'info': order,
            'id': order['Id'],
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': None,
            'side': None,
            'price': price,
            'cost': cost,
            'average': None,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
        }
        return result

    def fetch_order(self, id, symbol=None, params={}):
        """Fetch one order by id via private GET /Orders/{id}."""
        self.load_markets()
        response = self.privateGetOrdersId(self.extend({
            'id': id,
        }, params))
        return self.parse_order(response)

    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch all orders via private GET /Orders (symbol filter is
        applied client-side by parse_orders)."""
        self.load_markets()
        response = self.privateGetOrders()
        return self.parse_orders(response, None, since, limit)

    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders by querying status=InOrderBook."""
        self.load_markets()
        response = self.privateGetOrders(self.extend({
            'status': 'InOrderBook',
        }, params))
        return self.parse_orders(response, None, since, limit)

    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch closed (fully filled) orders by querying status=Matched."""
        self.load_markets()
        response = self.privateGetOrders(self.extend({
            'status': 'Matched',
        }, params))
        return self.parse_orders(response, None, since, limit)

    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book via public GET /OrderBooks/{AssetPairId}.

        The API returns one entry per side, each with its own timestamp;
        both sides are merged and the newest timestamp is kept."""
        self.load_markets()
        response = self.publicGetOrderBooksAssetPairId(self.extend({
            'AssetPairId': self.market_id(symbol),
        }, params))
        orderbook = {
            'timestamp': None,
            'bids': [],
            'asks': [],
        }
        timestamp = None
        for i in range(0, len(response)):
            side = response[i]
            if side['IsBuy']:
                orderbook['bids'] = self.array_concat(orderbook['bids'], side['Prices'])
            else:
                orderbook['asks'] = self.array_concat(orderbook['asks'], side['Prices'])
            sideTimestamp = self.parse8601(side['Timestamp'])
            timestamp = sideTimestamp if (timestamp is None) else max(timestamp, sideTimestamp)
        return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'Price', 'Volume')

    def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
        """Parse one [price, volume] level, normalizing the volume sign.

        NOTE(review): negative volumes are flipped positive here —
        presumably Lykke reports one side's volumes as negative; confirm
        against the API before changing."""
        price = float(bidask[priceKey])
        amount = float(bidask[amountKey])
        if amount < 0:
            amount = -amount
        return [price, amount]

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL/headers/body for a public, mobile or
        private call; private calls authenticate via an api-key header
        only (no request signing)."""
        url = self.urls['api'][api] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'private':
            if method == 'GET':
                if query:
                    url += '?' + self.urlencode(query)
            self.check_required_credentials()
            headers = {
                'api-key': self.apiKey,
                'Accept': 'application/json',
                'Content-Type': 'application/json',
            }
            if method == 'POST':
                if params:
                    body = self.json(params)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 36.155172 | 126 | 0.470672 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.