hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acee39d1e44e2ec1645ec5c06efc6db3b3294e4b | 718 | py | Python | optima-example-consumer.py | schamanski/optima-kafka-client | a026557c2d678d82b8be7ec2a15aade6b45e32b1 | [
"MIT"
] | null | null | null | optima-example-consumer.py | schamanski/optima-kafka-client | a026557c2d678d82b8be7ec2a15aade6b45e32b1 | [
"MIT"
] | null | null | null | optima-example-consumer.py | schamanski/optima-kafka-client | a026557c2d678d82b8be7ec2a15aade6b45e32b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from kafka import KafkaConsumer
import sys
import json
import pprint
if __name__ == '__main__':
if len(sys.argv) != 4:
raise ValueError('Usage: optima-example-consumer.py host port topic')
host = sys.argv[1]
port = sys.argv[2]
topic = sys.argv[3]
pp = pprint.PrettyPrinter(indent=4, sort_dicts=False)
consumer = KafkaConsumer(topic,
bootstrap_servers=host + ':' + port,
value_deserializer=lambda m: json.loads(m.decode('utf-8')),
auto_offset_reset='latest')
for msg in consumer:
data_dict = msg.value
pp.pprint(data_dict)
| 27.615385 | 88 | 0.60585 |
acee3a44de637079c4fa702a60eedd4ad23bb2c3 | 1,070 | py | Python | src/deploy/local.py | gracemukendi-dev/powerbank | a0052d474de013a25c198bcc38009f58b3a5e332 | [
"MIT"
] | null | null | null | src/deploy/local.py | gracemukendi-dev/powerbank | a0052d474de013a25c198bcc38009f58b3a5e332 | [
"MIT"
] | null | null | null | src/deploy/local.py | gracemukendi-dev/powerbank | a0052d474de013a25c198bcc38009f58b3a5e332 | [
"MIT"
] | null | null | null | '''
This class keeps the state of local deployments.
Local Deployment states consist of:
* Corresponding pwb config files
* Corresponding terraform files
Note: This class structure and utility are still under construction and
may be changed in the future.
'''
import os
import random
import logging
logger = logging.getLogger('root')
class PWB_Local():
def __init__(self, config_file_loc, file_content):
self.id = random.randint(0,10)
self.config_file_loc = config_file_loc
        self.configuration = Configuration(file_content)
def run(self):
logger.info("+++Installing localstack+++")
os.system("sudo /vagrant/src/scripts/localstack.sh")
logger.info("+++Running terraform for local resources+++")
for resource in self.configuration.resources:
os.system(f"sudo /vagrant/src/scripts/terraform.sh {resource}")
class Configuration():
def __init__(self, file_content):
self.resources = file_content["resources"]
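

# Hypothetical usage sketch (not part of the original module). The config
# dict shape ({"resources": [...]}) matches what Configuration expects above;
# the resource names are placeholder assumptions. run() shells out to the
# vagrant scripts, so this demo only constructs the object and inspects it.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    example_content = {"resources": ["s3", "dynamodb"]}  # assumed example values
    deployment = PWB_Local("/vagrant/pwb.yml", example_content)
    logger.info("Deployment %s manages resources: %s",
                deployment.id, deployment.configuration.resources)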
| 32.424242 | 75 | 0.680374 |
acee3b14a2a69cfdef8bb5ba3de8ac6ec761e4d8 | 6,590 | py | Python | bindings/python/ensmallen_graph/datasets/string/ureaplasmaparvum.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/ureaplasmaparvum.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/ureaplasmaparvum.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Ureaplasma parvum.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:17:38.243996
The undirected graph Ureaplasma parvum has 610 nodes and 28103 weighted
edges, of which none are self-loops. The graph is quite dense as it has
a density of 0.15130 and has 4 connected components, where the component
with most nodes has 595 nodes and the component with the least nodes has
3 nodes. The graph median node degree is 82, the mean node degree is 92.14,
and the node degree mode is 5. The top 5 most central nodes are 273119.UU457
(degree 302), 273119.UU339 (degree 279), 273119.UU141 (degree 254), 273119.UU084
(degree 249) and 273119.UU324 (degree 247).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import UreaplasmaParvum
# Then load the graph
graph = UreaplasmaParvum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def UreaplasmaParvum(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Ureaplasma parvum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
    cache_path: str = "graphs/string",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Ureaplasma parvum graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:17:38.243996
The undirected graph Ureaplasma parvum has 610 nodes and 28103 weighted
edges, of which none are self-loops. The graph is quite dense as it has
a density of 0.15130 and has 4 connected components, where the component
with most nodes has 595 nodes and the component with the least nodes has
3 nodes. The graph median node degree is 82, the mean node degree is 92.14,
and the node degree mode is 5. The top 5 most central nodes are 273119.UU457
(degree 302), 273119.UU339 (degree 279), 273119.UU141 (degree 254), 273119.UU084
(degree 249) and 273119.UU324 (degree 247).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import UreaplasmaParvum
# Then load the graph
graph = UreaplasmaParvum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="UreaplasmaParvum",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.867725 | 223 | 0.69909 |
acee3cc37faa95b720ad349025b8910cc2ce0a07 | 2,343 | py | Python | matplotlib-3.4.3/matplotlib-3.4.3/examples/images_contours_and_fields/demo_bboximage.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | 1 | 2021-11-13T17:21:44.000Z | 2021-11-13T17:21:44.000Z | matplotlib-3.4.3/matplotlib-3.4.3/examples/images_contours_and_fields/demo_bboximage.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | null | null | null | matplotlib-3.4.3/matplotlib-3.4.3/examples/images_contours_and_fields/demo_bboximage.py | JohnLauFoo/clc_packages_Yu | 259f01d9b5c02154ce258734d519ae8995cd0991 | [
"MIT"
] | null | null | null | """
==============
BboxImage Demo
==============
A `~matplotlib.image.BboxImage` can be used to position an image according to
a bounding box. This demo shows how to show an image inside a `.text.Text`'s
bounding box as well as how to manually create a bounding box for the image.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.image import BboxImage
from matplotlib.transforms import Bbox, TransformedBbox
fig, (ax1, ax2) = plt.subplots(ncols=2)
# ----------------------------
# Create a BboxImage with Text
# ----------------------------
txt = ax1.text(0.5, 0.5, "test", size=30, ha="center", color="w")
kwargs = dict()
bbox_image = BboxImage(txt.get_window_extent,
norm=None,
origin=None,
clip_on=False,
**kwargs
)
a = np.arange(256).reshape(1, 256)/256.
bbox_image.set_data(a)
ax1.add_artist(bbox_image)
# ------------------------------------
# Create a BboxImage for each colormap
# ------------------------------------
a = np.linspace(0, 1, 256).reshape(1, -1)
a = np.vstack((a, a))
# List of all colormaps; skip reversed colormaps.
maps = sorted(m for m in plt.colormaps() if not m.endswith("_r"))
ncol = 2
nrow = len(maps)//ncol + 1
xpad_fraction = 0.3
dx = 1./(ncol + xpad_fraction*(ncol - 1))
ypad_fraction = 0.3
dy = 1./(nrow + ypad_fraction*(nrow - 1))
for i, m in enumerate(maps):
ix, iy = divmod(i, nrow)
bbox0 = Bbox.from_bounds(ix*dx*(1 + xpad_fraction),
1. - iy*dy*(1 + ypad_fraction) - dy,
dx, dy)
bbox = TransformedBbox(bbox0, ax2.transAxes)
bbox_image = BboxImage(bbox,
cmap=plt.get_cmap(m),
norm=None,
origin=None,
**kwargs
)
bbox_image.set_data(a)
ax2.add_artist(bbox_image)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.image.BboxImage`
# - `matplotlib.transforms.Bbox`
# - `matplotlib.transforms.TransformedBbox`
# - `matplotlib.text.Text`
| 28.228916 | 78 | 0.533931 |
acee3cc38d7d23a1b525e57aa1f4524c9ca82604 | 362 | py | Python | setup.py | mgoulao/2v2-Slime-Volleyball | 7b9b2f345daba3e27e4fed3ed4dda9387b5761da | [
"Apache-2.0"
] | 2 | 2021-09-10T11:18:03.000Z | 2021-09-13T18:46:39.000Z | setup.py | mgoulao/2v2-Slime-Volleyball | 7b9b2f345daba3e27e4fed3ed4dda9387b5761da | [
"Apache-2.0"
] | null | null | null | setup.py | mgoulao/2v2-Slime-Volleyball | 7b9b2f345daba3e27e4fed3ed4dda9387b5761da | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
setup(
name='multiagentslimevolleygym',
version='0.0.1',
keywords='games, environment, agent, rl, ai, gym',
url='',
description='2v2 Slime Volleyball Gym Environment',
packages=['slimevolleygym'],
install_requires=[
'gym>=0.9.4',
'numpy>=1.13.0',
'opencv-python>=3.4.2.0'
]
)
| 22.625 | 55 | 0.604972 |
acee3d2b3e9c029e6288ceec636999cb4975f090 | 477 | py | Python | setup.py | onewhaleid/scaling | 18f5dc9b9abddee647d92e2d2dc91a7ec359d28c | [
"MIT"
] | null | null | null | setup.py | onewhaleid/scaling | 18f5dc9b9abddee647d92e2d2dc91a7ec359d28c | [
"MIT"
] | null | null | null | setup.py | onewhaleid/scaling | 18f5dc9b9abddee647d92e2d2dc91a7ec359d28c | [
"MIT"
] | null | null | null | from setuptools import setup
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='scaling',
version='0.2.3',
packages=['scaling'],
install_requires=['pint'],
author='Dan Howe',
author_email='d.howe@wrl.unsw.edu.au',
url='https://github.com/onewhaleid/scaling',
description='convert units using Froude and Reynolds similitude',
long_description=long_description,
long_description_content_type='text/markdown')
| 28.058824 | 69 | 0.69392 |
acee3d76f8cffdb61e027794d20c8c08261497da | 573 | py | Python | concurrency/futures/futures_future_exception.py | scotthuang1989/Python-3-Module-of-the-Week | 5f45f4602f084c899924ebc9c6b0155a6dc76f56 | [
"Apache-2.0"
] | 2 | 2018-09-17T05:52:12.000Z | 2021-11-09T17:19:29.000Z | concurrency/futures/futures_future_exception.py | scotthuang1989/Python-3-Module-of-the-Week | 5f45f4602f084c899924ebc9c6b0155a6dc76f56 | [
"Apache-2.0"
] | null | null | null | concurrency/futures/futures_future_exception.py | scotthuang1989/Python-3-Module-of-the-Week | 5f45f4602f084c899924ebc9c6b0155a6dc76f56 | [
"Apache-2.0"
] | 2 | 2017-10-18T09:01:27.000Z | 2018-08-22T00:41:22.000Z | """
If a task raises an unhandled exception, it is saved to the Future for the
task and made available through the result() or exception() methods.
"""
from concurrent import futures
def task(n):
print('{}: starting'.format(n))
raise ValueError('the value {} is no good'.format(n))
ex = futures.ThreadPoolExecutor(max_workers=2)
print('main: starting')
f = ex.submit(task, 5)
error = f.exception()
print('main: error: {}'.format(error))
try:
result = f.result()
except ValueError as e:
print('main: saw error "{}" when accessing result'.format(e))
| 22.038462 | 75 | 0.687609 |
acee3db8b2345761ac3bd5d6f97c9ecda5906ec3 | 334 | py | Python | imaginarium/views/users/validation.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/views/users/validation.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/views/users/validation.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | from imaginarium.views.validation import (
Validator, validate_email
)
class UserValidator(Validator):
requirements = {
'id': int,
'name': str,
'surname': str,
'company_id': int,
'enabled': bool,
'email': validate_email,
'password': str,
'username': str
}
| 19.647059 | 42 | 0.553892 |
acee3e47575066ad49a512da9101d563263fec25 | 1,317 | py | Python | v2ray_stats/email.py | Ricky-Hao/V2Ray.Stats | 91c83af03ac5b0d1b3b6d0898df612a266b651bc | [
"Apache-2.0"
] | 13 | 2019-04-06T14:21:59.000Z | 2021-09-15T10:46:54.000Z | v2ray_stats/email.py | Ricky-Hao/V2Ray.Stats | 91c83af03ac5b0d1b3b6d0898df612a266b651bc | [
"Apache-2.0"
] | 2 | 2020-03-07T16:51:23.000Z | 2022-01-26T04:13:01.000Z | v2ray_stats/email.py | Ricky-Hao/V2Ray.Stats | 91c83af03ac5b0d1b3b6d0898df612a266b651bc | [
"Apache-2.0"
] | 7 | 2019-09-28T17:45:46.000Z | 2022-01-26T03:59:55.000Z | import smtplib
from email.header import Header
from email.mime.text import MIMEText
from v2ray_stats.config import Config
from v2ray_stats.utils import V2RayLogger
def send_mail(month: str, data: list):
"""
Send traffic report email to user.
:param month: Report month
:param data: Data
:return:
"""
V2RayLogger.debug('SMTP server: {0}:{1}.'.format(Config.get('mail_host'), Config.get('mail_port')))
smtp = smtplib.SMTP_SSL(Config.get('mail_host'), Config.get('mail_port'))
V2RayLogger.debug('SMTP login with: {0}:{1}.'.format(Config.get('mail_user'), Config.get('mail_pass')))
smtp.login(Config.get('mail_user'), Config.get('mail_pass'))
V2RayLogger.debug('SMTP login successful.')
for row in data:
V2RayLogger.debug('Send email: {0}:{1}.'.format(row[0], row[1]))
message = '<tr align=left><th align="left">{0:30s}</th><th align="left">{1:9s}</th></tr>\n'.format(
row[0], row[1])
message = MIMEText(message, 'html')
message['Subject'] = Header(Config.get('mail_subject') + ': {0}'.format(month))
message['From'] = Config.get('mail_user')
message['To'] = row[0]
smtp.sendmail(Config.get('mail_user'), row[0], message.as_string())
V2RayLogger.info('Send traffic to: {0}.'.format(row[0]))
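

# Hypothetical usage sketch (not part of the original module). Each data row
# is assumed to be a (recipient_email, traffic) pair, matching how row[0] and
# row[1] are used above; running this really sends mail and therefore needs
# valid mail_* values in Config.
if __name__ == "__main__":
    example_rows = [("user@example.com", "1.5 GB")]  # assumed row shape
    send_mail("2021-01", example_rows)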
| 38.735294 | 107 | 0.642369 |
acee3e5b6d2f19333d7df0fa13ff2da7732d0fff | 2,353 | py | Python | batch_requests/settings.py | tanwanirahul/django-batch-requests | 9c5afc42f7542f466247f4ffed9c44e1c49fa20d | [
"MIT"
] | 42 | 2015-03-23T06:58:53.000Z | 2021-10-13T12:40:42.000Z | batch_requests/settings.py | tanwanirahul/django-batch-requests | 9c5afc42f7542f466247f4ffed9c44e1c49fa20d | [
"MIT"
] | 7 | 2016-02-27T20:33:11.000Z | 2021-06-10T17:29:00.000Z | batch_requests/settings.py | tanwanirahul/django-batch-requests | 9c5afc42f7542f466247f4ffed9c44e1c49fa20d | [
"MIT"
] | 22 | 2015-04-01T23:44:08.000Z | 2020-09-24T18:25:09.000Z | '''
@author: Rahul Tanwani
@summary: Contains the default settings.
'''
from django.conf import settings
# django.utils.importlib was removed in Django 1.9; the stdlib module is a
# drop-in replacement here.
from importlib import import_module
import multiprocessing
DEFAULTS = {
"HEADERS_TO_INCLUDE": ["HTTP_USER_AGENT", "HTTP_COOKIE"],
"DEFAULT_CONTENT_TYPE": "application/json",
"USE_HTTPS": False,
"EXECUTE_PARALLEL": False,
"CONCURRENT_EXECUTOR": "batch_requests.concurrent.executor.ThreadBasedExecutor",
"NUM_WORKERS": multiprocessing.cpu_count() * 4,
"ADD_DURATION_HEADER": True,
"DURATION_HEADER_NAME": "batch_requests.duration",
"MAX_LIMIT": 20
}
USER_DEFINED_SETTINGS = getattr(settings, 'BATCH_REQUESTS', {})
def import_class(class_path):
'''
Imports the class for the given class name.
'''
module_name, class_name = class_path.rsplit(".", 1)
module = import_module(module_name)
claz = getattr(module, class_name)
return claz
class BatchRequestSettings(object):
'''
Allow API settings to be accessed as properties.
'''
def __init__(self, user_settings=None, defaults=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.executor = self._executor()
def _executor(self):
'''
Creating an ExecutorPool is a costly operation. Executor needs to be instantiated only once.
'''
if self.EXECUTE_PARALLEL is False:
executor_path = "batch_requests.concurrent.executor.SequentialExecutor"
executor_class = import_class(executor_path)
return executor_class()
else:
executor_path = self.CONCURRENT_EXECUTOR
executor_class = import_class(executor_path)
return executor_class(self.NUM_WORKERS)
def __getattr__(self, attr):
'''
Override the attribute access behavior.
'''
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Cache the result
setattr(self, attr, val)
return val
br_settings = BatchRequestSettings(USER_DEFINED_SETTINGS, DEFAULTS)
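

# Hypothetical usage sketch (not part of the original module), illustrating
# the attribute access implemented by __getattr__ above: known settings fall
# back to DEFAULTS and get cached, unknown names raise AttributeError. It
# assumes a configured Django settings module, which this file needs anyway.
if __name__ == "__main__":
    print(br_settings.MAX_LIMIT)         # 20 unless overridden in BATCH_REQUESTS
    print(br_settings.EXECUTE_PARALLEL)  # False by default
    try:
        br_settings.NOT_A_SETTING
    except AttributeError as exc:
        print(exc)                       # Invalid API setting: 'NOT_A_SETTING'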
| 28.695122 | 104 | 0.656609 |
acee3ea4b45c377bbce816376b0f4d90370f03ff | 1,805 | py | Python | model/deeplab.py | Originofamonia/pylon | a13f19d64e366545d4052012dcea7f2c0776e5dc | [
"Apache-2.0"
] | 16 | 2020-10-26T20:09:00.000Z | 2021-09-07T12:05:39.000Z | model/deeplab.py | aggelis/pylon | e26202b2c1cfbb8b5c444f840763f0ce839f048a | [
"Apache-2.0"
] | null | null | null | model/deeplab.py | aggelis/pylon | e26202b2c1cfbb8b5c444f840763f0ce839f048a | [
"Apache-2.0"
] | 8 | 2020-10-26T08:01:41.000Z | 2022-03-05T20:25:59.000Z | import segmentation_models_pytorch as smp
from trainer.start import *
from .common import *
@dataclass
class Deeplabv3Config(BaseConfig):
n_out: int
backbone: str = 'resnet50'
n_in: int = 1
n_dec_ch: int = 256
dilate: bool = False
weights: str = 'imagenet'
@property
def name(self):
name = f'deeplabv3+-{self.backbone}-dec{self.n_dec_ch}'
if self.weights is not None:
name += f'-{self.weights}'
return name
def make_model(self):
return Deeplabv3(self)
class Deeplabv3(nn.Module):
def __init__(self, conf: Deeplabv3Config):
super().__init__()
self.net = smp.DeepLabV3Plus(conf.backbone,
encoder_weights=conf.weights,
in_channels=conf.n_in,
decoder_channels=conf.n_dec_ch,
classes=conf.n_out,
upsampling=1)
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, img, classification=None, **kwargs):
        # Enforcing float32 here is a good idea: if the loss function
        # involves a reduction operation, reduced precision would be
        # harmful, and this prevents that problem.
seg = self.net(img).float()
pred = self.pool(seg)
pred = torch.flatten(pred, start_dim=1)
loss = None
loss_pred = None
loss_bbox = None
if classification is not None:
loss_pred = F.binary_cross_entropy_with_logits(
pred, classification.float())
loss = loss_pred
return ModelReturn(
pred=pred,
pred_seg=seg,
loss=loss,
loss_pred=loss_pred,
loss_bbox=loss_bbox,
)
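

# Hypothetical usage sketch (not part of the original module). The class
# count and the 256x256 input resolution are illustrative assumptions; the
# snippet shows the config -> make_model() pattern used by this codebase.
if __name__ == "__main__":
    conf = Deeplabv3Config(n_out=14, weights=None)
    print(conf.name)  # "deeplabv3+-resnet50-dec256"
    model = conf.make_model()
    x = torch.randn(2, conf.n_in, 256, 256)
    out = model(x)  # no classification target, so out.loss is None
    print(out.pred.shape)  # (2, n_out)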
| 29.590164 | 69 | 0.558449 |
acee3ee119b70828fd38143dc501c895e6e80a42 | 1,916 | py | Python | arcrest/ago.py | williamscraigm/arcrest | 5a381988fe0035678dc94703d857c6ecb4194738 | [
"Apache-2.0"
] | 11 | 2015-02-06T23:35:49.000Z | 2021-11-28T21:26:46.000Z | arcrest/ago.py | williamscraigm/arcrest | 5a381988fe0035678dc94703d857c6ecb4194738 | [
"Apache-2.0"
] | 1 | 2015-06-24T13:46:44.000Z | 2015-07-01T07:46:28.000Z | arcrest/ago.py | williamscraigm/arcrest | 5a381988fe0035678dc94703d857c6ecb4194738 | [
"Apache-2.0"
] | 6 | 2015-02-23T22:51:53.000Z | 2021-01-17T05:57:24.000Z | # coding: utf-8
"""Represents the ArcGIS online REST APIs"""
from . import compat, server
__all__ = ['AGORoot', 'Community', 'Content', 'Portals']
class AGORoot(server.RestURL):
def __init__(self, url, username=None, password=None,
token=None, generate_token=False,
expiration=60):
url_list = list(compat.urlsplit(url))
if not url_list[2].endswith('/'):
url_list[2] += "/"
url = compat.urlunsplit(url_list)
if username is not None and password is not None:
self._pwdmgr.add_password(None,
url,
username,
password)
if token:
self.__token__ = token
elif generate_token:
self.__generateToken(url, username, password, expiration)
super(AGORoot, self).__init__(url)
def search(self, q=None, bbox=None, start=None, num=None,
sortField=None, sortOrder=None):
return self._get_subfolder("./search",
server.JsonPostResult,
{'q': q,
'bbox': bbox,
'start': start,
'num': num,
'sortField': sortField,
'sortOrder': sortOrder})
@property
def community(self):
return self._get_subfolder("./community/", Community)
@property
def content(self):
return self._get_subfolder("./content/", Content)
@property
def portals(self):
return self._get_subfolder("./portals/", Portals)
class Community(server.RestURL):
pass
class Content(server.RestURL):
pass
class Portals(server.RestURL):
pass
| 35.481481 | 69 | 0.497912 |
acee40684d5f25d43d9db61ac3afbb27826af906 | 565 | py | Python | example_gridsearch.py | gmum/PMLM | 9a5912b3836a74ac06cc8b5e2eaaa38ea719c437 | [
"MIT"
] | null | null | null | example_gridsearch.py | gmum/PMLM | 9a5912b3836a74ac06cc8b5e2eaaa38ea719c437 | [
"MIT"
] | null | null | null | example_gridsearch.py | gmum/PMLM | 9a5912b3836a74ac06cc8b5e2eaaa38ea719c437 | [
"MIT"
] | null | null | null | import sys
from sklearn.datasets import load_svmlight_file
from sklearn import grid_search
from pmlm import PMLM
from pprint import pprint
if __name__ == "__main__":
X, y = load_svmlight_file(sys.argv[1])
parameters = {
'density_estimator': ('normal', 'kde'),
'gamma': (0.25, 0.5, 1.0)
}
pmlm = PMLM(random_state=1)
clf = grid_search.GridSearchCV(pmlm, parameters)
clf.fit(X, y)
pprint(clf.grid_scores_)
| 20.178571 | 52 | 0.693805 |
acee40d7102461fe7df9d406484fff6cf69ec112 | 362 | py | Python | test/ipc/dummy_basic_bolt.py | thedrow/streamparse | 6d614434747009f16389db03f538d82733183eac | [
"Apache-2.0"
] | null | null | null | test/ipc/dummy_basic_bolt.py | thedrow/streamparse | 6d614434747009f16389db03f538d82733183eac | [
"Apache-2.0"
] | null | null | null | test/ipc/dummy_basic_bolt.py | thedrow/streamparse | 6d614434747009f16389db03f538d82733183eac | [
"Apache-2.0"
] | null | null | null | import sys
import os
here = os.path.split(os.path.abspath(__file__))[0]
root = os.path.abspath(os.path.join(here, '../../'))
sys.path[0:0] = [root]
from streamparse.bolt import BasicBolt
class DummyBolt(BasicBolt):
def process(self, tup):
if tup.id == "emit":
self.emit(tup.values)
if __name__ == '__main__':
DummyBolt().run()
| 18.1 | 52 | 0.635359 |
acee40dbd9e4eb8a0e7fcaa95b9a095da2641214 | 312 | py | Python | mcmala/__init__.py | mala-project/mcmala | beabfbb2d4f64269096e02433a51b6fa17e9aa91 | [
"BSD-3-Clause"
] | null | null | null | mcmala/__init__.py | mala-project/mcmala | beabfbb2d4f64269096e02433a51b6fa17e9aa91 | [
"BSD-3-Clause"
] | 8 | 2021-12-15T09:49:04.000Z | 2022-01-19T16:02:08.000Z | mcmala/__init__.py | mala-project/mcmala | beabfbb2d4f64269096e02433a51b6fa17e9aa91 | [
"BSD-3-Clause"
] | null | null | null | """
Monte Carlo for MALA.
A frontend package to execute Monte Carlo simulations for MALA.
"""
from .simulation import ConfigurationSuggester, IsingGrid, \
IsingModelConfigurations, IsingModelEvaluator, \
AtomDisplacer
from .montecarlo import MarkovChain, Averager
| 28.363636 | 71 | 0.692308 |
acee4120d5cdb3413699db7cb1dfb79c0ea8c325 | 4,601 | py | Python | sila_cetoni/controllers/sila/control_loop_service/generated/controlloopservice/controlloopservice_base.py | CETONI-Software/sila_cetoni_controllers | 6ea228986e0fd684b8ce5182a34a70222386b493 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T13:39:58.000Z | 2022-03-08T13:39:58.000Z | sila_cetoni/controllers/sila/control_loop_service/generated/controlloopservice/controlloopservice_base.py | CETONI-Software/sila_cetoni_controllers | 6ea228986e0fd684b8ce5182a34a70222386b493 | [
"BSD-3-Clause"
] | null | null | null | sila_cetoni/controllers/sila/control_loop_service/generated/controlloopservice/controlloopservice_base.py | CETONI-Software/sila_cetoni_controllers | 6ea228986e0fd684b8ce5182a34a70222386b493 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from queue import Queue
from typing import Any, Dict, List, Optional, Union
from sila2.framework import Command, Feature, FullyQualifiedIdentifier, Property
from sila2.server import FeatureImplementationBase, ObservableCommandInstance
from .controlloopservice_types import RunControlLoop_Responses, StopControlLoop_Responses, WriteSetPoint_Responses
class ControlLoopServiceBase(FeatureImplementationBase, ABC):
_ControllerValue_producer_queue: Queue[float]
_SetPointValue_producer_queue: Queue[float]
def __init__(self):
"""
        Allows controlling a Qmix Device with a Control Loop
"""
self._ControllerValue_producer_queue = Queue()
self._SetPointValue_producer_queue = Queue()
@abstractmethod
def get_NumberOfChannels(self, *, metadata: Dict[FullyQualifiedIdentifier, Any]) -> int:
"""
The number of controller channels.
:param metadata: The SiLA Client Metadata attached to the call
:return: The number of controller channels.
"""
pass
def update_ControllerValue(self, ControllerValue: float, queue: Optional[Queue[float]] = None):
"""
The actual value from the Device
This method updates the observable property 'ControllerValue'.
"""
if queue:
queue.put(ControllerValue)
else:
self._ControllerValue_producer_queue.put(ControllerValue)
def ControllerValue_on_subscription(
self, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> Optional[Queue[float]]:
"""
The actual value from the Device
This method is called when a client subscribes to the observable property 'ControllerValue'
:param metadata: The SiLA Client Metadata attached to the call
:return: Optional `Queue` that should be used for updating this property
"""
pass
def update_SetPointValue(self, SetPointValue: float, queue: Optional[Queue[float]] = None):
"""
The current SetPoint value of the Device
This method updates the observable property 'SetPointValue'.
"""
if queue:
queue.put(SetPointValue)
else:
self._SetPointValue_producer_queue.put(SetPointValue)
def SetPointValue_on_subscription(self, *, metadata: Dict[FullyQualifiedIdentifier, Any]) -> Optional[Queue[float]]:
"""
The current SetPoint value of the Device
This method is called when a client subscribes to the observable property 'SetPointValue'
:param metadata: The SiLA Client Metadata attached to the call
:return: Optional `Queue` that should be used for updating this property
"""
pass
@abstractmethod
def WriteSetPoint(
self, SetPointValue: float, *, metadata: Dict[FullyQualifiedIdentifier, Any]
) -> WriteSetPoint_Responses:
"""
Write a Set Point value to the Controller Device
:param SetPointValue: The Set Point value to write
:param metadata: The SiLA Client Metadata attached to the call
"""
pass
@abstractmethod
def StopControlLoop(self, *, metadata: Dict[FullyQualifiedIdentifier, Any]) -> StopControlLoop_Responses:
"""
        Stops the Control Loop (has no effect if no Loop is currently running)
:param metadata: The SiLA Client Metadata attached to the call
"""
pass
@abstractmethod
def RunControlLoop(
self, *, metadata: Dict[FullyQualifiedIdentifier, Any], instance: ObservableCommandInstance
) -> RunControlLoop_Responses:
"""
Run the Control Loop
:param metadata: The SiLA Client Metadata attached to the call
:param instance: The command instance, enabling sending status updates to subscribed clients
"""
pass
@abstractmethod
def get_calls_affected_by_ChannelIndex(self) -> List[Union[Feature, Command, Property, FullyQualifiedIdentifier]]:
"""
Returns the fully qualified identifiers of all features, commands and properties affected by the
        SiLA Client Metadata 'ChannelIndex'.
**Description of 'ChannelIndex'**:
The index of the channel that should be used. This value is 0-indexed, i.e. the first channel has index 0, the second one index 1 and so on.
:return: Fully qualified identifiers of all features, commands and properties affected by the
        SiLA Client Metadata 'ChannelIndex'.
"""
pass
| 33.583942 | 148 | 0.682678 |
acee41217a9df2aa68373c58bdbf411740c583fb | 4,071 | py | Python | kubernetes/client/models/v1_load_balancer_ingress.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_load_balancer_ingress.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_load_balancer_ingress.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LoadBalancerIngress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hostname': 'str',
'ip': 'str'
}
attribute_map = {
'hostname': 'hostname',
'ip': 'ip'
}
def __init__(self, hostname=None, ip=None):
"""
V1LoadBalancerIngress - a model defined in Swagger
"""
self._hostname = None
self._ip = None
self.discriminator = None
if hostname is not None:
self.hostname = hostname
if ip is not None:
self.ip = ip
@property
def hostname(self):
"""
Gets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:return: The hostname of this V1LoadBalancerIngress.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:param hostname: The hostname of this V1LoadBalancerIngress.
:type: str
"""
self._hostname = hostname
@property
def ip(self):
"""
Gets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:return: The ip of this V1LoadBalancerIngress.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""
Sets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:param ip: The ip of this V1LoadBalancerIngress.
:type: str
"""
self._ip = ip
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1LoadBalancerIngress):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
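

# Hypothetical usage sketch (not part of the generated module), showing the
# getter/setter and serialization helpers defined above; the hostname and IP
# are placeholder values.
if __name__ == "__main__":
    ingress = V1LoadBalancerIngress(hostname="lb.example.com", ip="203.0.113.10")
    print(ingress.to_dict())  # {'hostname': 'lb.example.com', 'ip': '203.0.113.10'}
    print(ingress == V1LoadBalancerIngress(ip="203.0.113.10"))  # False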
| 26.264516 | 112 | 0.561533 |
acee420f79cb30eeebe31e7564e27de019ccb7b9 | 785 | py | Python | tests/test_rewriter.py | la-luo/py2star | 5664a1eaed6940eb1b9c9d2dd3448001d972f356 | [
"Apache-2.0"
] | 2 | 2021-07-07T22:25:46.000Z | 2021-11-16T19:13:16.000Z | tests/test_rewriter.py | la-luo/py2star | 5664a1eaed6940eb1b9c9d2dd3448001d972f356 | [
"Apache-2.0"
] | 17 | 2020-11-02T19:11:11.000Z | 2021-12-29T16:36:06.000Z | tests/test_rewriter.py | la-luo/py2star | 5664a1eaed6940eb1b9c9d2dd3448001d972f356 | [
"Apache-2.0"
] | 3 | 2021-04-16T16:57:40.000Z | 2021-09-07T16:03:48.000Z | import logging
from lib2to3 import refactor
from textwrap import dedent
import pytest
logger = logging.getLogger(__name__)
@pytest.mark.skip("unused")
def test_rewrite(simple_class, lib2to3_xfrms):
_fixers = refactor.get_fixers_from_package("py2star.fixes")
assert isinstance(_fixers, list) and len(_fixers) != 0
def rt(fixers, options=None, explicit=None):
return refactor.RefactoringTool(fixers, options, explicit)
out = simple_class
# out = open("sample_test2.py").read()
for f in _fixers:
# if not f.endswith("fix_exceptions"):
# continue
tool = rt([f])
out = str(tool.refactor_string(dedent(out), "simple_class.py"))
print(out)
assert out.strip().splitlines() == lib2to3_xfrms.strip().splitlines()
| 29.074074 | 73 | 0.69172 |
acee4263cdefc128c63cae2120ab46355c59d48b | 1,607 | py | Python | pyclient/RobotEPuck.py | titos-carrasco/robworld-client-python | 60a0af8e80750d3eeed244c59742780d9c9ca3b8 | [
"MIT"
] | null | null | null | pyclient/RobotEPuck.py | titos-carrasco/robworld-client-python | 60a0af8e80750d3eeed244c59742780d9c9ca3b8 | [
"MIT"
] | null | null | null | pyclient/RobotEPuck.py | titos-carrasco/robworld-client-python | 60a0af8e80750d3eeed244c59742780d9c9ca3b8 | [
"MIT"
] | null | null | null | import socket
from robworld import RobotBase
class RobotEPuck( RobotBase.RobotBase ):
"""
    Wrapper class to access a remote robot of the EPuck type
    Parameters
    name: name of the robot to control in the simulator
    host: server where this robot is located
    port: port where this robot is located
"""
tipo = "epuck"
def __init__( self, name:str, host:str, port:int ):
super().__init__( name, host, port )
def getSensors( self ):
"""
        Updates the robot's sensor values
"""
resp = super().getSensors()
self.proximitySensorValues = tuple( resp["proximitySensorValues"] )
self.proximitySensorDistances = tuple( resp["proximitySensorDistances"] )
def setLedRing( self, on_off:bool ):
"""
        Turns the ring surrounding the robot on or off
        Parameters
        on_off: True to turn on, False to turn off
"""
led_on = 1 if on_off else 0
pkg = { "cmd":"setLedRing", "estado": led_on }
resp = self.sendPkg( pkg )
def getCameraImage( self ) -> list :
"""
        Gets the image from the robot's linear camera.
        The image is 60x1 pixels
        Returns
        The linear image
"""
pkg = { "cmd":"getCameraImage" }
resp = self.sendPkg( pkg, bytes )
l = len( resp )
resp = [ tuple( resp[i:i+4] ) for i in range( 0, l, 4 ) ]
return resp
def __str__( self ):
        return f"RobotEPuck >> name:{self.name} - host={self.host} - port={self.port}"
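

# Hypothetical usage sketch (not part of the original module). The host and
# port are placeholders for a running robworld simulator that exposes an
# "epuck" robot; the calls mirror the methods defined above.
if __name__ == "__main__":
    robot = RobotEPuck("epuck-0", "localhost", 7000)  # assumed simulator address
    robot.getSensors()
    print(robot.proximitySensorDistances)
    robot.setLedRing(True)
    image = robot.getCameraImage()
    print(len(image))  # 60 entries for the 60x1-pixel linear camera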
| 29.759259 | 85 | 0.589297 |
acee434d6cd1fe2f30cbe300b6b0b79e6c8262c0 | 10,631 | py | Python | local_intersubject_pkg/intersubject.py | jahzam/movie-watching-control | 26d3901c709a43ae70a3052f5c155949135f63a7 | [
"BSD-3-Clause"
] | null | null | null | local_intersubject_pkg/intersubject.py | jahzam/movie-watching-control | 26d3901c709a43ae70a3052f5c155949135f63a7 | [
"BSD-3-Clause"
] | null | null | null | local_intersubject_pkg/intersubject.py | jahzam/movie-watching-control | 26d3901c709a43ae70a3052f5c155949135f63a7 | [
"BSD-3-Clause"
] | 1 | 2021-04-18T21:48:03.000Z | 2021-04-18T21:48:03.000Z | # intersubject.py: Module contains an Intersubject class that provides
# methods for computing one-sample and within-between
# intersubject correlation and intersubject functional correlation
import numpy as np
from .tools import save_data
from .basic_stats import pairwise_r, r_mean
# class for computing whole-sample and within-between group ISFC and ISC.
class Intersubject:
"""
Perform Intersubject analysis on a group of subjects.
Use method group_isfc() to perform either entire- or within-between group
ISFC/ISC analysis. Retrieve results from dicts stored in the object instance
attributes .isfc and .isc.
Example:
# compute isfc and isc
entire_isfc = Intersubject(files, (4, 4, 4, 10))
entire_isfc.group_isfc([3, 14, 17, 28, 29], 'entire')
# get results
entire_isfc.isfc
entire_isfc.isc
"""
def __init__(self, data_path, datasize):
self.data_path = data_path
self.datasize = datasize
self.dims = (datasize[0] * datasize[1] * datasize[2], datasize[3]) # for quick matrix size
self.subject_ids = {} # can accomodate either one or multiple ID groups
self.group_mask = None
self.isfc = {}
self.isc = {}
self._data_sum = {}
self._voxel_sum = {}
def _get_sum(self, label):
# Calculate's a group's summed data in preparation for computing the
# average (for one-to-average/leave-one-out ISC method)
# Make empty summed data output arrays
sum_data = np.zeros(shape=(self.dims))
sum_vox = np.zeros(shape=(self.dims[0]))
_group_mask = np.zeros(shape=(self.dims[0]))
# Save each subject's data and voxel mask
sub_list = self.subject_ids[label]
for sub in sub_list:
# Load subject data from npz dict object
subject_dict = np.load(self.data_path.format(sub))
mask = subject_dict['mask']
# Get subject's masked data
data = np.zeros(shape=(len(mask), self.dims[1])) # matrix of zeros
data[mask, :] = subject_dict['data']
# Add subject data to the sample-wide sum array
sum_data = sum_data + data
sum_vox = sum_vox + mask
assert sum_data.shape == self.dims, f"sum data shape is {sum_data.shape} but should be {self.dims}"
assert sum_vox.shape == (self.dims[0], ), f"sum vox shape is {sum_vox.shape} but should be {self.dims[0]}"
# Create boolean mask of sample-wide surviving voxels
# NOTE: 0.7 is magic number carry-over from YC
        _group_mask = sum_vox.copy() > (0.7 * len(sub_list))  # True if at least 70% of participants' voxels survive; magic number from YC's code
# Find set union between group masks if already defined
if self.group_mask is not None:
self.group_mask = np.logical_or(self.group_mask, _group_mask)
else:
self.group_mask = _group_mask
# Reduce summed data and voxels with mask
self._data_sum[label] = sum_data[_group_mask, :]
self._voxel_sum[label] = sum_vox[_group_mask]
def _isfc_oneavg(self, sub_id, label, compare_method = 'entire', minus_one = True):
# Calculates one subject's ISFC and ISC
# Recursively calculate subject's isfc for within and between group method
if compare_method == 'within_between':
# Save dicts of within and between data separately from arguments
label_list = list(self.subject_ids)
# treat provided label as "within group" label
within_isfc = self._isfc_oneavg(sub_id, label)
# use remaining label as "between group"
label_list.remove(label)
label = label_list[0]
between_isfc = self._isfc_oneavg(sub_id, label, minus_one=False)
return within_isfc, between_isfc
elif compare_method == 'entire':
# Retrieve subject data with their mask
subject_dict = np.load(self.data_path.format(sub_id))
mask = subject_dict['mask']
data = np.full(shape=(mask.shape[0], self.dims[1]), fill_value=np.nan)
data[mask, :] = subject_dict['data']
# Mask the subject's data and voxel mask with sample-wide voxel mask (potentially more restrictive)
data = data[self.group_mask, :]
mask = mask[self.group_mask]
assert data.shape == self._data_sum[label].shape, f"subject data {data.shape} and data sum {self._data_sum[label].shape} have mismatched shapes"
assert mask.shape == self._voxel_sum[label].shape, f"subject mask {mask.shape} and voxel sum {self._voxel_sum[label].shape} have mismatched shapes"
# Check whether to subtract of subject data from summed data
if minus_one==False:
temp_avg = self._data_sum[label] / self._voxel_sum[label].reshape((self._voxel_sum[label].shape[0], 1))
else:
# Create sample-wide average timecourse minus subject's timecourse for all voxels
numer = self._data_sum[label] - data # removes subject's data
denom = self._voxel_sum[label] - mask # removes subject's voxels
temp_avg = numer / denom.reshape((denom.shape[0], 1))
# Compute ISFC correlation between subject and average-minus-one
isfc_matrix = pairwise_r(data, temp_avg)
return isfc_matrix
def group_isfc(self, group_ids, compare_method = "entire", keep_isfc=True, keep_isc=True):
"""
Calculate isfc and isc for a group of subjects.
Note: currently only uses leave-one-out ISC calculation method.
Parameters
----------
group_ids : dict or list
The subject IDs and their association group/label/condition.
If only one group will be examined, the IDs can be passed as a list
or as a dict with one key. ID lists of two groups must be passed as a
dict with two keys.
compare_method : str
Choose the group analysis method.
-'entire' for analysis on one group of subjects only.
-'within-between' for within-between analysis between two groups
of subjects.
(keep_isfc and keep_isc do not currently do anything and are placeholders
to selectively return isfc or isc only)
Warning: A new Intersubject object should be created whenever a different
analysis is performed (eg., 'within-between', then 'entire') to avoid
accidental carryover of the previously performed analysis' object state
and affecting your results.
"""
# Calculate isfc and isc for a group of subjects
assert keep_isfc or keep_isc, "Either keep_isfc or keep_isc must be True"
# Treat one list as a dict with one group; otherwise, return error
if type(group_ids) is list:
# try:
if any(isinstance(item, list) for item in group_ids):
raise TypeError("'subject_id' cannot be nested lists; provide multiple lists as dict with simple keys (eg., 0, 1; 'a', 'b') instead")
else:
self.subject_ids['sample'] = group_ids
assert type(self.subject_ids) is dict
# except TypeError:
# print()
else:
self.subject_ids = group_ids
label_list = list(self.subject_ids)
def get_container(this_label=None, last_dim=None):
# Function to generate an "empty" array to be filled afterward.
# Note: currently only works for the Intersubject class.
if this_label is not None:
last_dim = len(self.subject_ids[this_label])
isfc_container = np.full((self.dims[0], self.dims[0], last_dim), np.nan)
isc_container = np.full((self.dims[0], last_dim), np.nan)
return isfc_container, isc_container
# Whole-sample ISFC/ISC
if compare_method == "entire":
# treat first group as the primary label
label = label_list[0]
# get containers and summed data
self.isfc['entire'], self.isc['entire'] = get_container(label)
self._get_sum(label)
# Get isfc/isc
for i, sub in enumerate(self.subject_ids[label]):
this_isfc = self._isfc_oneavg(sub, label, compare_method=compare_method)
assert this_isfc.shape == self.isfc['entire'][:,:,0].shape, f"subject isfc {this_isfc.shape} and isfc container {self.isfc['entire'][:,:,0].shape} are mismatched"
# if keep_isfc:
self.isfc['entire'][:, :, i] = this_isfc
# self._isfc = subject_isfc
# if keep_isc
self.isc['entire'][:, i] = this_isfc.diagonal()
# self._isc = subject_isfc.diagonal()
# Within-between ISFC/ISC
elif compare_method == "within_between":
label_left, label_right = label_list[0], label_list[1]
all_ids = self.subject_ids[label_left] + self.subject_ids[label_right]
# Save array containers to attribute's dict
self.isfc['within'], self.isc['within'] = get_container(last_dim = len(all_ids))
self.isfc['between'], self.isc['between'] = get_container(last_dim = len(all_ids))
# Get sums for both subject groups
self._get_sum(label_left)
self._get_sum(label_right)
# Get within and betweeen isfc/isc
for i, sub in enumerate(all_ids):
# Treat label as 'within' if it matches the group
if sub in self.subject_ids[label_left]:
wb_isfc = self._isfc_oneavg(sub, label_left, compare_method=compare_method)
elif sub in self.subject_ids[label_right]:
wb_isfc = self._isfc_oneavg(sub, label_right, compare_method=compare_method)
self.isfc['within'][:,:,i] = wb_isfc[0]
self.isfc['between'][:,:,i] = wb_isfc[1]
self.isc['within'][:,i] = wb_isfc[0].diagonal()
self.isc['between'][:,i] = wb_isfc[1].diagonal() | 44.85654 | 179 | 0.599944 |
acee435966a4e23ba2fb1925ca46da745188d57f | 5,146 | py | Python | plots.py | bfeng/pytorch-cifar | 6de257bb4b489429785502d487044c55bec62aae | [
"MIT"
] | 1 | 2021-09-27T18:46:34.000Z | 2021-09-27T18:46:34.000Z | plots.py | bfeng/pytorch-cifar | 6de257bb4b489429785502d487044c55bec62aae | [
"MIT"
] | null | null | null | plots.py | bfeng/pytorch-cifar | 6de257bb4b489429785502d487044c55bec62aae | [
"MIT"
] | null | null | null | from typing import Callable, Union
from matplotlib.axes import Axes
import torch
import torch.nn as nn
import numpy as np
from models.custom import DentReLUFunction
import matplotlib.pyplot as plt
import utils
import models
plt.style.use("classic")
def _ax_plot(ax_func: Callable[[Axes], None], name):
    # Create a single 5x5-inch figure; calling plt.figure() first would
    # leave a stray empty figure behind plt.subplots().
    fig, ax = plt.subplots(figsize=(5, 5))
ax_func(ax)
# ax.grid(True)
fig.savefig(f"plots/{name}.png", bbox_inches="tight")
fig.savefig(f"plots/{name}.pdf", bbox_inches="tight")
plt.close()
def plot_formula():
def ax_func(ax):
# Move the left and bottom spines to x = 0 and y = 0, respectively.
ax.spines[["left", "bottom"]].set_position(("data", 0))
# Hide the top and right spines.
ax.spines[["top", "right"]].set_visible(False)
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("left")
x = torch.tensor(np.arange(-1, 1, 0.0005))
y = DentReLUFunction.apply(x, -0.666)
ax.set_aspect("equal")
ax.plot(x, y)
_ax_plot(ax_func, "formula")
def plot_dist():
def ax_func(ax):
x = torch.tensor(np.arange(-1, 1, 0.01))
y1 = torch.relu(x)
p_values = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59]
dist = []
for p in p_values:
p = (2 - p) / p
y2 = DentReLUFunction.apply(x, p)
d = torch.dist(y1, y2, p=2)
dist.append(d)
ax.stem(p_values, dist, markerfmt="x")
ax.set_xticks(p_values)
ax.set_ylim(1, 6)
# ax.set_xticklabels(ax.get_xticks(), rotation=90)
_ax_plot(ax_func, "dist")
def plot_train_test():
stock_hist_acc = np.loadtxt("stock_hist_acc.txt", delimiter=",")
stock_hist_loss = np.loadtxt("stock_hist_loss.txt", delimiter=",")
p1_hist_acc = np.loadtxt("p1_hist_acc.txt", delimiter=",")
p1_hist_loss = np.loadtxt("p1_hist_loss.txt", delimiter=",")
p2_hist_acc = np.loadtxt("p2_hist_acc.txt", delimiter=",")
p2_hist_loss = np.loadtxt("p2_hist_loss.txt", delimiter=",")
def ax_func(ax):
epochs = np.arange(1, len(stock_hist_acc) + 1)
ax.plot(epochs, stock_hist_acc / 100, label="Relu_acc")
ax.plot(epochs, stock_hist_loss, label="Relu_loss")
ax.plot(epochs, p1_hist_acc / 100, label="p=3_acc")
ax.plot(epochs, p1_hist_loss, label="p=3_loss")
ax.plot(epochs, p2_hist_acc / 100, label="p=5_acc")
ax.plot(epochs, p2_hist_loss, label="p=5_loss")
ax.set_xlim(0, len(stock_hist_acc) + 1)
ax.legend()
_ax_plot(ax_func, "train_test")
def plot_hist(net: Union[models.VGG, models.CustomVGG], checkpoint, name):
conv_out = []
norm_out = []
relu_out = []
def conv_hook_fn(m, i, o: torch.Tensor):
print(m)
conv_out.append(o.clone().detach())
def norm_hook_fn(m, i, o: torch.Tensor):
print(m)
norm_out.append(o.clone().detach())
def relu_hook_fn(m, i, o: torch.Tensor):
print(m)
relu_out.append(o.clone().detach())
testloader = utils.prepare_test_data()
net.features[0].register_forward_hook(conv_hook_fn)
net.features[1].register_forward_hook(norm_hook_fn)
net.features[2].register_forward_hook(relu_hook_fn)
utils.test_model_vis(net, checkpoint, testloader)
conv_out = torch.cat(conv_out)
print(conv_out.shape)
norm_out = torch.cat(norm_out)
print(norm_out.shape)
relu_out = torch.cat(relu_out)
print(relu_out.shape)
def conv_ax_func(ax):
ax.set_xlim(-6, 6)
ax.set_yscale("log")
h0 = conv_out.cpu().numpy().ravel()
ax.hist(h0, label=["conv"], bins=10)
ax.legend()
ax.set_title(f"Conv output: min={np.min(h0):.4f}, max={np.max(h0):.4f}")
np.savetxt(f"{name}-conv.txt", h0, fmt="%.8f")
def norm_ax_func(ax):
ax.set_xlim(-6, 6)
ax.set_yscale("log")
h1 = norm_out.cpu().numpy().ravel()
ax.hist(h1, label=["norm"], bins=10)
ax.legend()
ax.set_title(f"Norm output: min={np.min(h1):.4f}, max={np.max(h1):.4f}")
np.savetxt(f"{name}-norm.txt", h1, fmt="%.8f")
def relu_ax_func(ax):
p_val = 3 if isinstance(net, models.CustomVGG) and net.p_value == -1 else 5
label = "Relu" if isinstance(net, models.VGG) else f"DRelu p={p_val}"
ax.set_xlim(-6, 6)
ax.set_yscale("log")
h2 = relu_out.cpu().numpy().ravel()
ax.hist([h2], label=[label.lower()], bins=10)
ax.legend()
ax.set_title(f"{label} output: min={np.min(h2):.4f}, max={np.max(h2):.4f}")
np.savetxt(f"{name}-relu.txt", h2, fmt="%.8f")
_ax_plot(conv_ax_func, f"{name}-conv")
_ax_plot(norm_ax_func, f"{name}-norm")
_ax_plot(relu_ax_func, f"{name}-relu")
# plot_formula()
# plot_dist()
# plot_train_test()
plot_hist(models.VGG("VGG16"), "checkpoint-vgg16-stock", "hist")
plot_hist(models.CustomVGG("VGG16", p_value=-1), "checkpoint-cvgg16-p-1", "hist-p-1")
plot_hist(
models.CustomVGG("VGG16", p_value=-1.8), "checkpoint-cvgg16-p-1.8", "hist-p-1.8"
)
| 33.633987 | 85 | 0.616207 |
acee44e909e330c6cd17d54f0695d07c6cc4fe10 | 4,063 | py | Python | acmetk/models/certificate.py | commonism/acmetk | 0dbbcd4b92a9347221753d4076553af047876b12 | [
"MIT"
] | 3 | 2021-03-15T11:25:22.000Z | 2021-04-01T09:05:07.000Z | acmetk/models/certificate.py | commonism/acmetk | 0dbbcd4b92a9347221753d4076553af047876b12 | [
"MIT"
] | 60 | 2021-03-16T13:28:56.000Z | 2021-04-03T14:07:31.000Z | acmetk/models/certificate.py | commonism/acmetk | 0dbbcd4b92a9347221753d4076553af047876b12 | [
"MIT"
] | 1 | 2021-03-15T11:25:05.000Z | 2021-03-15T11:25:05.000Z | import enum
import uuid
# Import the submodules explicitly; a bare "import cryptography" does not
# make cryptography.x509 or the serialization module available below.
import cryptography.x509
import cryptography.hazmat.primitives.serialization
from sqlalchemy import (
Column,
Enum,
ForeignKey,
LargeBinary,
TypeDecorator,
Integer,
Text,
CheckConstraint,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from acmetk.models.messages import RevocationReason
from .base import Serializer, Entity
class x509Certificate(TypeDecorator):
"""x509 Certificate as PEM."""
impl = LargeBinary
def load_dialect_impl(self, dialect):
return dialect.type_descriptor(self.impl)
def process_bind_param(self, value, dialect):
if value is None:
return value
return self._adapt(value)
def process_result_value(self, value, dialect):
if value is None:
return value
return self._convert(value)
@staticmethod
def _adapt(cert):
if isinstance(cert, cryptography.x509.Certificate):
return cert.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.PEM
)
raise TypeError(type(cert))
@staticmethod
def _convert(s):
return cryptography.x509.load_pem_x509_certificate(s)
class CertificateStatus(str, enum.Enum):
# subclassing str simplifies json serialization using json.dumps
VALID = "valid"
REVOKED = "revoked"
class Certificate(Entity, Serializer):
"""Database model for certificate objects.
The ACME RFC does not specify how certificate objects should be structured.
It merely requires that the resulting certificate chain that the client downloads be encoded
with the PEM encoding:
`9.1. Media Type: application/pem-certificate-chain <https://tools.ietf.org/html/rfc8555#section-9.1>`_
There exists a check constraint on the resulting table to ensure that either the attribute
:attr:`cert` or the attribute :attr:`full_chain` is set. :attr:`cert` is used by the
:class:`~acmetk.server.AcmeCA` as it appends its root certificate on certificate download.
:class:`full_chain` is used by all subclasses of :class:`~acmetk.server.AcmeRelayBase` to easily
store the full certificate chain that is downloaded from the remote CA.
"""
__tablename__ = "certificates"
__mapper_args__ = {
"polymorphic_identity": "certificate",
}
__diff__ = frozenset(["status"])
__table_args__ = (
CheckConstraint(
"cert is not NULL or full_chain is not NULL",
name="check_cert_or_full_chain",
),
)
_entity = Column(Integer, ForeignKey("entities.entity"), nullable=False, index=True)
certificate_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
"""The certificate's ID."""
status = Column("status", Enum(CertificateStatus), nullable=False)
"""The certificate's status."""
order_id = Column(
UUID(as_uuid=True),
ForeignKey("orders.order_id"),
nullable=False,
index=True,
unique=True,
)
order = relationship(
"Order", back_populates="certificate", lazy="noload", foreign_keys=order_id
)
"""The :class:`acmetk.models.order.Order` associated with the certificate."""
cert = Column(x509Certificate, nullable=True, index=True)
"""The actual client certificate (:class:`cryptography.x509.Certificate`)."""
full_chain = Column(Text, nullable=True)
"""The full chain of the certificate (:class:`str`)."""
reason = Column(Enum(RevocationReason), nullable=True)
"""The revocation reason (:class:`~acmetk.models.messages.RevocationReason`)."""
def revoke(self, reason: RevocationReason):
"""Sets the certificate's :attr:`status` to *revoked* and copies the given reason.
:param reason: The reason for revocation.
"""
self.status = CertificateStatus.REVOKED
self.reason = reason or RevocationReason.unspecified
@property
def account_of(self):
return self.order.account_of
@property
def order_of(self):
return self.order
| 32.766129 | 107 | 0.684223 |
acee45086bba384eda154efeccf4c43132cac05b | 2,791 | py | Python | src/zenml/integrations/vertex/orchestrator/vertex_ai_orchestrator.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/vertex/orchestrator/vertex_ai_orchestrator.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/vertex/orchestrator/vertex_ai_orchestrator.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | # Original License:
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# New License:
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import TYPE_CHECKING, Any, ClassVar
from zenml.integrations.kubeflow.orchestrators.kubeflow_orchestrator import (
KubeflowOrchestrator,
)
from zenml.logger import get_logger
from zenml.stack import Stack
# from zenml.stack.stack_component_class_registry import (
# register_stack_component_class,
# )
# from google.cloud import aiplatform
# from google.cloud.aiplatform import pipeline_jobs
if TYPE_CHECKING:
from zenml.pipelines.base_pipeline import BasePipeline
from zenml.runtime_configuration import RuntimeConfiguration
logger = get_logger(__name__)
# @register_stack_component_class(
# component_type=StackComponentType.ORCHESTRATOR,
# component_flavor=OrchestratorFlavor.VERTEX,
# )
class VertexOrchestrator(KubeflowOrchestrator):
"""Orchestrator responsible for running pipelines on Vertex AI."""
FLAVOR: ClassVar[str] = "vertex"
def run(
self,
pipeline: "BasePipeline",
stack: "Stack",
runtime_configuration: "RuntimeConfiguration",
) -> Any:
"""Runs a pipeline on Vertex AI using the Kubeflow orchestrator."""
raise NotImplementedError("Vertex AI orchestration is coming soon!")
# super().run_pipeline(pipeline, stack, runtime_configuration)
# aiplatform.init(
# project=GOOGLE_CLOUD_PROJECT, location=GOOGLE_CLOUD_REGION
# )
# job = pipeline_jobs.PipelineJob(
# template_path=PIPELINE_DEFINITION_FILE, display_name=PIPELINE_NAME
# )
# job.submit()
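        # The commented block above sketches the eventual submission flow with
        # the google-cloud-aiplatform SDK: initialize the client, build a
        # PipelineJob from a compiled job spec, and submit it. Names such as
        # PIPELINE_DEFINITION_FILE and GOOGLE_CLOUD_PROJECT are placeholders
        # that are not defined in this module.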
| 34.036585 | 80 | 0.739162 |
acee45cd37d70e70521fcb3681276b8af80d16be | 722 | py | Python | mailparser/version.py | NoobSkywalker/mail-parser | 450233424042c136abd6f8bf9b7b20eb914c7f85 | [
"Apache-2.0"
] | null | null | null | mailparser/version.py | NoobSkywalker/mail-parser | 450233424042c136abd6f8bf9b7b20eb914c7f85 | [
"Apache-2.0"
] | null | null | null | mailparser/version.py | NoobSkywalker/mail-parser | 450233424042c136abd6f8bf9b7b20eb914c7f85 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "3.7.1"
if __name__ == "__main__":
print(__version__)
| 30.083333 | 72 | 0.754848 |
acee45cd42e376be8b787fb6c3793d547dee5e04 | 11,909 | py | Python | ion_functions/data/perf/test_sfl_performance.py | crisien/ion-functions | cefb59875fe9f0052e4533e663fece9af7aa3889 | [
"Apache-2.0"
] | null | null | null | ion_functions/data/perf/test_sfl_performance.py | crisien/ion-functions | cefb59875fe9f0052e4533e663fece9af7aa3889 | [
"Apache-2.0"
] | null | null | null | ion_functions/data/perf/test_sfl_performance.py | crisien/ion-functions | cefb59875fe9f0052e4533e663fece9af7aa3889 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from ion_functions.data.perf.test_performance import PerformanceTestCase, a_deca
from ion_functions.data.sfl_functions import (sfl_trhph_vfltemp,
sfl_trhph_vflorp,
sfl_trhph_chloride,
sfl_sflpres_l1,
sfl_thsph_temp_int,
sfl_thsph_temp_ref,
sfl_thsph_temp_tcl,
sfl_thsph_temp_tch,
sfl_thsph_temp_tl,
sfl_thsph_temp_th,
sfl_thsph_hydrogen,
sfl_thsph_sulfide,
sfl_thsph_ph,
sfl_thsph_ph_acl,
sfl_thsph_ph_noref,
sfl_thsph_ph_noref_acl,
)
import numpy as np
class TestSFLPerformance(PerformanceTestCase):
# Performance tests for seafloor instruments THSPH and TRHPH
def setUp(self):
### test inputs for THSPH L2 data products
self.counts_h2 = np.tile(4907, a_deca)
self.counts_hs = np.tile(3806, a_deca)
self.counts_ysz = np.tile(7807, a_deca)
self.counts_agcl = np.tile(7801, a_deca)
self.temperature = np.tile(300.0, a_deca)
self.chloride = np.tile(400.0, a_deca)
# calibration polynomial coefficients:
# electrode engineering to lab calibrated units
e2l_h2 = np.array([0.0, 0.0, 0.0, 0.0, 1.0, -0.00375])
e2l_hs = np.array([0.0, 0.0, 0.0, 0.0, 1.0, -0.00350])
e2l_ysz = np.array([0.0, 0.0, 0.0, 0.0, 1.0, -0.00375])
e2l_agcl = np.array([0.0, 0.0, 0.0, 0.0, 1.0, -0.00225])
# electrode material response
arr_hgo = np.array([0.0, 0.0, 4.38978E-10, -1.88519E-07, -1.88232E-04, 9.23720E-01])
arr_agcl = np.array([0.0, -8.61134E-10, 9.21187E-07, -3.7455E-04, 6.6550E-02, -4.30086])
# calculated theoretical reference electrode potential
arr_agclref = np.array([0.0, 0.0, -2.5E-10, -2.5E-08, -2.5E-06, -9.025E-02])
# for calculation of chl activity polynomial coefficients
arr_tac = np.array([0.0, 0.0, -2.80979E-09, 2.21477E-06, -5.53586E-04, 5.723E-02])
arr_tbc1 = np.array([0.0, 0.0, -6.59572E-08, 4.52831E-05, -1.204E-02, 1.70059])
arr_tbc2 = np.array([0.0, 0.0, 8.49102E-08, -6.20293E-05, 1.485E-02, -1.41503])
arr_tbc3 = np.array([-1.86747E-12, 2.32877E-09, -1.18318E-06, 3.04753E-04, -3.956E-02, 2.2047])
# h2 and h2s fugacity/activity calculations
arr_logkfh2g = np.array([0.0, 0.0, -1.51904000E-07, 1.16655E-04, -3.435E-02, 6.32102])
arr_eh2sg = np.array([0.0, 0.0, 0.0, 0.0, -4.49477E-05, -1.228E-02])
arr_yh2sg = np.array([2.3113E+01, -1.8780E+02, 5.9793E+02, -9.1512E+02, 6.7717E+02, -1.8638E+02])
### tiled calibration arrays for THSPH L2 products
tile_spec = (a_deca, 1)
self.e2l_h2 = np.tile(e2l_h2, tile_spec)
self.e2l_hs = np.tile(e2l_hs, tile_spec)
self.e2l_ysz = np.tile(e2l_ysz, tile_spec)
self.e2l_agcl = np.tile(e2l_agcl, tile_spec)
self.arr_hgo = np.tile(arr_hgo, tile_spec)
self.arr_agcl = np.tile(arr_agcl, tile_spec)
self.arr_agclref = np.tile(arr_agclref, tile_spec)
self.arr_tac = np.tile(arr_tac, tile_spec)
self.arr_tbc1 = np.tile(arr_tbc1, tile_spec)
self.arr_tbc2 = np.tile(arr_tbc2, tile_spec)
self.arr_tbc3 = np.tile(arr_tbc3, tile_spec)
self.arr_logkfh2g = np.tile(arr_logkfh2g, tile_spec)
self.arr_eh2sg = np.tile(arr_eh2sg, tile_spec)
self.arr_yh2sg = np.tile(arr_yh2sg, tile_spec)
### test inputs for THSPHTE products
self.ts_rawdec_b = np.tile(8185.0, a_deca)
self.ts_rawdec_r = np.tile(8758.0, a_deca)
self.tc_rawdec_L = np.tile(16009.0, a_deca)
self.tc_rawdec_H = np.tile(4236.0, a_deca)
### calibration arrays for THSPHTE products
# calibration constants: b thermistor
# engineering values to lab calibrated values
e2l_b = np.array([0.0, 0.0, 0.0, 0.0, 1.04938, -275.5])
# lab calibrated values to scientific values
l2s_b = np.array([0.0, 0.0, 8.7755e-08, 0.0, 0.000234101, 0.001129306])
# calibration constants: r thermistor
# engineering values to lab calibrated values
e2l_r = np.array([0.0, 0.0, 0.0, 0.0, 1.04938, -275.5])
# lab calibrated values to scientific values
l2s_r = np.array([0.0, 0.0, 8.7755e-08, 0.0, 0.000234101, 0.001129306])
# calibration constants: L thermocouple
# engineering values to lab calibrated values
e2l_L = np.array([0.0, 0.0, 0.0, 0.0, 0.9964, -0.46112])
# lab calibrated values to scientific values
l2s_L = np.array([9.32483e-7, -0.000122268, 0.00702, -0.23532, 17.06172, 0.0])
# calibration constants: H thermocouple
# engineering values to lab calibrated values
e2l_H = np.array([0.0, 0.0, 0.0, 0.0, 0.9979, -0.10287])
# lab calibrated values to scientific values
l2s_H = np.array([9.32483e-7, -0.000122268, 0.00702, -0.23532, 17.06172, 0.0])
# calibration constants: convert 'r' thermistor scientific (temperature)
# values to thermocouple equivalent voltage [mV]
s2v_r = np.array([5.83124e-14, -4.09038e-11, -3.44498e-8, 5.14528e-5, 0.05841, 0.00209])
### tiled calibration arrays for THSPHTE products
tile_spec = (a_deca, 1)
self.e2l_b = np.tile(e2l_b, tile_spec)
self.l2s_b = np.tile(l2s_b, tile_spec)
self.e2l_r = np.tile(e2l_r, tile_spec)
self.l2s_r = np.tile(l2s_r, tile_spec)
self.e2l_L = np.tile(e2l_L, tile_spec)
self.l2s_L = np.tile(l2s_L, tile_spec)
self.e2l_H = np.tile(e2l_H, tile_spec)
self.l2s_H = np.tile(l2s_H, tile_spec)
self.s2v_r = np.tile(s2v_r, tile_spec)
# Performance tests for seafloor instruments THSPH, L2 data products
def test_sfl_thsph_hydrogen(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_hydrogen, self.counts_h2, self.counts_ysz,
self.temperature, self.e2l_h2, self.e2l_ysz, self.arr_hgo,
self.arr_logkfh2g)
def test_sfl_thsph_sulfide(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_sulfide, self.counts_hs, self.counts_ysz,
self.temperature, self.e2l_hs, self.e2l_ysz, self.arr_hgo,
self.arr_logkfh2g, self.arr_eh2sg, self.arr_yh2sg)
def test_sfl_thsph_ph(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_ph, self.counts_ysz, self.counts_agcl,
self.temperature, self.e2l_ysz, self.e2l_agcl, self.arr_hgo,
self.arr_agcl, self.arr_tac, self.arr_tbc1, self.arr_tbc2,
self.arr_tbc3, self.chloride)
def test_sfl_thsph_ph_acl(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_ph_acl, self.counts_ysz, self.counts_agcl,
self.temperature, self.e2l_ysz, self.e2l_agcl, self.arr_hgo,
self.arr_agcl, self.arr_tac, self.arr_tbc1, self.arr_tbc2,
self.arr_tbc3)
def test_sfl_thsph_ph_noref(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_ph_noref, self.counts_ysz, self.temperature,
self.arr_agclref, self.e2l_ysz, self.arr_hgo, self.arr_agcl,
self.arr_tac, self.arr_tbc1, self.arr_tbc2, self.arr_tbc3,
self.chloride)
def test_sfl_thsph_ph_noref_acl(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_ph_noref_acl, self.counts_ysz, self.temperature,
self.arr_agclref, self.e2l_ysz, self.arr_hgo, self.arr_agcl,
self.arr_tac, self.arr_tbc1, self.arr_tbc2, self.arr_tbc3)
# Performance tests for seafloor instruments THSPH, 6 THSPHTE data products
def test_sfl_thsph_temp_int(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_int, self.ts_rawdec_b, self.e2l_b, self.l2s_b)
def test_sfl_thsph_temp_ref(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_ref, self.ts_rawdec_r, self.e2l_r, self.l2s_r)
def test_sfl_thsph_temp_tcl(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_tcl, self.tc_rawdec_L, self.e2l_L, self.l2s_L)
def test_sfl_thsph_temp_tch(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_tch, self.tc_rawdec_H, self.e2l_H, self.l2s_H)
def test_sfl_thsph_temp_tl(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_tl, self.tc_rawdec_L, self.e2l_L, self.l2s_L,
self.ts_rawdec_r, self.e2l_r, self.l2s_r, self.s2v_r)
def test_sfl_thsph_temp_th(self):
stats = []
# timing test
self.profile(stats, sfl_thsph_temp_th, self.tc_rawdec_H, self.e2l_H, self.l2s_H,
self.ts_rawdec_r, self.e2l_r, self.l2s_r, self.s2v_r)
# Performance tests for seafloor instruments TRHPH
def test_sfl_trhph_vfltemp(self):
stats = []
# create 10000 data packets
V_s = np.zeros(a_deca) + 1.931
V_c = np.zeros(a_deca) + 1.077
# calibration constants
tc_slope = np.zeros(a_deca) + 4.22e-5
ts_slope = np.zeros(a_deca) + 0.003
#c3 = np.zeros(a_deca) + -1.00e-6
#c2 = np.zeros(a_deca) + 7.00e-6
#c1 = np.zeros(a_deca) + 0.0024
#c0 = np.zeros(a_deca) + 0.015
# timing test
#self.profile(stats, sfl_trhph_vfltemp, V_s, V_c, tc_slope, ts_slope, c0, c1, c2, c3)
self.profile(stats, sfl_trhph_vfltemp, V_s, V_c, tc_slope, ts_slope)
def test_sfl_trhph_vflorp(self):
stats = []
# create 10000 data packets
V = np.zeros((a_deca, 1)) + 1.541
# calibration coefficients
offset = 2004.0
gain = 4.0
# timing test
self.profile(stats, sfl_trhph_vflorp, V, offset, gain)
def test_sfl_sflpres_l1(self):
stats = []
# create 10000 data packets
P = np.zeros((a_deca, 1)) + 14.868
# timing test
self.profile(stats, sfl_sflpres_l1, P)
def test_sfl_trhph_chloride(self):
stats = []
# set of data packets which will use the various resistivity branches
# in the code and has 1 out of range temperature value
test_array = np.array([
[0.440, 4.095, 4.095, 105.4],
[0.380, 4.095, 4.095, 241.9],
[0.320, 4.095, 4.095, 374.2],
[0.184, 0.915, 4.064, 105.4],
[0.172, 0.857, 4.082, 374.2],
[0.183, 0.926, 4.076, 222.0],
[0.131, 0.673, 3.293, 325.8],
[0.133, 0.678, 3.396, 999.9],
[0.135, 0.681, 2.000, 333.4],
[0.135, 0.681, 1.000, 333.2]
])
# create 10000 data packets
        # np.tile requires integer repetition counts, so cast after rounding
        tile_value = int(np.round(a_deca / test_array.shape[0]))
test_array = np.tile(test_array, (tile_value, 1))
V_R1 = test_array[:, 0]
V_R2 = test_array[:, 1]
V_R3 = test_array[:, 2]
T = test_array[:, 3]
# timing test
self.profile(stats, sfl_trhph_chloride, V_R1, V_R2, V_R3, T)
| 43.783088 | 105 | 0.577882 |
acee45cd72d0c6d8c21523cd5317c4b1990a22d1 | 5,069 | py | Python | skimage/morphology/convex_hull.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | skimage/morphology/convex_hull.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 30 | 2020-04-15T19:37:40.000Z | 2020-04-22T21:19:35.000Z | skimage/morphology/convex_hull.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 2 | 2020-03-12T23:20:22.000Z | 2021-02-15T21:54:02.000Z | """Convex Hull."""
from itertools import product
import numpy as np
from scipy.spatial import ConvexHull
from ..measure.pnpoly import grid_points_in_poly
from ._convex_hull import possible_hull
from ..measure._label import label
from ..util import unique_rows
from .._shared.utils import warn
__all__ = ['convex_hull_image', 'convex_hull_object']
def _offsets_diamond(ndim):
offsets = np.zeros((2 * ndim, ndim))
for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))):
offsets[vertex, axis] = offset
return offsets
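# For example, _offsets_diamond(2) returns the four half-pixel offsets
# [[-0.5, 0.], [0.5, 0.], [0., -0.5], [0., 0.5]], one per 2D pixel edge.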
def convex_hull_image(image, offset_coordinates=True, tolerance=1e-10):
"""Compute the convex hull image of a binary image.
The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.
Parameters
----------
image : array
Binary input image. This array is cast to bool before processing.
offset_coordinates : bool, optional
If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented
by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds
some "extent" to a pixel when computing the hull.
tolerance : float, optional
Tolerance when determining whether a point is inside the hull. Due
to numerical floating point errors, a tolerance of 0 can result in
some points erroneously being classified as being outside the hull.
Returns
-------
hull : (M, N) array of bool
Binary image with pixels in convex hull set to True.
References
----------
.. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
"""
ndim = image.ndim
if np.count_nonzero(image) == 0:
warn("Input image is entirely zero, no valid convex hull. "
"Returning empty image", UserWarning)
return np.zeros(image.shape, dtype=np.bool_)
# In 2D, we do an optimisation by choosing only pixels that are
# the starting or ending pixel of a row or column. This vastly
# limits the number of coordinates to examine for the virtual hull.
if ndim == 2:
coords = possible_hull(image.astype(np.uint8))
else:
coords = np.transpose(np.nonzero(image))
if offset_coordinates:
# when offsetting, we multiply number of vertices by 2 * ndim.
# therefore, we reduce the number of coordinates by using a
# convex hull on the original set, before offsetting.
hull0 = ConvexHull(coords)
coords = hull0.points[hull0.vertices]
# Add a vertex for the middle of each pixel edge
if offset_coordinates:
offsets = _offsets_diamond(image.ndim)
coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim)
# repeated coordinates can *sometimes* cause problems in
# scipy.spatial.ConvexHull, so we remove them.
coords = unique_rows(coords)
# Find the convex hull
hull = ConvexHull(coords)
vertices = hull.points[hull.vertices]
# If 2D, use fast Cython function to locate convex hull pixels
if ndim == 2:
mask = grid_points_in_poly(image.shape, vertices)
else:
gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))],
(ndim, -1))
# A point is in the hull if it satisfies all of the hull's inequalities
coords_in_hull = np.all(hull.equations[:, :ndim].dot(gridcoords) +
hull.equations[:, ndim:] < tolerance, axis=0)
mask = np.reshape(coords_in_hull, image.shape)
return mask
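# Illustrative usage (a minimal sketch): three isolated foreground pixels are
# filled in to their enclosing triangle:
#
#     >>> import numpy as np
#     >>> image = np.zeros((8, 8), dtype=bool)
#     >>> image[1, 1] = image[1, 6] = image[6, 3] = True
#     >>> convex_hull_image(image).sum() > image.sum()
#     True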
def convex_hull_object(image, neighbors=8):
"""Compute the convex hull image of individual objects in a binary image.
The convex hull is the set of pixels included in the smallest convex
    polygon that surrounds all white pixels in the input image.
Parameters
----------
image : (M, N) array
Binary input image.
neighbors : {4, 8}, int
Whether to use 4- or 8-connectivity.
Returns
-------
hull : ndarray of bool
Binary image with pixels in convex hull set to True.
Notes
-----
This function uses skimage.morphology.label to define unique objects,
finds the convex hull of each using convex_hull_image, and combines
these regions with logical OR. Be aware the convex hulls of unconnected
objects may overlap in the result. If this is suspected, consider using
convex_hull_image separately on each object.
"""
if image.ndim > 2:
raise ValueError("Input must be a 2D image")
if neighbors != 4 and neighbors != 8:
raise ValueError('Neighbors must be either 4 or 8.')
labeled_im = label(image, neighbors, background=0)
convex_obj = np.zeros(image.shape, dtype=bool)
convex_img = np.zeros(image.shape, dtype=bool)
for i in range(1, labeled_im.max() + 1):
convex_obj = convex_hull_image(labeled_im == i)
convex_img = np.logical_or(convex_img, convex_obj)
return convex_img
| 36.731884 | 96 | 0.662655 |
acee45ec7463987418e8c84ab099a1bd1610194f | 598 | py | Python | vnpy/api/tap/generator/tap_md_commen_struct.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 19,529 | 2015-03-02T12:17:35.000Z | 2022-03-31T17:18:27.000Z | vnpy/api/tap/generator/tap_md_commen_struct.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 2,186 | 2015-03-04T23:16:33.000Z | 2022-03-31T03:44:01.000Z | vnpy/api/tap/generator/tap_md_commen_struct.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 8,276 | 2015-03-02T05:21:04.000Z | 2022-03-31T13:13:13.000Z | TapAPIApplicationInfo = {
"AuthCode": "string",
"KeyOperationLogPath": "string",
}
TapAPICommodity = {
"ExchangeNo": "string",
"CommodityType": "char",
"CommodityNo": "string",
}
TapAPIContract = {
"Commodity": "dict",
"ContractNo1": "string",
"StrikePrice1": "string",
"CallOrPutFlag1": "char",
"ContractNo2": "string",
"StrikePrice2": "string",
"CallOrPutFlag2": "char",
}
TapAPIExchangeInfo = {
"ExchangeNo": "string",
"ExchangeName": "string",
}
TapAPIChangePasswordReq = {
"OldPassword": "string",
"NewPassword": "string",
}
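# These dicts appear to map TAP API struct fields to C type tags for the
# accompanying API generator; e.g. the "dict" value in TapAPIContract marks the
# nested TapAPICommodity struct stored under the "Commodity" key.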
| 19.290323 | 36 | 0.613712 |
acee46516eb3f3b0cc8ac710d7715716e12db08f | 1,931 | py | Python | solutions/object_oriented_design/lru_cache/lru_cache.py | bhargav0605/system-design-primer | e8a867ee286d0848ba32a2709051b6f275214714 | [
"CC-BY-4.0"
] | 176,415 | 2017-03-08T13:18:41.000Z | 2022-03-31T23:57:56.000Z | solutions/object_oriented_design/lru_cache/lru_cache.py | bhargav0605/system-design-primer | e8a867ee286d0848ba32a2709051b6f275214714 | [
"CC-BY-4.0"
] | 456 | 2017-03-08T23:38:56.000Z | 2022-03-31T12:27:17.000Z | solutions/object_oriented_design/lru_cache/lru_cache.py | bhargav0605/system-design-primer | e8a867ee286d0848ba32a2709051b6f275214714 | [
"CC-BY-4.0"
] | 31,885 | 2017-03-08T23:14:27.000Z | 2022-03-31T23:22:26.000Z | class Node(object):
    def __init__(self, query, results):
        self.query = query  # kept so the cache can evict by key (see Cache.set)
        self.results = results
        self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
self.tail = None
def move_to_front(self, node):
pass
def append_to_front(self, node):
pass
def remove_from_tail(self):
pass
class Cache(object):
def __init__(self, MAX_SIZE):
self.MAX_SIZE = MAX_SIZE
self.size = 0
self.lookup = {} # key: query, value: node
self.linked_list = LinkedList()
def get(self, query):
"""Get the stored query result from the cache.
Accessing a node updates its position to the front of the LRU list.
"""
node = self.lookup.get(query)
if node is None:
return None
self.linked_list.move_to_front(node)
return node.results
def set(self, results, query):
"""Set the result for the given query key in the cache.
When updating an entry, updates its position to the front of the LRU list.
If the entry is new and the cache is at capacity, removes the oldest entry
before the new entry is added.
"""
node = self.lookup.get(query)
if node is not None:
# Key exists in cache, update the value
node.results = results
self.linked_list.move_to_front(node)
else:
# Key does not exist in cache
if self.size == self.MAX_SIZE:
# Remove the oldest entry from the linked list and lookup
self.lookup.pop(self.linked_list.tail.query, None)
self.linked_list.remove_from_tail()
else:
self.size += 1
# Add the new key and value
            new_node = Node(query, results)
self.linked_list.append_to_front(new_node)
self.lookup[query] = new_node
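# A minimal sketch of the stubbed LinkedList operations (illustrative only;
# assumes a singly linked list where ``head`` is most and ``tail`` least
# recently used):
#
#     def append_to_front(self, node):
#         node.next = self.head
#         self.head = node
#         if self.tail is None:
#             self.tail = node
#
#     def remove_from_tail(self):
#         if self.head is None:
#             return
#         if self.head is self.tail:
#             self.head = self.tail = None
#             return
#         curr = self.head
#         while curr.next is not self.tail:
#             curr = curr.next
#         curr.next = None
#         self.tail = curr
#
# move_to_front(node) would unlink ``node`` (an O(n) scan here; a doubly linked
# list with ``prev`` pointers makes it O(1)) and then call append_to_front(node).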
| 28.820896 | 82 | 0.585189 |
acee4785eb0fddc66d9d06bac300997f7d882e67 | 16,276 | py | Python | core/ta_instr_sum.py | taatuco/smartalpha-data-collection | 0c184f156d60314cf1606b2cf1fb9e22526d5171 | [
"MIT"
] | 4 | 2018-07-06T05:55:05.000Z | 2018-11-13T01:07:59.000Z | core/ta_instr_sum.py | taatuco/smartalpha-data-collection | 0c184f156d60314cf1606b2cf1fb9e22526d5171 | [
"MIT"
] | null | null | null | core/ta_instr_sum.py | taatuco/smartalpha-data-collection | 0c184f156d60314cf1606b2cf1fb9e22526d5171 | [
"MIT"
] | null | null | null | """ Functionalities related to instruments data """
# Copyright (c) 2018-present, Taatu Ltd.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import datetime
from datetime import timedelta
import csv
from pathlib import Path
import pymysql.cursors
PDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.abspath(PDIR))
from settings import SmartAlphaPath, debug
SETT = SmartAlphaPath()
sys.path.append(os.path.abspath(SETT.get_path_core()))
from get_instr_perf_summ import InstrumentSummaryData
from sa_numeric import get_stdev, get_mdd, get_romad, get_volatility_risk
sys.path.append(os.path.abspath(SETT.get_path_pwd()))
from sa_access import sa_db_access
ACCESS_OBJ = sa_db_access()
DB_USR = ACCESS_OBJ.username()
DB_PWD = ACCESS_OBJ.password()
DB_NAME = ACCESS_OBJ.db_name()
DB_SRV = ACCESS_OBJ.db_server()
class ForecastData:
"""
    Gather all forecast-related data, such as target price, stop loss,
    and price prediction.
    Args:
        Integer: Instrument unique id
"""
ent_1_b = 0
sl_1_b = 0
tp_1_b = 0
ent_1_s = 0
sl_1_s = 0
tp_1_s = 0
ent_2_b = 0
sl_2_b = 0
tp_2_b = 0
ent_2_s = 0
sl_2_s = 0
tp_2_s = 0
frc_pt = 0
def __init__(self, uid, connection):
target_price = -9
date_today = datetime.datetime.now()
date_today = date_today.strftime('%Y%m%d')
date_yesterday = datetime.datetime.now() - timedelta(days=1)
date_yesterday = date_yesterday.strftime('%Y%m%d')
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = "SELECT price_instruments_data.target_price FROM trades "+\
"JOIN symbol_list ON trades.symbol = symbol_list.symbol "+\
"JOIN price_instruments_data ON "+\
"(trades.symbol = price_instruments_data.symbol AND "+\
"price_instruments_data.date = "+ str(date_yesterday) +") "+\
"WHERE symbol_list.uid = "+ str(uid) +" AND trades.entry_date = " + str(date_today)
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
target_price = row[0]
cursor.close()
forc_src = SETT.get_path_src()
file_str = forc_src+str(uid)+'f.csv'
filepath = Path(file_str)
if filepath.exists():
with open(file_str) as csvfile:
csv_file = csv.reader(csvfile, delimiter=',')
i = 1
for row in csv_file:
if i == 2:
self.ent_1_b = row[2] #lower 80 first row row[2]
self.sl_1_b = row[4] #lower 95 first row row[4]
self.tp_1_b = row[5] #upper 95 first row row[5]
self.ent_1_s = row[3] #upper 80 first row row[3]
self.sl_1_s = row[5] #upper 95 first row row[5]
self.tp_1_s = row[4] #lower 95 first row row[4]
if i == 8:
self.ent_2_b = row[2] #lower 80 last row row[2]
self.sl_2_b = row[4] #lower 95 last row row [4]
self.tp_2_b = row[5] #upper 95 last row row[5]
self.ent_2_s = row[3] #upper 80 last row row[3]
self.sl_2_s = row[5] #upper 95 last row row[5]
self.tp_2_s = row[4] #lower 95 last row row[4]
self.frc_pt = target_price
i += 1
debug(str(uid) +": "+ os.path.basename(__file__))
def get_frc_pt(self):
""" Get forecast point """
return self.frc_pt
def get_entry_buy(self, pos):
""" Get entry price for buy """
if pos == 1:
val = self.ent_1_b
else:
val = self.ent_2_b
return val
def get_sl_buy(self, pos):
""" get stop loss for buy """
if pos == 1:
val = self.sl_1_b
else:
val = self.sl_2_b
return val
def get_tp_buy(self, pos):
""" get target price for buy """
if pos == 1:
val = self.tp_1_b
else:
val = self.tp_2_b
return val
def get_entry_sell(self, pos):
""" get entry price for sell """
if pos == 1:
val = self.ent_1_s
else:
val = self.ent_2_s
return val
def get_sl_sell(self, pos):
""" get stop loss for sell """
if pos == 1:
val = self.sl_1_s
else:
val = self.sl_2_s
return val
def get_tp_sell(self, pos):
""" get target price for sell """
if pos == 1:
val = self.tp_1_s
else:
val = self.tp_2_s
return val
def get_forecast_pct(lprice, fprice):
"""
Return the forecasted percentage price change
Args:
Double: Last price
Double: Forecast price
Returns:
Double: Forecasted percentage change
"""
if lprice != 0 and lprice is not None:
lpf = float(lprice)
fpf = float(fprice)
result = (fpf - lpf)/lpf
else:
result = 0
return result
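# Example (illustrative): with a last price of 100.0 and a forecast price of
# 105.0, get_forecast_pct(100.0, 105.0) returns (105.0 - 100.0) / 100.0 = 0.05.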
def update_forecast_table(symbol, weekf, frc, date_this, connection):
"""
Update forecast data table instruments and price_instruments_data
Args:
String: Instrument symbol
Double: Week forecast change (if -999 it means the signal is cancelled)
Double: Forecast weekly target price
String: Date in string format YYYYMMDD
Returns:
None
"""
cr_d = connection.cursor(pymysql.cursors.SSCursor)
sql_d = "SELECT unit FROM instruments WHERE symbol = '"+symbol+"'"
cr_d.execute(sql_d)
rs_d = cr_d.fetchall()
unit = ''
for row in rs_d:
unit = row[0]
cr_d.close()
w_forecast_display_info = ''
if weekf != -999:
w_forecast_display_info = str(round(float(weekf*100), 2)) + " " + unit
if unit == 'pips':
        w_forecast_display_info = str(round(float(weekf*10000), 0)) + " " + unit
if unit == '%':
w_forecast_display_info = str(round(float(weekf*100), 2)) + unit
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = "UPDATE instruments SET w_forecast_change='"+str(weekf)+"', w_forecast_display_info='"+\
w_forecast_display_info +"' WHERE symbol='"+symbol+"'"
cursor.execute(sql)
connection.commit()
cursor.close()
#cursor = connection.cursor(pymysql.cursors.SSCursor)
#sql = "UPDATE price_instruments_data SET target_price = "+str(frc)+" WHERE (date>="+\
#date_this +" AND symbol='"+symbol+"' AND target_price =0) "
#debug(sql)
#cursor.execute(sql)
#connection.commit()
#cursor.close()
def update_instruments_table(symbol, y1_pct, m6_pct, m3_pct, m1_pct, w1_pct, d1_pct, wf_pct,
trade_entry_buy_1, trade_tp_buy_1, trade_sl_buy_1,
trade_entry_buy_2, trade_tp_buy_2, trade_sl_buy_2,
trade_entry_sell_1, trade_tp_sell_1, trade_sl_sell_1,
trade_entry_sell_2, trade_tp_sell_2, trade_sl_sell_2,
y1_pct_signal, m6_pct_signal, m3_pct_signal,
m1_pct_signal, w1_pct_signal, sentiment, connection):
"""
Update instrument table with summary data information
Args:
String: Instrument symbol
        Double: (25x) Various numerical data
Returns:
None
"""
cr_d = connection.cursor(pymysql.cursors.SSCursor)
sql_d = "SELECT decimal_places FROM instruments WHERE symbol='"+symbol+"' "
cr_d.execute(sql_d)
rs_d = cr_d.fetchall()
decimal_places = 2
for row in rs_d:
decimal_places = row[0]
cr_d.close()
cr_d = connection.cursor(pymysql.cursors.SSCursor)
sql_d = "SELECT price_close, date FROM price_instruments_data WHERE symbol='"+\
symbol +"' ORDER BY date DESC LIMIT 1 "
cr_d.execute(sql_d)
rs_d = cr_d.fetchall()
for row in rs_d:
last_price = row[0]
last_date = row[1]
cr_d.close()
y1_pct_signal = round(float(y1_pct_signal), 3)
m6_pct_signal = round(float(m6_pct_signal), 3)
m3_pct_signal = round(float(m3_pct_signal), 3)
m1_pct_signal = round(float(m1_pct_signal), 3)
w1_pct_signal = round(float(w1_pct_signal), 3)
y1_pct = round(float(y1_pct), 3)
m6_pct = round(float(m6_pct), 3)
m3_pct = round(float(m3_pct), 3)
m1_pct = round(float(m1_pct), 3)
w1_pct = round(float(w1_pct), 3)
d1_pct = round(float(d1_pct), 3)
wf_pct = round(float(wf_pct), 3)
date_last_month = datetime.datetime.now() - timedelta(days=30)
date_last_month = date_last_month.strftime('%Y%m%d')
sql = "SELECT price_close FROM price_instruments_data WHERE symbol='"+\
str(symbol) +"' AND date >="+ str(date_last_month) +" ORDER BY date"
stdev_st = get_stdev(sql)
maximum_dd_st = get_mdd(sql)
romad_st = get_romad(sql)
volatility_risk_st = get_volatility_risk(sql, False, '')
if wf_pct >= 0:
signal_type = "buy"
signal_dir = '<'
else:
signal_type = "sell"
signal_dir = '<'
signal_entry = signal_dir + str(round(float(last_price), decimal_places))
date_next_week = last_date + timedelta(days=7)
signal_expiration = date_next_week.strftime("%Y%m%d")
risk_reward_ratio = 1.5
buy_tp_gap_1 = float(trade_tp_buy_1) * float(volatility_risk_st) * float(risk_reward_ratio)
buy_sl_gap_1 = float(trade_sl_buy_1) * float(volatility_risk_st)
buy_tp_gap_2 = float(trade_tp_buy_2) * float(volatility_risk_st) * float(risk_reward_ratio)
buy_sl_gap_2 = float(trade_sl_buy_2) * float(volatility_risk_st)
sell_tp_gap_1 = float(trade_tp_sell_1) * float(volatility_risk_st) * float(risk_reward_ratio)
sell_sl_gap_1 = float(trade_sl_sell_1) * float(volatility_risk_st)
sell_tp_gap_2 = float(trade_tp_sell_2) * float(volatility_risk_st) * float(risk_reward_ratio)
sell_sl_gap_2 = float(trade_sl_sell_2) * float(volatility_risk_st)
trade_entry_buy_1 = round(float(trade_entry_buy_1), decimal_places)
trade_tp_buy_1 = round(float(trade_tp_buy_1) + float(buy_tp_gap_1), decimal_places)
trade_sl_buy_1 = round(float(trade_sl_buy_1) - float(buy_sl_gap_1), decimal_places)
trade_entry_buy_2 = round(float(trade_entry_buy_2), decimal_places)
trade_tp_buy_2 = round(float(trade_tp_buy_2) + float(buy_tp_gap_2), decimal_places)
trade_sl_buy_2 = round(float(trade_sl_buy_2) - float(buy_sl_gap_2), decimal_places)
trade_entry_sell_1 = round(float(trade_entry_sell_1), decimal_places)
trade_tp_sell_1 = round(float(trade_tp_sell_1) - float(sell_tp_gap_1), decimal_places)
trade_sl_sell_1 = round(float(trade_sl_sell_1) + float(sell_sl_gap_1), decimal_places)
trade_entry_sell_2 = round(float(trade_entry_sell_2), decimal_places)
trade_tp_sell_2 = round(float(trade_tp_sell_2) - float(sell_tp_gap_2), decimal_places)
trade_sl_sell_2 = round(float(trade_sl_sell_2) + float(sell_sl_gap_2), decimal_places)
if (trade_entry_buy_1 < 0 or trade_entry_buy_2 < 0 or
trade_entry_sell_1 < 0 or trade_entry_sell_2 < 0):
trade_entry_buy_1 = round(last_price, decimal_places)
trade_entry_buy_2 = round(last_price, decimal_places)
trade_entry_sell_1 = round(last_price, decimal_places)
trade_entry_sell_2 = round(last_price, decimal_places)
if trade_tp_buy_1 < 0:
trade_tp_buy_1 = 0
if trade_sl_buy_1 < 0:
trade_sl_buy_1 = 0
if trade_tp_buy_2 < 0:
trade_tp_buy_2 = 0
if trade_sl_buy_2 < 0:
trade_sl_buy_2 = 0
if trade_tp_sell_1 < 0:
trade_tp_sell_1 = 0
if trade_sl_sell_1 < 0:
trade_sl_sell_1 = 0
if trade_tp_sell_2 < 0:
trade_tp_sell_2 = 0
if trade_sl_sell_2 < 0:
trade_sl_sell_2 = 0
cr_i = connection.cursor(pymysql.cursors.SSCursor)
sql_i = "UPDATE instruments SET y1="+str(y1_pct)+",m6="+str(m6_pct)+\
",m3="+str(m3_pct)+",m1="+str(m1_pct)+",w1="+str(w1_pct)+",d1="+\
str(d1_pct)+",wf="+str(wf_pct)+","+\
"signal_type='"+ signal_type +"',signal_entry='"+ signal_entry +\
"',signal_expiration="+ str(signal_expiration) + ","+\
"trade_1_entry="+str(trade_entry_buy_1)+",trade_1_tp="+str(trade_tp_buy_1)+\
",trade_1_sl="+str(trade_sl_buy_1)+",trade_1_type='buy',"+\
"trade_2_entry="+str(trade_entry_buy_2)+",trade_2_tp="+str(trade_tp_buy_2)+\
",trade_2_sl="+str(trade_sl_buy_2)+",trade_2_type='buy',"+\
"trade_3_entry="+str(trade_entry_sell_1)+",trade_3_tp="+str(trade_tp_sell_1)+\
",trade_3_sl="+str(trade_sl_sell_1)+",trade_3_type='sell',"+\
"trade_4_entry="+str(trade_entry_sell_2)+",trade_4_tp="+str(trade_tp_sell_2)+\
",trade_4_sl="+str(trade_sl_sell_2)+",trade_4_type='sell', "+\
"stdev_st="+ str(stdev_st)+", maximum_dd_st="+ str(maximum_dd_st)+", romad_st="+\
str(romad_st) + ", volatility_risk_st="+ str(volatility_risk_st) +", "+\
"y1_signal="+str(y1_pct_signal)+",m6_signal="+str(m6_pct_signal)+",m3_signal="+\
str(m3_pct_signal)+",m1_signal="+str(m1_pct_signal)+",w1_signal="+str(w1_pct_signal) +", "+\
"sentiment="+str(sentiment)+" "+\
"WHERE symbol='"+symbol+"' "
debug(sql_i)
cr_i.execute(sql_i)
connection.commit()
cr_i.close()
def get_instr_sum(symbol, uid, asset_class, date_this, sentiment, connection):
"""
Retrieve instrument data summary
Args:
String: Instrument symbol
Integer: Instrument unique id
String: Instrument asset class
String: Date in string format YYYYMMDD
Double: Instrument sentiment score
Returns:
None
"""
mul = 1
#Convert from percentage to pips for forex
if asset_class == 'FX:':
mul = 10000
instr_data = InstrumentSummaryData(symbol, uid, connection)
forc_data = ForecastData(uid, connection)
# ---
y1_pct_signal = float(instr_data.get_pct_1_year_signal())* mul
m6_pct_signal = float(instr_data.get_pct_6_month_signal())* mul
m3_pct_signal = float(instr_data.get_pct_3_month_signal())* mul
m1_pct_signal = float(instr_data.get_pct_1_month_signal())* mul
w1_pct_signal = float(instr_data.get_pct_1_week_signal())* mul
y1_pct = float(instr_data.get_pct_1_year_performance())* mul
m6_pct = float(instr_data.get_pct_6_month_performance())* mul
m3_pct = float(instr_data.get_pct_3_month_performance())* mul
m1_pct = float(instr_data.get_pct_1_month_performance())* mul
w1_pct = float(instr_data.get_pct_1_week_performance())* mul
d1_pct = float(instr_data.get_pct_1_day_performance())* mul
frc_pt = forc_data.get_frc_pt()
lp_pt = instr_data.get_last_price()
wf_pct = 0
if frc_pt != -9:
weekf = get_forecast_pct(lp_pt, frc_pt)
wf_pct = weekf * mul
else:
weekf = -999
# --- (1)
trade_entry_buy_1 = forc_data.get_entry_buy(1)
trade_tp_buy_1 = forc_data.get_tp_buy(1)
trade_sl_buy_1 = forc_data.get_sl_buy(1)
# --- (2)
trade_entry_buy_2 = forc_data.get_entry_buy(2)
trade_tp_buy_2 = forc_data.get_tp_buy(2)
trade_sl_buy_2 = forc_data.get_sl_buy(2)
# --- (3)
trade_entry_sell_1 = forc_data.get_entry_sell(1)
trade_tp_sell_1 = forc_data.get_tp_sell(1)
trade_sl_sell_1 = forc_data.get_sl_sell(1)
# --- (4)
trade_entry_sell_2 = forc_data.get_entry_sell(2)
trade_tp_sell_2 = forc_data.get_tp_sell(2)
trade_sl_sell_2 = forc_data.get_sl_sell(2)
# ---
update_forecast_table(symbol, weekf, frc_pt, date_this, connection)
update_instruments_table(symbol, y1_pct, m6_pct, m3_pct, m1_pct, w1_pct, d1_pct, wf_pct,
trade_entry_buy_1, trade_tp_buy_1, trade_sl_buy_1,
trade_entry_buy_2, trade_tp_buy_2, trade_sl_buy_2,
trade_entry_sell_1, trade_tp_sell_1, trade_sl_sell_1,
trade_entry_sell_2, trade_tp_sell_2, trade_sl_sell_2,
y1_pct_signal, m6_pct_signal, m3_pct_signal,
m1_pct_signal, w1_pct_signal, sentiment, connection)
| 39.125 | 98 | 0.64248 |
acee47ea5d471d518b26b97e65f0959205d17284 | 21,128 | py | Python | aldryn_newsblog/south_migrations/0026_auto__rename_field_featuredarticlesplugin_entry_count.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | null | null | null | aldryn_newsblog/south_migrations/0026_auto__rename_field_featuredarticlesplugin_entry_count.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | null | null | null | aldryn_newsblog/south_migrations/0026_auto__rename_field_featuredarticlesplugin_entry_count.py | what-digital/aldryn-newsblog-blog-teaser-size | c52cb256fe3b608838f2184de9575b6cbbfd5f8e | [
"BSD-3-Clause"
] | 2 | 2019-10-22T04:30:28.000Z | 2019-10-22T05:09:16.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from aldryn_newsblog.utils.migration import rename_tables_old_to_new, rename_tables_new_to_old
class Migration(SchemaMigration):
def forwards(self, orm):
rename_tables_old_to_new(db)
db.rename_column(u'aldryn_newsblog_featuredarticlesplugin', 'entry_count', 'article_count')
def backwards(self, orm):
rename_tables_new_to_old(db)
db.rename_column(u'aldryn_newsblog_featuredarticlesplugin', 'article_count', 'entry_count')
models = {
u'aldryn_categories.category': {
'Meta': {'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'rgt': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'aldryn_newsblog.archiveplugin': {
'Meta': {'object_name': 'ArchivePlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.article': {
'Meta': {'ordering': "[u'-publishing_date']", 'object_name': 'Article'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}),
'categories': ('aldryn_categories.fields.CategoryManyToManyField', [], {'to': u"orm['aldryn_categories.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_articles'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'featured_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'publishing_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related': ('sortedm2m.fields.SortedManyToManyField', [], {'related_name': "'related_rel_+'", 'blank': 'True', 'to': u"orm['aldryn_newsblog.Article']"})
},
u'aldryn_newsblog.articletranslation': {
'Meta': {'unique_together': "[(u'language_code', u'slug'), (u'language_code', u'master')]", 'object_name': 'ArticleTranslation', 'db_table': "u'aldryn_newsblog_article_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''", 'blank': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.Article']"}),
'meta_description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'meta_title': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '234'})
},
u'aldryn_newsblog.authorsplugin': {
'Meta': {'object_name': 'AuthorsPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.categoriesplugin': {
'Meta': {'object_name': 'CategoriesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.featuredarticlesplugin': {
'Meta': {'object_name': 'FeaturedArticlesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'article_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_newsblog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_newsblog.newsblogconfig': {
'Meta': {'object_name': 'NewsBlogConfig'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'create_authors': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'detail_view_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_detail_view_placeholder_set'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_view_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_newsblog_list_view_placeholder_set'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'namespace': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100'}),
'paginate_by': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'search_indexed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'aldryn_newsblog.newsblogconfigtranslation': {
'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'NewsBlogConfigTranslation', 'db_table': "u'aldryn_newsblog_newsblogconfig_translation'"},
'app_title': ('django.db.models.fields.CharField', [], {'max_length': '234'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_newsblog.NewsBlogConfig']"})
},
u'aldryn_newsblog.relatedplugin': {
'Meta': {'object_name': 'RelatedPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'aldryn_newsblog.tagsplugin': {
'Meta': {'object_name': 'TagsPlugin'},
'app_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_newsblog.NewsBlogConfig']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_people.group': {
'Meta': {'object_name': 'Group'},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_people.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_people.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['aldryn_newsblog']
| 87.66805 | 206 | 0.578995 |
acee480d83dbca31f1605fb3a955ca7b8f97c108 | 4,045 | py | Python | 04-Working-With-Dataframes/1.Describe-a-dataframe.py | RodriGonca/DP-203-Data-Engineer | ef8e81bd4bda1e285c2e43714368d46be3ad041b | [
"MIT"
] | null | null | null | 04-Working-With-Dataframes/1.Describe-a-dataframe.py | RodriGonca/DP-203-Data-Engineer | ef8e81bd4bda1e285c2e43714368d46be3ad041b | [
"MIT"
] | null | null | null | 04-Working-With-Dataframes/1.Describe-a-dataframe.py | RodriGonca/DP-203-Data-Engineer | ef8e81bd4bda1e285c2e43714368d46be3ad041b | [
"MIT"
] | null | null | null | # Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC # Describe a DataFrame
# MAGIC
# MAGIC Your data processing in Azure Databricks is accomplished by defining DataFrames that read and process the data.
# MAGIC
# MAGIC This notebook introduces how to read your data using Azure Databricks DataFrames.
# COMMAND ----------
# MAGIC %md
# MAGIC # Introduction
# MAGIC
# MAGIC **Data Source**
# MAGIC * One hour of Pagecounts from the English Wikimedia projects captured August 5, 2016, at 12:00 PM UTC.
# MAGIC * Size on Disk: ~23 MB
# MAGIC * Type: Compressed Parquet File
# MAGIC * More Info: <a href="https://dumps.wikimedia.org/other/pagecounts-raw" target="_blank">Page view statistics for Wikimedia projects</a>
# MAGIC
# MAGIC **Technical Accomplishments:**
# MAGIC * Develop familiarity with the `DataFrame` APIs
# MAGIC * Introduce the classes...
# MAGIC * `SparkSession`
# MAGIC * `DataFrame` (aka `Dataset[Row]`)
# MAGIC * Introduce the actions...
# MAGIC * `count()`
# COMMAND ----------
# MAGIC %md
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to configure our "classroom."
# COMMAND ----------
# MAGIC %run "./Includes/Classroom-Setup"
# COMMAND ----------
# MAGIC %md
# MAGIC ## **The Data Source**
# MAGIC
# MAGIC * In this notebook, we will be using a compressed parquet "file" called **pagecounts** (~23 MB file from Wikipedia)
# MAGIC * We will explore the data and develop an understanding of it as we progress.
# MAGIC * You can read more about this dataset here: <a href="https://dumps.wikimedia.org/other/pagecounts-raw/" target="_blank">Page view statistics for Wikimedia projects</a>.
# MAGIC
# MAGIC We can use **dbutils.fs.ls()** to view our data on the DBFS.
# COMMAND ----------
(source, sasEntity, sasToken) = getAzureDataSource()
spark.conf.set(sasEntity, sasToken)
# COMMAND ----------
path = source + "/wikipedia/pagecounts/staging_parquet_en_only_clean/"
files = dbutils.fs.ls(path)
display(files)
# COMMAND ----------
# MAGIC %md
# MAGIC As we can see from the files listed above, this data is stored in <a href="https://parquet.apache.org" target="_blank">Parquet</a> files which can be read in a single command, the result of which will be a `DataFrame`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Create a DataFrame
# MAGIC * We can read the Parquet files into a `DataFrame`.
# MAGIC * We'll start with the object **spark**, an instance of `SparkSession` and the entry point to Spark 2.0 applications.
# MAGIC * From there we can access the `read` object which gives us an instance of `DataFrameReader`.
# COMMAND ----------
parquetDir = source + "/wikipedia/pagecounts/staging_parquet_en_only_clean/"
# COMMAND ----------
pagecountsEnAllDF = (spark # Our SparkSession & Entry Point
.read # Our DataFrameReader
.parquet(parquetDir) # Returns an instance of DataFrame
)
print(pagecountsEnAllDF) # Python hack to see the data type
# COMMAND ----------
# MAGIC %md
# MAGIC ## count()
# MAGIC
# MAGIC If you look at the API docs, `count()` is described like this:
# MAGIC > Returns the number of rows in the Dataset.
# MAGIC
# MAGIC `count()` will trigger a job to process the request and return a value.
# MAGIC
# MAGIC We can now count all records in our `DataFrame` like this:
# COMMAND ----------
total = pagecountsEnAllDF.count()
print("Record Count: {0:,}".format( total ))
# COMMAND ----------
# MAGIC %md
# MAGIC That tells us that there are around 2 million rows in the `DataFrame`.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Next steps
# MAGIC
# MAGIC Start the next lesson, [Use common DataFrame methods]($./2.Use-common-dataframe-methods) | 34.87069 | 226 | 0.698888 |
acee4843603d25807f99b8c4186e4634097504d9 | 3,613 | py | Python | python/ray/train/tests/test_xgboost_predictor.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/train/tests/test_xgboost_predictor.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/train/tests/test_xgboost_predictor.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | import json
import os
import tempfile
import numpy as np
import pandas as pd
import xgboost as xgb
from ray.air._internal.checkpointing import save_preprocessor_to_dir
from ray.air.checkpoint import Checkpoint
from ray.air.constants import MODEL_KEY
from ray.data.preprocessor import Preprocessor
from ray.train.xgboost import XGBoostPredictor, to_air_checkpoint
class DummyPreprocessor(Preprocessor):
def transform_batch(self, df):
self._batch_transformed = True
return df * 2
dummy_data = np.array([[1, 2], [3, 4], [5, 6]])
dummy_target = np.array([0, 1, 0])
model = xgb.XGBClassifier(n_estimators=10).fit(dummy_data, dummy_target).get_booster()
def get_num_trees(booster: xgb.Booster) -> int:
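    # Each string returned by get_dump(dump_format="json") is one boosted tree
    # serialized as JSON, so counting the parsed entries gives the tree count.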
data = [json.loads(d) for d in booster.get_dump(dump_format="json")]
return len(data)
def test_init():
preprocessor = DummyPreprocessor()
preprocessor.attr = 1
predictor = XGBoostPredictor(model=model, preprocessor=preprocessor)
with tempfile.TemporaryDirectory() as tmpdir:
# This somewhat convoluted procedure is the same as in the
# Trainers. The reason for saving model to disk instead
# of directly to the dict as bytes is due to all callbacks
# following save to disk logic. GBDT models are small
# enough that IO should not be an issue.
model.save_model(os.path.join(tmpdir, MODEL_KEY))
save_preprocessor_to_dir(preprocessor, tmpdir)
checkpoint = Checkpoint.from_directory(tmpdir)
checkpoint_predictor = XGBoostPredictor.from_checkpoint(checkpoint)
assert get_num_trees(checkpoint_predictor.model) == get_num_trees(predictor.model)
assert checkpoint_predictor.preprocessor.attr == predictor.preprocessor.attr
def test_predict():
preprocessor = DummyPreprocessor()
predictor = XGBoostPredictor(model=model, preprocessor=preprocessor)
data_batch = np.array([[1, 2], [3, 4], [5, 6]])
predictions = predictor.predict(data_batch)
assert len(predictions) == 3
assert hasattr(predictor.preprocessor, "_batch_transformed")
def test_predict_feature_columns():
preprocessor = DummyPreprocessor()
predictor = XGBoostPredictor(model=model, preprocessor=preprocessor)
data_batch = np.array([[1, 2, 7], [3, 4, 8], [5, 6, 9]])
predictions = predictor.predict(data_batch, feature_columns=[0, 1])
assert len(predictions) == 3
assert hasattr(predictor.preprocessor, "_batch_transformed")
def test_predict_feature_columns_pandas():
pandas_data = pd.DataFrame(dummy_data, columns=["A", "B"])
pandas_target = pd.Series(dummy_target)
pandas_model = (
xgb.XGBClassifier(n_estimators=10).fit(pandas_data, pandas_target).get_booster()
)
preprocessor = DummyPreprocessor()
predictor = XGBoostPredictor(model=pandas_model, preprocessor=preprocessor)
data_batch = pd.DataFrame(
np.array([[1, 2, 7], [3, 4, 8], [5, 6, 9]]), columns=["A", "B", "C"]
)
predictions = predictor.predict(data_batch, feature_columns=["A", "B"])
assert len(predictions) == 3
assert hasattr(predictor.preprocessor, "_batch_transformed")
def test_predict_no_preprocessor_no_training():
with tempfile.TemporaryDirectory() as tmpdir:
checkpoint = to_air_checkpoint(tmpdir, booster=model)
predictor = XGBoostPredictor.from_checkpoint(checkpoint)
data_batch = np.array([[1, 2], [3, 4], [5, 6]])
predictions = predictor.predict(data_batch)
assert len(predictions) == 3
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-sv", __file__]))
| 33.146789 | 88 | 0.718516 |
acee486865a26222aa4122e214df88edbd9fb0b6 | 9,782 | py | Python | src/oci/core/models/launch_instance_agent_config_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/core/models/launch_instance_agent_config_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/core/models/launch_instance_agent_config_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LaunchInstanceAgentConfigDetails(object):
"""
Configuration options for the Oracle Cloud Agent software running on the instance.
"""
def __init__(self, **kwargs):
"""
Initializes a new LaunchInstanceAgentConfigDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param is_monitoring_disabled:
The value to assign to the is_monitoring_disabled property of this LaunchInstanceAgentConfigDetails.
:type is_monitoring_disabled: bool
:param is_management_disabled:
The value to assign to the is_management_disabled property of this LaunchInstanceAgentConfigDetails.
:type is_management_disabled: bool
:param are_all_plugins_disabled:
The value to assign to the are_all_plugins_disabled property of this LaunchInstanceAgentConfigDetails.
:type are_all_plugins_disabled: bool
:param plugins_config:
The value to assign to the plugins_config property of this LaunchInstanceAgentConfigDetails.
:type plugins_config: list[oci.core.models.InstanceAgentPluginConfigDetails]
"""
self.swagger_types = {
'is_monitoring_disabled': 'bool',
'is_management_disabled': 'bool',
'are_all_plugins_disabled': 'bool',
'plugins_config': 'list[InstanceAgentPluginConfigDetails]'
}
self.attribute_map = {
'is_monitoring_disabled': 'isMonitoringDisabled',
'is_management_disabled': 'isManagementDisabled',
'are_all_plugins_disabled': 'areAllPluginsDisabled',
'plugins_config': 'pluginsConfig'
}
self._is_monitoring_disabled = None
self._is_management_disabled = None
self._are_all_plugins_disabled = None
self._plugins_config = None
@property
def is_monitoring_disabled(self):
"""
Gets the is_monitoring_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins. Default value is false (monitoring plugins are enabled).
These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring.
The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object.
:return: The is_monitoring_disabled of this LaunchInstanceAgentConfigDetails.
:rtype: bool
"""
return self._is_monitoring_disabled
@is_monitoring_disabled.setter
def is_monitoring_disabled(self, is_monitoring_disabled):
"""
Sets the is_monitoring_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can gather performance metrics and monitor the instance using the
monitoring plugins. Default value is false (monitoring plugins are enabled).
These are the monitoring plugins: Compute Instance Monitoring
and Custom Logs Monitoring.
The monitoring plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- If `isMonitoringDisabled` is true, all of the monitoring plugins are disabled, regardless of
the per-plugin configuration.
- If `isMonitoringDisabled` is false, all of the monitoring plugins are enabled. You
can optionally disable individual monitoring plugins by providing a value in the `pluginsConfig`
object.
:param is_monitoring_disabled: The is_monitoring_disabled of this LaunchInstanceAgentConfigDetails.
:type: bool
"""
self._is_monitoring_disabled = is_monitoring_disabled
@property
def is_management_disabled(self):
"""
Gets the is_management_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can run all the available management plugins.
Default value is false (management plugins are enabled).
These are the management plugins: OS Management Service Agent and Compute Instance
Run Command.
The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object.
:return: The is_management_disabled of this LaunchInstanceAgentConfigDetails.
:rtype: bool
"""
return self._is_management_disabled
@is_management_disabled.setter
def is_management_disabled(self, is_management_disabled):
"""
Sets the is_management_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can run all the available management plugins.
Default value is false (management plugins are enabled).
These are the management plugins: OS Management Service Agent and Compute Instance
Run Command.
The management plugins are controlled by this parameter and by the per-plugin
configuration in the `pluginsConfig` object.
- If `isManagementDisabled` is true, all of the management plugins are disabled, regardless of
the per-plugin configuration.
- If `isManagementDisabled` is false, all of the management plugins are enabled. You
can optionally disable individual management plugins by providing a value in the `pluginsConfig`
object.
:param is_management_disabled: The is_management_disabled of this LaunchInstanceAgentConfigDetails.
:type: bool
"""
self._is_management_disabled = is_management_disabled
@property
def are_all_plugins_disabled(self):
"""
Gets the are_all_plugins_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can run all the available plugins.
This includes the management and monitoring plugins.
To get a list of available plugins, use the
:func:`list_instanceagent_available_plugins`
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
`Managing Plugins with Oracle Cloud Agent`__.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm
:return: The are_all_plugins_disabled of this LaunchInstanceAgentConfigDetails.
:rtype: bool
"""
return self._are_all_plugins_disabled
@are_all_plugins_disabled.setter
def are_all_plugins_disabled(self, are_all_plugins_disabled):
"""
Sets the are_all_plugins_disabled of this LaunchInstanceAgentConfigDetails.
Whether Oracle Cloud Agent can run all the available plugins.
This includes the management and monitoring plugins.
To get a list of available plugins, use the
:func:`list_instanceagent_available_plugins`
operation in the Oracle Cloud Agent API. For more information about the available plugins, see
`Managing Plugins with Oracle Cloud Agent`__.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/manage-plugins.htm
:param are_all_plugins_disabled: The are_all_plugins_disabled of this LaunchInstanceAgentConfigDetails.
:type: bool
"""
self._are_all_plugins_disabled = are_all_plugins_disabled
@property
def plugins_config(self):
"""
Gets the plugins_config of this LaunchInstanceAgentConfigDetails.
The configuration of plugins associated with this instance.
:return: The plugins_config of this LaunchInstanceAgentConfigDetails.
:rtype: list[oci.core.models.InstanceAgentPluginConfigDetails]
"""
return self._plugins_config
@plugins_config.setter
def plugins_config(self, plugins_config):
"""
Sets the plugins_config of this LaunchInstanceAgentConfigDetails.
The configuration of plugins associated with this instance.
:param plugins_config: The plugins_config of this LaunchInstanceAgentConfigDetails.
:type: list[oci.core.models.InstanceAgentPluginConfigDetails]
"""
self._plugins_config = plugins_config
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
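# A minimal usage sketch (not part of the generated SDK file): enable the
# agent's monitoring and management plugins globally while pinning one
# plugin's state explicitly. The plugin name and the
# InstanceAgentPluginConfigDetails fields below are assumptions drawn from the
# docstrings above, not verified API values.
#   from oci.core.models import (InstanceAgentPluginConfigDetails,
#                                LaunchInstanceAgentConfigDetails)
#   agent_config = LaunchInstanceAgentConfigDetails(
#       is_monitoring_disabled=False,
#       is_management_disabled=False,
#       plugins_config=[InstanceAgentPluginConfigDetails(
#           name='Custom Logs Monitoring', desired_state='ENABLED')])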
| 42.163793 | 245 | 0.718156 |
acee49758c876881242a8870bb684239fe57fc18 | 1,621 | py | Python | woniu-build.py | pityonline/woniu-cmdb | 64722689dd1e9334a89a93683cdad825b9ef76d8 | [
"MIT"
] | 317 | 2015-12-22T03:49:01.000Z | 2022-03-30T07:18:52.000Z | woniu-build.py | O-ll-O/woniu-cmdb | 64722689dd1e9334a89a93683cdad825b9ef76d8 | [
"MIT"
] | 27 | 2016-01-06T06:44:15.000Z | 2020-02-11T09:29:28.000Z | woniu-build.py | O-ll-O/woniu-cmdb | 64722689dd1e9334a89a93683cdad825b9ef76d8 | [
"MIT"
] | 166 | 2015-12-22T03:46:18.000Z | 2021-09-27T02:50:15.000Z |
# coding=utf-8
import os,sys
reload(sys)
sys.setdefaultencoding("utf-8")
import json
from config import db_config,page_config
from flask_web import db
head = '''
{% extends "layout.html" %}
{% block body %}
{% endblock %}
{% block js %}
<script>
$(function() {
$.rebootOps(
'''
foot = '''
)
})
</script>
{% endblock %}
'''
def gen_file(config):
with open('templates/page/'+config['name']+'.html','w') as f:
f.write(head)
f.write(' '+json.dumps(config))
f.write(foot)
print config['name']+' build success'
def gen_config(config):
for c in config:
gen_file(c)
def del_table(name):
sql = 'drop table if exists '+name
print sql
db.execute(sql)
def create_table(name,data):
tmp = []
for v in data:
tmp.append('%s varchar(200)'%v['name'])
sql = 'create table %s (id int not null auto_increment primary key,%s)' % (name,','.join(tmp))
print sql
db.execute(sql)
print 'table %s is created' % (name)
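# For example (illustrative only): create_table('host', [{'name': 'ip'}, {'name': 'os'}]) emits
# create table host (id int not null auto_increment primary key,ip varchar(200),os varchar(200))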
def init_database(config):
for c in config:
name = c['name']
del_table(name)
create_table(name,c['data'])
if __name__ == '__main__':
config = []
for page in page_config['menu']:
if 'sub' in page:
for s in page['sub']:
config.append(s)
else:
config.append(page)
print config
if len(sys.argv)>1 and sys.argv[1]=='init':
init_database(config)
db.execute('insert into user (username,password) values ("51reboot","51reboot")')
gen_config(config) | 22.513889 | 98 | 0.571252 |
acee498dea6d3f2266bd6f16b99ca1aa7938858f | 517 | py | Python | oops_fhir/r4/value_set/need.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/need.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/need.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.need import need as need_
__all__ = ["need"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class need(need_):
"""
Need
The frequency with which the target must be validated
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/verificationresult-need
"""
class Meta:
resource = _resource
| 17.827586 | 69 | 0.715667 |
acee4be14064eb382cd7ed174f1ad321795c9bc2 | 2,541 | py | Python | jinahub/encoders/image/ImageTorchEncoder/tests/conftest.py | vivek2301/executors | 8159681d68408ab8f797497bc3374be77e6ca392 | [
"Apache-2.0"
] | 4 | 2021-07-01T13:05:51.000Z | 2022-03-15T02:27:58.000Z | jinahub/encoders/image/ImageTorchEncoder/tests/conftest.py | vivek2301/executors | 8159681d68408ab8f797497bc3374be77e6ca392 | [
"Apache-2.0"
] | 11 | 2021-06-22T13:53:48.000Z | 2022-02-21T10:51:53.000Z | tests/conftest.py | jina-ai/executor-image-torch-encoder | 69bd316f46282df8e32f73ebf47b7bf70089abcf | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from PIL import Image
from torchvision.models.mobilenetv2 import model_urls
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def mobilenet_weights(tmpdir: str) -> str:
weights_file = os.path.join(tmpdir, 'w.pth')
torch.hub.download_url_to_file(
url=model_urls['mobilenet_v2'], dst=weights_file, progress=False
)
return weights_file
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']
}
return image_dict
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
| 26.46875 | 86 | 0.624164 |
acee4d2b7017f69d9345d88f1aba7139e2b203d8 | 5,064 | py | Python | buildroot-external/rootfs-overlay/usr/lib/python3.8/site-packages/pendulum/locales/en/locale.py | builderjer/OpenVoiceOS | 280550da387275e79ad0c82b890c79554529b92f | [
"Apache-2.0"
] | 96 | 2020-10-07T20:56:40.000Z | 2022-03-17T01:22:29.000Z | buildroot-external/rootfs-overlay/usr/lib/python3.8/site-packages/pendulum/locales/en/locale.py | builderjer/OpenVoiceOS | 280550da387275e79ad0c82b890c79554529b92f | [
"Apache-2.0"
] | 38 | 2020-10-10T20:38:46.000Z | 2022-03-14T11:59:52.000Z | buildroot-external/rootfs-overlay/usr/lib/python3.8/site-packages/pendulum/locales/en/locale.py | nittaya1990/OpenVoiceOS | 85d7c15c034dac37848406b64b2c4988ed9ee091 | [
"Apache-2.0"
] | 15 | 2020-11-19T06:20:41.000Z | 2022-03-06T23:03:00.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .custom import translations as custom_translations
"""
en locale file.
It has been generated automatically and must not be modified directly.
"""
locale = {
"plural": lambda n: "one"
if ((n == n and ((n == 1))) and (0 == 0 and ((0 == 0))))
else "other",
"ordinal": lambda n: "few"
if (
((n % 10) == (n % 10) and (((n % 10) == 3)))
and (not ((n % 100) == (n % 100) and (((n % 100) == 13))))
)
else "one"
if (
((n % 10) == (n % 10) and (((n % 10) == 1)))
and (not ((n % 100) == (n % 100) and (((n % 100) == 11))))
)
else "two"
if (
((n % 10) == (n % 10) and (((n % 10) == 2)))
and (not ((n % 100) == (n % 100) and (((n % 100) == 12))))
)
else "other",
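    # Worked example, assuming the standard English ordinal suffixes: 1 ->
    # "one" (1st), 2 -> "two" (2nd), 3 -> "few" (3rd), 4 -> "other" (4th);
    # 11, 12, and 13 map to "other" (11th, 12th, 13th) because the x11-x13
    # cases are excluded above.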
"translations": {
"days": {
"abbreviated": {
0: "Sun",
1: "Mon",
2: "Tue",
3: "Wed",
4: "Thu",
5: "Fri",
6: "Sat",
},
"narrow": {0: "S", 1: "M", 2: "T", 3: "W", 4: "T", 5: "F", 6: "S"},
"short": {0: "Su", 1: "Mo", 2: "Tu", 3: "We", 4: "Th", 5: "Fr", 6: "Sa"},
"wide": {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
},
},
"months": {
"abbreviated": {
1: "Jan",
2: "Feb",
3: "Mar",
4: "Apr",
5: "May",
6: "Jun",
7: "Jul",
8: "Aug",
9: "Sep",
10: "Oct",
11: "Nov",
12: "Dec",
},
"narrow": {
1: "J",
2: "F",
3: "M",
4: "A",
5: "M",
6: "J",
7: "J",
8: "A",
9: "S",
10: "O",
11: "N",
12: "D",
},
"wide": {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
},
},
"units": {
"year": {"one": "{0} year", "other": "{0} years"},
"month": {"one": "{0} month", "other": "{0} months"},
"week": {"one": "{0} week", "other": "{0} weeks"},
"day": {"one": "{0} day", "other": "{0} days"},
"hour": {"one": "{0} hour", "other": "{0} hours"},
"minute": {"one": "{0} minute", "other": "{0} minutes"},
"second": {"one": "{0} second", "other": "{0} seconds"},
"microsecond": {"one": "{0} microsecond", "other": "{0} microseconds"},
},
"relative": {
"year": {
"future": {"other": "in {0} years", "one": "in {0} year"},
"past": {"other": "{0} years ago", "one": "{0} year ago"},
},
"month": {
"future": {"other": "in {0} months", "one": "in {0} month"},
"past": {"other": "{0} months ago", "one": "{0} month ago"},
},
"week": {
"future": {"other": "in {0} weeks", "one": "in {0} week"},
"past": {"other": "{0} weeks ago", "one": "{0} week ago"},
},
"day": {
"future": {"other": "in {0} days", "one": "in {0} day"},
"past": {"other": "{0} days ago", "one": "{0} day ago"},
},
"hour": {
"future": {"other": "in {0} hours", "one": "in {0} hour"},
"past": {"other": "{0} hours ago", "one": "{0} hour ago"},
},
"minute": {
"future": {"other": "in {0} minutes", "one": "in {0} minute"},
"past": {"other": "{0} minutes ago", "one": "{0} minute ago"},
},
"second": {
"future": {"other": "in {0} seconds", "one": "in {0} second"},
"past": {"other": "{0} seconds ago", "one": "{0} second ago"},
},
},
"day_periods": {
"midnight": "midnight",
"am": "AM",
"noon": "noon",
"pm": "PM",
"morning1": "in the morning",
"afternoon1": "in the afternoon",
"evening1": "in the evening",
"night1": "at night",
},
},
"custom": custom_translations,
}
| 32.883117 | 86 | 0.30707 |
acee4d4aae73c0aa628cb6ecb7581c7567ca808a | 1,481 | py | Python | assign01/assign01_3.py | mrzhuzhe/pepper | 8d99b9262490988fa96fe548f48103e57b75f1a6 | [
"MIT"
] | 3 | 2020-04-10T13:01:01.000Z | 2021-05-05T01:59:33.000Z | assign01/assign01_3.py | mrzhuzhe/pepper | 8d99b9262490988fa96fe548f48103e57b75f1a6 | [
"MIT"
] | null | null | null | assign01/assign01_3.py | mrzhuzhe/pepper | 8d99b9262490988fa96fe548f48103e57b75f1a6 | [
"MIT"
] | null | null | null | from sklearn.decomposition import TruncatedSVD
from assign01_2 import compute_co_occurrence_matrix
def reduce_to_k_dim(M, k=2):
    """ Reduce a co-occurrence count matrix of dimensionality (num_corpus_words, num_corpus_words)
to a matrix of dimensionality (num_corpus_words, k) using the following SVD function from Scikit-Learn:
- http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html
Params:
            M (numpy matrix of shape (number of corpus words, number of corpus words)): co-occurrence matrix of word counts
k (int): embedding size of each word after dimension reduction
Return:
            M_reduced (numpy matrix of shape (number of corpus words, k)): matrix of k-dimensional word embeddings.
In terms of the SVD from math class, this actually returns U * S
"""
n_iters = 10 # Use this parameter in your call to `TruncatedSVD`
M_reduced = None
print("Running Truncated SVD over %i words..." % (M.shape[0]))
# ------------------
# Write your implementation here.
svd = TruncatedSVD(n_components=k, n_iter=n_iters)
M_reduced = svd.fit_transform(M)
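    # fit_transform returns U * Sigma (the left singular vectors scaled by the
    # singular values), which is why the docstring describes the result as U * S.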
# ------------------
print("Done.")
return M_reduced
"""
input = ["START I am a pig , I am a dog END".split(' ')]
print("input", input)
M , word2Ind = compute_co_occurrence_matrix(input)
print("M", M)
print("res", reduce_to_k_dim(M))
"""
| 42.314286 | 122 | 0.652937 |
acee4e405b498121184c8de67d8b0066def2874f | 1,339 | py | Python | setup.py | shrutiichandra/spanish-conjugator | 2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0 | [
"MIT"
] | null | null | null | setup.py | shrutiichandra/spanish-conjugator | 2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0 | [
"MIT"
] | null | null | null | setup.py | shrutiichandra/spanish-conjugator | 2ebf41b92c14c3e47a873c52fdf4ce1d17bff5e0 | [
"MIT"
] | null | null | null | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='spanishconjugator',
version='2.3.1',
description='A python library to conjugate spanish words with parameters tense, mood and pronoun',
packages=['spanishconjugator',
'spanishconjugator.tenses',
'spanishconjugator.tenses.conditional',
'spanishconjugator.tenses.indicative',
'spanishconjugator.tenses.imperative',
'spanishconjugator.tenses.subjunctive',
'spanishconjugator.irregulars'],
package_dir={'': 'src'},
classifiers = [
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent"
],
long_description=long_description,
long_description_content_type="text/markdown",
extras_require={
"dev": [
"pytest",
"check-manifest",
"twine"
],
},
url="https://github.com/Benedict-Carling/spanish-conjugator",
author="Benedict Carling",
author_email="bencarling1@gmail.com"
) | 34.333333 | 102 | 0.61165 |
acee4ff104a5eae973f609a62f47cf8b29600840 | 8,247 | py | Python | tests/std_modules/test_module_socket.py | RyanSkraba/python-enchiridion | e5d271bafdeaf92d9961b54e55aafd31a4dc34cd | [
"Apache-2.0"
] | null | null | null | tests/std_modules/test_module_socket.py | RyanSkraba/python-enchiridion | e5d271bafdeaf92d9961b54e55aafd31a4dc34cd | [
"Apache-2.0"
] | null | null | null | tests/std_modules/test_module_socket.py | RyanSkraba/python-enchiridion | e5d271bafdeaf92d9961b54e55aafd31a4dc34cd | [
"Apache-2.0"
] | null | null | null | # -*- mode: python -*-
# -*- coding: utf-8 -*-
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import threading
import time
import unittest
from typing import Optional
"""Tests for sockets with Python"""
class EchoBytesServer(object):
"""A server that returns the bytes it receives."""
def __init__(
self,
host: str = "",
port: int = 0,
stopword=b"Stop",
timeout: Optional[float] = None,
) -> None:
self.log = logging.getLogger(__name__)
# The number of connections the server has processed.
self.client_count = 0
self.host = host
self.port = port
# When this word has been received, shutdown the server.
self.stopword = stopword
self.timeout = timeout
self.__exception = None
self.__stopword_buffer = bytes(len(self.stopword))
self.__thread = threading.Thread(target=self.run_catch, name="EchoBytesServer")
def __enter__(self) -> "EchoBytesServer":
"""Starts itself in a thread, waiting for clients to connect."""
self.__thread.start()
return self
def __exit__(self, t, v, tb) -> None:
"""If started in a thread, wait for the thread to finish."""
if self.__thread is not None and self.__thread.is_alive():
self.__thread.join()
def run_catch(self) -> None:
try:
self.run()
except Exception as e:
self.__exception = e
def run(self) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# Bind to the specified port, or find a free port if zero.
self.log.info("server.bind(%s:%s)", self.host, self.port)
s.bind((self.host, self.port))
self.port = s.getsockname()[1]
self.log.info(" bound: %s", s.getsockname())
if self.timeout is not None:
s.settimeout(self.timeout)
# Mark that the port is available for new clients to listen to. Only one client can
# be connected at a time.
self.log.info("socket.listen()")
s.listen(0)
# But serve all clients in order.
shutdown_requested = False
      while not shutdown_requested:
        connection = None
        try:
# Create a new socket (on a different port) to talk to the client. This
# is a blocking call.
# addr is a tuple of host, port
self.log.info("server.accept()")
connection, addr = s.accept()
if self.timeout is not None:
connection.settimeout(self.timeout)
self.log.info(" accepted: %s", addr)
self.client_count += 1
# Serve all the bytes in order.
client_disconnected = False
while not shutdown_requested and not client_disconnected:
# Echo all bytes
self.log.info("server.recv(1)")
data = connection.recv(1)
self.log.info(" received %s", data)
if len(data) == 0:
# The client has closed and will no longer send bytes.
client_disconnected = True
else:
self.__stopword_buffer += data
self.__stopword_buffer = self.__stopword_buffer[1:]
if self.stopword == self.__stopword_buffer:
# At any zero, we'll return the byte but stop receiving.
shutdown_requested = True
connection.sendall(data)
        finally:
          # Close the accepted socket if one was opened; accept() may have
          # timed out before any connection was made.
          if connection is not None:
            connection.close()
def get_port(self) -> int:
"""Blocks until the port is not zero."""
while self.port == 0:
time.sleep(1)
return self.port
class EchoBytesClient(object):
  """A client for speaking to the EchoBytesServer."""
def __init__(self, host: str, port: int, timeout: Optional[float] = None) -> None:
self.log = logging.getLogger(__name__)
self.connection = None
self.host = host
self.port = port
self.timeout = timeout
def __enter__(self) -> "EchoBytesClient":
if self.connection is None:
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
self.connection.settimeout(self.timeout)
self.log.info("client.connect(%s:%s)", self.host, self.port)
self.connection.connect((self.host, self.port))
self.log.info(" connected: %s", self.connection.getsockname())
return self
def __exit__(self, t, v, tb):
self.connection.close()
def send(self, data: Optional[bytes]) -> bytes:
if data is not None:
self.connection.sendall(data)
return self.connection.recv(1)
class SocketModuleTestSuite(unittest.TestCase):
  def test_echo_bytes_server_basic(self) -> None:
    """Simple end-to-end exchange between the echo server and several sequential clients."""
with EchoBytesServer(timeout=10) as srv:
port = srv.get_port()
self.assertNotEqual(port, 0)
self.assertEqual(0, srv.client_count)
with EchoBytesClient("", port, timeout=10) as c1:
self.assertEqual(bytes([1]), c1.send(bytes([1, 2, 3])))
self.assertEqual(bytes([2]), c1.send(None))
self.assertEqual(bytes([3]), c1.send(None))
self.assertEqual(1, srv.client_count)
with EchoBytesClient("", port) as c2:
self.assertEqual(bytes([2]), c2.send(bytes([2, 3, 4])))
self.assertEqual(bytes([3]), c2.send(None))
self.assertEqual(bytes([4]), c2.send(None))
self.assertEqual(2, srv.client_count)
with EchoBytesClient("", port) as c3:
self.assertEqual(bytes([3]), c3.send(b"\x03" + b"Stop"))
self.assertEqual(b"S", c3.send(None))
self.assertEqual(b"t", c3.send(None))
self.assertEqual(b"o", c3.send(None))
self.assertEqual(b"p", c3.send(None))
self.assertEqual(3, srv.client_count)
def test_accept_timeout(self) -> None:
"""A socket.accept call blocks, but can time out."""
info = {}
def accept(info: dict) -> None:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# None of these socket calls are blocking
s.bind(("", 0))
info["port"] = s.getsockname()[1]
s.listen(0)
s.settimeout(0.5)
try:
          # This will block until a client connects or the timeout expires.
connection, addr = s.accept()
info["connection"] = connection
info["addr"] = addr
except socket.timeout:
pass
accept(info)
self.assertIn("port", info.keys())
self.assertNotIn("connection", info.keys())
self.assertNotIn("addr", info.keys())
if __name__ == "__main__":
unittest.main()
| 37.316742 | 96 | 0.56275 |
acee50c734696161ac4a5a1666f3986796dd4382 | 802 | py | Python | src/pyams_pagelet/tests/__init__.py | Py-AMS/pyams-pagelet | b8505c3fdf417605ab09bfb2c6059c5df59985fc | [
"ZPL-2.1"
] | null | null | null | src/pyams_pagelet/tests/__init__.py | Py-AMS/pyams-pagelet | b8505c3fdf417605ab09bfb2c6059c5df59985fc | [
"ZPL-2.1"
] | null | null | null | src/pyams_pagelet/tests/__init__.py | Py-AMS/pyams-pagelet | b8505c3fdf417605ab09bfb2c6059c5df59985fc | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_pagelet doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
| 26.733333 | 75 | 0.741895 |
acee50ff938e689e7e5a43e169e6d517e5fe7b06 | 587 | py | Python | src/dataloader.py | JeanExtreme002/Geography-Game | e8a2f19fa24468a80263ef7e55497bfd5ae298b2 | [
"BSD-3-Clause"
] | 1 | 2020-05-23T01:03:28.000Z | 2020-05-23T01:03:28.000Z | src/dataloader.py | JeanExtreme002/Geography-Game | e8a2f19fa24468a80263ef7e55497bfd5ae298b2 | [
"BSD-3-Clause"
] | null | null | null | src/dataloader.py | JeanExtreme002/Geography-Game | e8a2f19fa24468a80263ef7e55497bfd5ae298b2 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
def get_info():
"""
    Returns the game information text.
"""
with open(os.path.join("data", "game_info.txt"), encoding = "utf-8") as file:
return file.read()
def get_questions():
"""
    Returns the game questions parsed from JSON.
"""
with open(os.path.join("data", "game_questions.json"), encoding = "utf-8-sig") as file:
return json.loads(file.read())
def get_rules():
"""
    Returns the game rules text.
"""
with open(os.path.join("data", "game_rules.txt"), encoding = "utf-8") as file:
return file.read() | 18.34375 | 91 | 0.586031 |
acee5172004ee5fe91f7f6d5ad2d35e7e44168f6 | 67 | py | Python | helper/test-import-module.py | singhst/web-scraping-tv-movie | 1942cad5f631424dccc4a0c89fc346497ce9854d | [
"MIT"
] | null | null | null | helper/test-import-module.py | singhst/web-scraping-tv-movie | 1942cad5f631424dccc4a0c89fc346497ce9854d | [
"MIT"
] | 1 | 2021-06-26T17:57:02.000Z | 2021-08-15T09:58:00.000Z | helper/test-import-module.py | singhst/web-scraping-tv-movie | 1942cad5f631424dccc4a0c89fc346497ce9854d | [
"MIT"
] | null | null | null | import translateToUrlPath
translateToUrlPath.translateToUrlPath() | 16.75 | 39 | 0.895522 |
acee5183d9d5ce5a1982e3e4ac053a522289fe01 | 17,714 | py | Python | BottlesOnCoasters/python/rl_environment_bottles_on_coasters.py | mgualti/Seq6DofManip | 706d0802bda43195d41b953a02380f3d0d731718 | [
"MIT"
] | 4 | 2020-04-08T03:58:31.000Z | 2021-04-08T14:44:18.000Z | BottlesOnCoasters/python/rl_environment_bottles_on_coasters.py | mgualti/Seq6DofManip | 706d0802bda43195d41b953a02380f3d0d731718 | [
"MIT"
] | null | null | null | BottlesOnCoasters/python/rl_environment_bottles_on_coasters.py | mgualti/Seq6DofManip | 706d0802bda43195d41b953a02380f3d0d731718 | [
"MIT"
] | null | null | null | '''Reinforcement learning (RL) environment for the bottles on coasters domain.'''
# python
import os
import fnmatch
from copy import copy
from time import sleep, time
# scipy
from scipy.io import loadmat
from matplotlib import pyplot
from scipy.spatial import cKDTree
from numpy.linalg import inv, norm
from numpy.random import choice, rand, randint, randn, uniform
from numpy import arccos, argmax, argmin, array, arange, cos, dot, eye, hstack, logical_or, mean, \
pi, power, repeat, reshape, sin, sqrt, sum, vstack, zeros
# openrave
import openravepy
# self
import point_cloud
from rl_environment import RlEnvironment
from hand_descriptor import HandDescriptor
class RlEnvironmentBottlesOnCoasters(RlEnvironment):
def __init__(self, params):
'''Initializes openrave environment, parameters, and first episode.
- Input params: System parameters data structure.
'''
RlEnvironment.__init__(self, params)
# parameters
self.nObjects = params["nObjects"]
self.nSupportObjects = params["nSupportObjects"]
self.objectFolder = params["objectFolder"]
self.supportObjectFolder = params["supportObjectFolder"]
self.graspFrictionCone = params["graspFrictionCone"]
self.graspMinDistFromBottom = params["graspMinDistFromBottom"]
self.placeOrientTolerance = self.params["placeOrientTolerance"]
self.placePosTolerance = self.params["placePosTolerance"]
self.placeHeightTolerance = self.params["placeHeightTolerance"]
# initialization
self.InitializeHandRegions()
self.objectFileNames = os.listdir(self.objectFolder)
self.objectFileNames = fnmatch.filter(self.objectFileNames, "*.dae")
self.supportObjectFileNames = os.listdir(self.supportObjectFolder)
self.supportObjectFileNames = fnmatch.filter(self.supportObjectFileNames, "*.dae")
# internal state
self.objects = []
self.supportObjects = []
self.kinBodyCache = {}
self.ResetEpisode()
def GetArtificialCloud(self):
'''Concatenates point cloud data from all objects and support objects.
- Returns cloud: Point cloud in the base/world reference frame.
'''
clouds = []
objects = self.supportObjects + self.objects
for obj in objects:
cloud = point_cloud.Transform(obj.GetTransform(), obj.cloud)
clouds.append(cloud)
return vstack(clouds)
def InitializeHandRegions(self):
'''Determines hand geometry, in the descriptor reference frame, for collision checking. Should
be called once at initialization.
'''
# find default descriptor geometry
desc = HandDescriptor(eye(4), self.params["imP"], self.params["imD"], self.params["imW"])
# important reference points
topUp = desc.top + (desc.height / 2) * desc.axis
topDn = desc.top - (desc.height / 2) * desc.axis
    BtmUp = desc.bottom + (desc.height / 2) * desc.axis
    BtmDn = desc.bottom - (desc.height / 2) * desc.axis
# cuboids representing hand regions, in workspace format
self.handClosingRegion = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2, desc.width / 2),
(-desc.depth / 2, desc.depth / 2)]
self.handFingerRegionL = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2 - 0.01, -desc.width / 2),
(-desc.depth / 2, desc.depth / 2)]
self.handFingerRegionR = [
(-desc.height / 2, desc.height / 2),
(desc.width / 2, desc.width / 2 + 0.01),
(-desc.depth / 2, desc.depth / 2)]
self.handTopRegion = [
(-desc.height / 2, desc.height / 2),
(-desc.width / 2 - 0.01, desc.width / 2 + 0.01),
(desc.depth / 2, desc.depth / 2 + 0.01)]
# find corners of hand collision geometry
self.externalHandPoints = array([ \
topUp + ((desc.width / 2) + 0.01) * desc.binormal,
topUp - ((desc.width / 2) + 0.01) * desc.binormal,
topDn + ((desc.width / 2) + 0.01) * desc.binormal,
topDn - ((desc.width / 2) + 0.01) * desc.binormal,
BtmUp + ((desc.width / 2) + 0.01) * desc.binormal,
BtmUp - ((desc.width / 2) + 0.01) * desc.binormal,
BtmDn + ((desc.width / 2) + 0.01) * desc.binormal,
BtmDn - ((desc.width / 2) + 0.01) * desc.binormal, ])
def IsAntipodalGrasp(self, descriptor, targetObject, maxAngleToFinger):
'''Returns True if a grasp is near antipodal, based on the parameters.
- Input descriptor: HandDescriptor object with pose of grasp.
- Input targetObject: OpenRAVE object handle with cloud and normals entries.
    - Input maxAngleToFinger: Maximum angle between surface normal and finger in degrees. Used
10 degrees for blocks, 15 degrees for mugs, and 15 degrees for bottles.
- Returns: True if antipodal grasp, False otherwise.
'''
# parameters
contactWidth = 0.01
maxAntipodalDist = 0.01
maxAngleToFinger = cos(maxAngleToFinger*(pi/180))
# put cloud into hand reference frame
bTo = targetObject.GetTransform()
bTh = descriptor.T
hTo = dot(inv(bTh), bTo)
X, N = point_cloud.Transform(hTo, targetObject.cloud, targetObject.normals)
X, N = point_cloud.FilterWorkspace(self.handClosingRegion, X, N)
if X.shape[0] == 0:
#print("No points in hand.")
return False
# find contact points
leftPoint = min(X[:, 1]); rightPoint = max(X[:, 1])
lX, lN = point_cloud.FilterWorkspace([(-1,1),(leftPoint,leftPoint+contactWidth),(-1,1)], X, N)
rX, rN = point_cloud.FilterWorkspace([(-1,1),(rightPoint-contactWidth,rightPoint),(-1,1)], X, N)
# find contact points normal to finger
lX = lX[-lN[:, 1] >= maxAngleToFinger, :]
rX = rX[ rN[:, 1] >= maxAngleToFinger, :]
if lX.shape[0] == 0 or rX.shape[0] == 0:
#print("No contact points normal to finger.")
return False
# are the closest two contact points nearly antipodal?
leftTree = cKDTree(lX[:,(0, 2)])
d, idxs = leftTree.query(rX[:, (0,2)])
#if min(d) >= maxAntipodalDist:
# print("Contacts not antipodal.")
return min(d) < maxAntipodalDist
def IsGrasp(self, descriptor):
'''Checks if, when the hand is placed at the descriptor's pose and closed, a grasp takes place.
- Input descriptor: HandDescriptor object of the target hand pose.
- Returns graspedObject: The handle of the grasped object if a cylinder can be grasped from the
target hand pose; otherwise None.
'''
# check collision
if self.IsRobotInCollision(descriptor):
return None
# check intersection of exactly 1 object
graspedObject = None
for i, obj in enumerate(self.objects):
bTo = obj.GetTransform()
hTo = dot(inv(descriptor.T), bTo)
X = point_cloud.Transform(hTo, obj.cloud)
X = point_cloud.FilterWorkspace(self.handClosingRegion, X)
intersect = X.size > 0
if intersect:
if graspedObject is None:
graspedObject = obj
else:
# intersection of multiple objects
return None
if graspedObject is None:
# intersection of no objects
return None
# check antipodal condition
if self.IsAntipodalGrasp(descriptor, graspedObject, self.graspFrictionCone):
return graspedObject
return None
def IsRobotInCollision(self, descriptor):
'''Checks collision between the robot and the world.
- Input descriptor: HandDescriptor object for the current hand pose.
- Returns: True if in collision and False otherwise.
'''
self.robot.SetTransform(descriptor.T)
return self.env.CheckCollision(self.robot)
def IsBottleUpright(self, obj):
'''Returns True iff the bottle's axis is (nearly) normal to the table plane. In this environment
    it can only be normal or orthogonal.'''
return dot(obj.GetTransform()[0:3, 2], array([0.0, 0.0, 1.0])) > \
cos(self.placeOrientTolerance * pi / 180)
def PerformGrasp(self, descriptor, cloud):
'''Tests for and simulates a grasp. If an object is grasped, self.holdingObject is set.
- Input descriptor: Pose of the grasp.
- Input cloud: Point cloud of the current scene, in the base/world frame (excluding table).
- Returns reward: -1 if grasping a placed object, 1 if grasping an unplaced object, and 0 otherwise.
'''
self.holdingObject = self.IsGrasp(descriptor)
if not self.holdingObject:
if self.params["showSteps"]:
raw_input("Grasp failed.")
return 0.0
if self.params["showSteps"]:
raw_input("Grasp succeeded.")
# penalize grasps near bottom of bottle (kinematics consideration)
oTd = dot(inv(self.holdingObject.GetTransform()), descriptor.T)
graspTooLow = oTd[2, 3] - self.holdingObject.workspace[2][0] < self.graspMinDistFromBottom
# generate grasp image
descriptor.GenerateHeightmap(cloud, self.GetTableHeight())
self.holdingDescriptor = descriptor
# simulate object movement when hand closes
self.SimulateObjectMovementOnClose(descriptor, self.holdingObject)
# move to holding pose
self.MoveHandToHoldingPose()
self.MoveObjectToHandAtGrasp(descriptor.T, self.holdingObject)
# compute reward
if self.holdingObject in self.placedObjects:
del self.placedObjects[self.holdingObject]
return -1.0
if graspTooLow:
return 0.0
return 1.0
def PerformPlace(self, descriptor):
'''Places the object and computes the appropriate reward. If place is not good, the object gets
removed from the environment, as its resulting state is hard to determine. Assumes robot and
object are at the holding pose.
- Input descriptor: Location of the hand at place.
- Returns reward: 1 if place is on an unoccupied coaster and 0 otherwise.
'''
# move object to hand at place
bTg = self.robot.GetTransform()
self.MoveHandToPose(descriptor.T)
self.MoveObjectToHandAtGrasp(bTg, self.holdingObject)
self.MoveHandToHoldingPose()
# no longer holding an object
placedObject = self.holdingObject
self.holdingObject = None
self.holdingDescriptor = None
# check if bottle is vertical
if not self.IsBottleUpright(placedObject):
self.PlaceFailed(placedObject)
return 0.0
# check if bottle is approximately completely over the coaster
bTo = placedObject.GetTransform()
supportObject = None
for coaster in self.supportObjects:
coasterXY = coaster.GetTransform()[0:2, 3]
if sum(power(coasterXY - bTo[0:2, 3], 2)) < (coaster.radius - self.placePosTolerance)**2:
supportObject = coaster
break
# not above any coaster
if supportObject is None:
self.PlaceFailed(placedObject)
return 0.0
# support object is already occupied
if supportObject in self.placedObjects.values():
self.PlaceFailed(placedObject)
return 0.0
# check if bottle bottom is within given height tolerance
supportTopZ = supportObject.GetTransform()[2, 3] + supportObject.workspace[2, 1]
objectBottomZ = placedObject.GetTransform()[2, 3] + placedObject.workspace[2, 0]
if objectBottomZ < supportTopZ - self.placeHeightTolerance[0] or \
objectBottomZ > supportTopZ + self.placeHeightTolerance[1]:
self.PlaceFailed(placedObject)
return 0.0
'''# check if hand is in collision
collision, cloudsInHandFrame = self.IsRobotInCollision(descriptor)
if collision:
self.PlaceFailed(placedObject)
return 0.0'''
# place is good
if self.params["showSteps"]:
raw_input("Placed object successfully.")
self.placedObjects[placedObject] = supportObject
return 1.0
def PlaceObjects(self, isSupportObjects, maxPlaceAttempts=10,
workspace=((-0.18, 0.18), (-0.18, 0.18))):
'''Chooses and places objects randomly on the table.
- Input isSupportObjects: Are the objects support objects (i.e. coasters)?
- Input maxPlaceAttempts: Maximum number of times to place an object collision-free. If exceeded,
the object will be placed in collision with some already placed object(s).
- Input workspace: Area to place objects in, [(minX, maxX), (minY, maxY)]. Center of objects will
not be outside of these bounds.
- Returns None.
'''
# support object / graspable object
if isSupportObjects:
nObjects = self.nSupportObjects
folderName = self.supportObjectFolder
fileNames = self.supportObjectFileNames
else:
nObjects = self.nObjects
folderName = self.objectFolder
fileNames = self.objectFileNames
# select file(s)
fileIdxs = choice(len(fileNames), size=nObjects, replace=False)
objectHandles = []
# add objects
for i in xrange(nObjects):
# choose a random object from the folder
objectName = fileNames[fileIdxs[i]]
# load object
if objectName in self.kinBodyCache:
body = self.kinBodyCache[objectName]
self.env.AddKinBody(body)
else:
# load mesh
self.env.Load(folderName + "/" + objectName)
shortObjectName = objectName[:-4]
body = self.env.GetKinBody(shortObjectName)
# load points and normals
data = loadmat(folderName + "/" + shortObjectName + ".mat")
body.cloud = data["cloud"]
body.workspace = data["workspace"]
if "normals" in data: body.normals = data["normals"]
if "radius" in data: body.radius = data["radius"]
# add to cache
self.kinBodyCache[objectName] = body
# select pose for object
for j in xrange(maxPlaceAttempts):
# choose orientation
if isSupportObjects:
R = eye(4)
downAxis = (2, 0)
else:
r1 = choice(array([0, 1, 2, 3]) * (pi / 2))
r2 = choice([pi / 2.0, 0.0], p=[2.0 / 3.0, 1.0 / 3.0])
r3 = uniform(0, 2 * pi)
R1 = openravepy.matrixFromAxisAngle([0.0, 0.0, 1.0], r1)
R2 = openravepy.matrixFromAxisAngle([0.0, 1.0, 0.0], r2)
R3 = openravepy.matrixFromAxisAngle([0.0, 0.0, 1.0], r3)
R = dot(R3, dot(R2, R1)) # about fixed frame in order 1, 2, 3
if r2 == 0:
downAxis = (2, 0)
elif r1 == 0:
downAxis = (0, 1)
elif r1 == pi/2:
downAxis = (1, 0)
elif r1 == pi:
downAxis = (0, 0)
else:
downAxis = (1, 1)
# choose xy position
xy = array([ \
uniform(workspace[0][0], workspace[0][1]),
uniform(workspace[1][0], workspace[1][1])])
# set height
z = abs(body.workspace[downAxis]) + self.GetTableHeight() + 0.001
# set transform
T = eye(4)
T[0:2, 3] = xy
T[2, 3] = z
T = dot(T, R)
body.SetTransform(T)
if not self.env.CheckCollision(body): break
# add to environment
objectHandles.append(body)
if isSupportObjects:
self.supportObjects += objectHandles
else:
self.objects += objectHandles
def PlaceFailed(self, placedObject):
'''Helper function to be called if a successful place condition is not met.'''
if self.params["showSteps"]:
raw_input("Place failed.")
self.objects.remove(placedObject)
self.env.Remove(placedObject)
def ResetEpisode(self):
'''Resets all internal variables pertaining to a particular episode, including objects placed.'''
self.RemoveObjectSet(self.objects)
self.RemoveObjectSet(self.supportObjects)
self.objects = []
self.supportObjects = []
self.holdingObject = None
self.holdingDescriptor = None
self.placedObjects = {}
def SimulateObjectMovementOnClose(self, descriptor, obj):
'''The object can move when the fingers close during a grasp.
This sets the object to an approximation to the correct resultant pose.
- Input descriptor: Grasp pose. Assumes this is a valid grasp.
- Input obj: The object being grasped.
- Returns None.
'''
# get object pose in hand frame
bTo = obj.GetTransform()
hTb = inv(descriptor.T)
hTo = dot(hTb, bTo)
if self.IsBottleUpright(obj):
# Top grasp. Simply set the y-position to 0.
hTo[1, 3] = 0
else:
# Side grasp.
# Set y = 0 at the point of contact along the bottle axis.
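      # Derivation: a point on the bottle axis in the hand frame is
      # p(a) = hTo[0:3, 3] + a * hTo[0:3, 2]; solving p_x(a) = 0 gives the
      # axis point in the hand's closing plane, and deltaY below is that
      # point's y-offset, which closing the fingers drives to zero.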
alpha = -hTo[0, 3] / hTo[0, 2]
deltaY = hTo[1, 3] + alpha * hTo[1, 2]
hTo[1, 3] -= deltaY
# Set the orientation to be vertical in the hand.
zAxis = hTo[0:2, 2] if hTo[0, 2] >= 0 else -hTo[0:2, 2]
angle = arccos(dot(zAxis, array([1.0, 0.0])) / norm(zAxis))
angle = angle if zAxis[1] <= 0 else 2 * pi - angle
handDepthAxis = array([0.0, 0.0, 1.0])
T = openravepy.matrixFromAxisAngle(handDepthAxis, angle)
hTo = dot(T, hTo)
# set the object's new pose in the base frame
bToNew = dot(descriptor.T, hTo)
obj.SetTransform(bToNew)
def Transition(self, descriptor, cloud):
'''The robot takes the provided action and a reward is evaluated.
The reward is +1 if a bottle is placed on an unoccupied coaster, -1 if a bottle already placed
is removed from a coaster, and 0 otherwise.
- Input descriptor: HandDescriptor object describing the current (overt) action.
- Input cloud: Point cloud of the scene, excluding table.
- Returns r: A scalar reward. The state of the objects in the simulator may change.
'''
if self.holdingObject is None:
r = self.PerformGrasp(descriptor, cloud)
else:
r = self.PerformPlace(descriptor)
return self.holdingDescriptor, r | 36.674948 | 104 | 0.649994 |
acee5186bea6ba37953f549a9d53bec524681e1f | 2,829 | py | Python | orders/models.py | gonzalo2009/CS50_Web_2019_Project3 | 25aae07f0fbb97e75ce03be5a79f1e345fbe309e | [
"MIT"
] | null | null | null | orders/models.py | gonzalo2009/CS50_Web_2019_Project3 | 25aae07f0fbb97e75ce03be5a79f1e345fbe309e | [
"MIT"
] | 3 | 2020-02-12T03:23:35.000Z | 2021-06-10T22:28:25.000Z | orders/models.py | gonzalo2009/CS50_Web_2019_Project3 | 25aae07f0fbb97e75ce03be5a79f1e345fbe309e | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Topping(models.Model):
name = models.CharField(max_length=64)
def __str__(self):
return f"{self.name}"
class Add_On(models.Model):
topping = models.OneToOneField(
Topping, on_delete=models.CASCADE, related_name="add_on")
def __str__(self):
return f"{self.topping}"
class Category(models.Model):
name = models.CharField(max_length=64)
def __str__(self):
return f"{self.name}"
class Item(models.Model):
name = models.CharField(max_length=64)
category = models.ForeignKey(
Category, null=True, on_delete=models.CASCADE, related_name="items")
one_price = models.FloatField(blank=True, null=True)
price_small = models.FloatField(blank=True, null=True)
price_large = models.FloatField(blank=True, null=True)
number_toppings = models.IntegerField(blank=True, null=True)
add_ons = models.BooleanField(default=False)
extra_cheese = models.BooleanField(default=False)
def __str__(self):
return f"{self.name}"
class Order(models.Model):
user = models.ForeignKey(
User, null=True, on_delete=models.CASCADE, related_name="orders")
date = models.DateTimeField(default=timezone.now)
price = models.FloatField(blank=True, null=True)
def __str__(self):
return f"User: {self.user}, Date: {self.date}"
class Cart(models.Model):
user = models.OneToOneField(
User, null=True, on_delete=models.CASCADE, related_name="cart")
price = models.FloatField(default=0)
def __str__(self):
return f"{self.user}"
class Purchase(models.Model):
item = models.ForeignKey(
Item, null=True, on_delete=models.CASCADE, related_name="items")
    SIZE_CHOICES = [
("S", "Small"),
("L", "Large"),
]
    size = models.CharField(max_length=1, choices=SIZE_CHOICES, null=True)
toppings = models.ManyToManyField(
Topping, blank=True, related_name="items")
add_ons = models.ManyToManyField(
Add_On, blank=True, related_name="items")
extra_cheese_added = models.BooleanField(default=False)
price = models.FloatField(blank=True, null=True)
quantity = models.IntegerField(blank=True, null=True)
total_price = models.FloatField(null=True)
cart = models.ForeignKey(
Cart, null=True, on_delete=models.CASCADE, related_name="purchases")
order = models.ForeignKey(
Order, null=True, on_delete=models.CASCADE, related_name="purchases")
def __str__(self):
return f"{self.item}, {self.size}, {self.toppings}, {self.add_ons}, {self.extra_cheese_added},\
{self.price}, {self.quantity}, {self.total_price}, {self.cart}, {self.order}" | 33.282353 | 103 | 0.685401 |
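# --- Hedged helper sketch (not part of the original app) ---
# Shows how a Purchase's total_price could be derived from the Item price
# fields above; assumes a sized (small/large) item and ignores one_price items.
def example_total_price(item, size, quantity):
    unit = item.price_small if size == "S" else item.price_large
    return unit * quantity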
acee51a92f476facf2b31df3a03e8697a3e73289 | 302 | py | Python | GrayLsb.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | GrayLsb.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | GrayLsb.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | import PIL.Image as Image
path='low.bmp'
img = Image.open(path)
img_tmp = img.copy()
pix = img_tmp.load()
width,height = img_tmp.size
for w in range(width):
for h in range(height):
if pix[w,h]&1 == 0:
pix[w,h] = 0
else:
pix[w,h] = 255
img_tmp.save('ans.jpg')
# note: pixels are modified in place through the img_tmp PixelAccess object | 18.875 | 27 | 0.599338 |
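# --- Hedged alternative (not in the original script) ---
# Image.point() maps every pixel without an explicit loop; 'ans_point.png' is
# an assumed output name, and PNG is used because it is lossless, so the bit
# plane survives exactly (the JPEG save above may smear it).
lsb_plane = img.point(lambda p: 255 if p & 1 else 0)
lsb_plane.save('ans_point.png')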
acee51a975e27da00e81a3565516d45f08ae1911 | 10,223 | py | Python | zoltpy/covid19.py | hannanabdul55/zoltpy | a73f9ae30db49f4ea71a8356ed726a0ec928894c | [
"MIT"
] | null | null | null | zoltpy/covid19.py | hannanabdul55/zoltpy | a73f9ae30db49f4ea71a8356ed726a0ec928894c | [
"MIT"
] | null | null | null | zoltpy/covid19.py | hannanabdul55/zoltpy | a73f9ae30db49f4ea71a8356ed726a0ec928894c | [
"MIT"
] | null | null | null | import csv
import datetime
import os
from pathlib import Path
import click
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file, summarized_error_messages, MESSAGE_FORECAST_CHECKS, \
MESSAGE_DATE_ALIGNMENT
#
# functionality specific to the COVID19 project
#
#
# columns in addition to REQUIRED_COLUMNS
#
COVID_ADDL_REQ_COLS = ['forecast_date', 'target_end_date']
#
# FIPS codes (locations)
#
def load_fips_codes(file):
fips_codes_state = []
fips_codes_county = []
with open(file) as fp:
csv_reader = csv.reader(fp, delimiter=',')
next(csv_reader) # skip header
for abbreviation, location, location_name in csv_reader:
if abbreviation:
fips_codes_state.append(location)
else:
fips_codes_county.append(location)
return fips_codes_state, fips_codes_county
# file per https://stackoverflow.com/questions/10174211/how-to-make-an-always-relative-to-current-module-file-path
# FIPS_CODES_STATE: '01', '02', ..., 'US'
# FIPS_CODES_COUNTY: '01001', '01003', ..., '56045'
FIPS_CODES_STATE, FIPS_CODES_COUNTY = load_fips_codes(os.path.join(os.path.dirname(__file__), 'locations.csv'))
#
# targets
#
COVID_TARGETS_NON_CASE = [f"{_} day ahead inc hosp" for _ in range(131)] + \
[f"{_} wk ahead inc death" for _ in range(1, 21)] + \
[f"{_} wk ahead cum death" for _ in range(1, 21)]
COVID_TARGETS_CASE = [f"{_} wk ahead inc case" for _ in range(1, 9)]
COVID_TARGETS = COVID_TARGETS_NON_CASE + COVID_TARGETS_CASE
#
# quantiles
#
#
# these are non-overlapping, which keeps the logic below simple. the case quantile list is complete, but the full
# quantile set for non-case targets is the union of both lists, i.e.,
# NC-only: 0.01, 0.05, 0.15, 0.2, 0.3, 0.35, 0.4, 0.45, 0.55, 0.6, 0.65, 0.7, 0.8, 0.85, 0.95, 0.99 (16; 16 + 7 = 23 in total)
# C: 0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975 (7)
#
COVID_QUANTILES_NON_CASE = [0.01, 0.05, 0.15, 0.2, 0.3, 0.35, 0.4, 0.45, 0.55, 0.6, 0.65, 0.7, 0.8, 0.85, 0.95, 0.99]
COVID_QUANTILES_CASE = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975]
#
# validate_quantile_csv_file()
#
def validate_quantile_csv_file(csv_fp):
"""
    A simple wrapper of `json_io_dict_from_quantile_csv_file()` that discards the json_io_dict and returns only the
    validation error messages.
    :param csv_fp: as passed to `json_io_dict_from_quantile_csv_file()`
    :return: a list of summarized error strings, or the string "no errors" if validation passed
"""
quantile_csv_file = Path(csv_fp)
click.echo(f"* validating quantile_csv_file '{quantile_csv_file}'...")
with open(quantile_csv_file) as cdc_csv_fp:
# toss json_io_dict:
_, error_messages = json_io_dict_from_quantile_csv_file(cdc_csv_fp, COVID_TARGETS, covid19_row_validator,
COVID_ADDL_REQ_COLS)
if error_messages:
return summarized_error_messages(error_messages) # summarizes and orders, converting 2-tuples to strings
else:
return "no errors"
#
# `json_io_dict_from_quantile_csv_file()` row validator
#
def covid19_row_validator(column_index_dict, row):
"""
Does COVID19-specific row validation. Notes:
    - expects these `valid_target_names` passed to `json_io_dict_from_quantile_csv_file()`: COVID_TARGETS
- expects these `addl_req_cols` passed to `json_io_dict_from_quantile_csv_file()`: COVID_ADDL_REQ_COLS
"""
from zoltpy.cdc_io import _parse_date # avoid circular imports
error_messages = [] # return value. filled next
location = row[column_index_dict['location']]
target = row[column_index_dict['target']]
is_county_location = location in FIPS_CODES_COUNTY
is_state_location = location in FIPS_CODES_STATE
is_case_target = target in COVID_TARGETS_CASE
is_non_case_target = target in COVID_TARGETS_NON_CASE
# validate location (FIPS code)
if not ((is_case_target and is_state_location) or
(is_case_target and is_county_location) or
(is_non_case_target and is_state_location)):
error_messages.append((MESSAGE_FORECAST_CHECKS, f"invalid location for target. location={location!r}, "
f"target={target!r}. row={row}"))
# validate quantiles. recall at this point all row values are strings, but COVID_QUANTILES_NON_CASE is numbers
quantile = row[column_index_dict['quantile']]
value = row[column_index_dict['value']]
try:
if float(value) < 0: # value must always be non-negative regardless of row type
error_messages.append((MESSAGE_FORECAST_CHECKS, f"entries in the `value` column must be non-negative. "
f"value='{value}'. row={row}"))
except ValueError:
pass # ignore here - it will be caught by `json_io_dict_from_quantile_csv_file()`
if row[column_index_dict['type']] == 'quantile':
try:
quantile_float = float(quantile)
is_case_quantile = quantile_float in COVID_QUANTILES_CASE
is_non_case_quantile = quantile_float in COVID_QUANTILES_CASE + COVID_QUANTILES_NON_CASE
if not ((is_case_target and is_case_quantile) or
(is_non_case_target and is_case_quantile) or
(is_non_case_target and is_non_case_quantile)):
error_messages.append((MESSAGE_FORECAST_CHECKS, f"invalid quantile for target. quantile={quantile!r}, "
f"target={target!r}. row={row}"))
except ValueError:
pass # ignore here - it will be caught by `json_io_dict_from_quantile_csv_file()`
# validate forecast_date and target_end_date date formats
forecast_date = row[column_index_dict['forecast_date']]
target_end_date = row[column_index_dict['target_end_date']]
forecast_date = _parse_date(forecast_date) # None if invalid format
target_end_date = _parse_date(target_end_date) # ""
if not forecast_date or not target_end_date:
error_messages.append((MESSAGE_FORECAST_CHECKS,
f"invalid forecast_date or target_end_date format. forecast_date={forecast_date!r}. "
f"target_end_date={target_end_date}. row={row}"))
return error_messages # terminate - remaining validation depends on valid dates
# formats are valid. next: validate "__ day ahead" or "__ week ahead" increment - must be an int
target_day_ahead_split = target.split('day ahead')
target_week_ahead_split = target.split('wk ahead')
is_day_or_week_ahead_target = (len(target_day_ahead_split) == 2) or (len(target_week_ahead_split) == 2)
try:
if is_day_or_week_ahead_target: # valid day or week ahead target
step_ahead_increment = int(target_day_ahead_split[0].strip()) if len(target_day_ahead_split) == 2 \
else int(target_week_ahead_split[0].strip())
else: # invalid target. don't add error message b/c caught by caller `_validated_rows_for_quantile_csv()`
return error_messages # terminate - remaining validation depends on valid step_ahead_increment
except ValueError:
error_messages.append((MESSAGE_FORECAST_CHECKS, f"non-integer 'ahead' number in target: {target!r}. row={row}"))
return error_messages # terminate - remaining validation depends on valid step_ahead_increment
# validate date alignment
# 1/4) for x day ahead targets the target_end_date should be forecast_date + x
if 'day ahead' in target:
if (target_end_date - forecast_date).days != step_ahead_increment:
error_messages.append((MESSAGE_FORECAST_CHECKS,
f"invalid target_end_date: was not {step_ahead_increment} day(s) after "
f"forecast_date. diff={(target_end_date - forecast_date).days}, "
f"forecast_date={forecast_date}, target_end_date={target_end_date}. row={row}"))
else: # 'wk ahead' in target
# NB: we convert `weekdays()` (Monday is 0 and Sunday is 6) to a Sunday-based numbering to get the math to work:
weekday_to_sun_based = {i: i + 2 if i != 6 else 1 for i in range(7)} # Sun=1, Mon=2, ..., Sat=7
# 2/4) for x week ahead targets, weekday(target_end_date) should be a Sat
if weekday_to_sun_based[target_end_date.weekday()] != 7: # Sat
error_messages.append((MESSAGE_DATE_ALIGNMENT, f"target_end_date was not a Saturday: {target_end_date}. "
f"row={row}"))
return error_messages # terminate - remaining validation depends on valid target_end_date
# set exp_target_end_date and then validate it
weekday_diff = datetime.timedelta(days=(abs(weekday_to_sun_based[target_end_date.weekday()] -
weekday_to_sun_based[forecast_date.weekday()])))
if weekday_to_sun_based[forecast_date.weekday()] <= 2: # Sun or Mon
# 3/4) (Sun or Mon) for x week ahead targets, ensure that the 1-week ahead forecast is for the next Sat
delta_days = weekday_diff + datetime.timedelta(days=(7 * (step_ahead_increment - 1)))
exp_target_end_date = forecast_date + delta_days
else: # Tue through Sat
# 4/4) (Tue on) for x week ahead targets, ensures that the 1-week ahead forecast is for the Sat after next
delta_days = weekday_diff + datetime.timedelta(days=(7 * step_ahead_increment))
exp_target_end_date = forecast_date + delta_days
if target_end_date != exp_target_end_date:
error_messages.append((MESSAGE_DATE_ALIGNMENT,
f"target_end_date was not the expected Saturday. forecast_date={forecast_date}, "
f"target_end_date={target_end_date}. exp_target_end_date={exp_target_end_date}, "
f"row={row}"))
# done!
return error_messages
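# --- Hedged usage sketch (not part of the original module) ---
# A minimal hand-built row run through the validator above. The column order,
# the dates and every value are illustrative assumptions; it also assumes
# zoltpy's _parse_date() accepts ISO YYYY-MM-DD dates.
if __name__ == "__main__":
    column_names = ["forecast_date", "target", "target_end_date", "location", "type", "quantile", "value"]
    cols = {name: idx for idx, name in enumerate(column_names)}
    # 2020-05-11 is a Monday, so a "1 wk ahead" target must end on 2020-05-16 (the next Saturday)
    row = ["2020-05-11", "1 wk ahead cum death", "2020-05-16", "US", "quantile", "0.5", "10"]
    print(covid19_row_validator(cols, row))  # [] means the row passed every check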
| 48.680952 | 121 | 0.65695 |
acee51e82f41ea45eed99aa95a35da29ce140ec2 | 1,043 | py | Python | setup.py | mtingers/opacify | 4c71478b98afb4c809d9be139b59600f5084a697 | [
"BSD-2-Clause"
] | 1 | 2019-01-24T16:52:57.000Z | 2019-01-24T16:52:57.000Z | setup.py | mtingers/opacify | 4c71478b98afb4c809d9be139b59600f5084a697 | [
"BSD-2-Clause"
] | 5 | 2019-01-24T04:51:22.000Z | 2019-01-29T06:52:13.000Z | setup.py | mtingers/opacify | 4c71478b98afb4c809d9be139b59600f5084a697 | [
"BSD-2-Clause"
] | null | null | null | from distutils.core import setup
import setuptools
setup(
name='Opacify',
version='0.3.2',
author='Matth Ingersoll',
author_email='matth@mtingers.com',
packages=['opacify',],
license='BSD 2-Clause License',
long_description=open('README.md').read(),
url='https://github.com/mtingers/opacify',
install_requires=[
"requests",
"enum34",
],
#scripts=['bin/opacify',],
entry_points={
'console_scripts': ['opacify=opacify.opacify_cli:main',],
},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: System :: Archiving :: Mirroring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 29.8 | 65 | 0.597315 |
acee52675ec6f7f13708607dd24e4a5331bcaa4c | 13,984 | py | Python | arviz/data/io_numpyro.py | alexisperakis/arviz | edfbcf9c177f6be25139e20317400133ab1180ed | [
"Apache-2.0"
] | 1 | 2021-06-13T00:29:11.000Z | 2021-06-13T00:29:11.000Z | arviz/data/io_numpyro.py | alexisperakis/arviz | edfbcf9c177f6be25139e20317400133ab1180ed | [
"Apache-2.0"
] | null | null | null | arviz/data/io_numpyro.py | alexisperakis/arviz | edfbcf9c177f6be25139e20317400133ab1180ed | [
"Apache-2.0"
] | null | null | null | """NumPyro-specific conversion code."""
import logging
from typing import Callable, Optional
import numpy as np
from .. import utils
from ..rcparams import rcParams
from .base import dict_to_dataset, requires
from .inference_data import InferenceData
_log = logging.getLogger(__name__)
class NumPyroConverter:
"""Encapsulate NumPyro specific logic."""
# pylint: disable=too-many-instance-attributes
model = None # type: Optional[Callable]
nchains = None # type: int
ndraws = None # type: int
def __init__(
self,
*,
posterior=None,
prior=None,
posterior_predictive=None,
predictions=None,
constant_data=None,
predictions_constant_data=None,
log_likelihood=None,
index_origin=None,
coords=None,
dims=None,
pred_dims=None,
num_chains=1
):
"""Convert NumPyro data into an InferenceData object.
Parameters
----------
posterior : numpyro.mcmc.MCMC
Fitted MCMC object from NumPyro
prior: dict
Prior samples from a NumPyro model
posterior_predictive : dict
Posterior predictive samples for the posterior
predictions: dict
Out of sample predictions
constant_data: dict
Dictionary containing constant data variables mapped to their values.
predictions_constant_data: dict
Constant data used for out-of-sample predictions.
index_origin : int, optional
coords : dict[str] -> list[str]
Map of dimensions to coordinates
dims : dict[str] -> list[str]
Map variable names to their coordinates
pred_dims: dict
Dims for predictions data. Map variable names to their coordinates.
num_chains: int
Number of chains used for sampling. Ignored if posterior is present.
"""
import jax
import numpyro
self.posterior = posterior
self.prior = jax.device_get(prior)
self.posterior_predictive = jax.device_get(posterior_predictive)
self.predictions = predictions
self.constant_data = constant_data
self.predictions_constant_data = predictions_constant_data
self.log_likelihood = (
rcParams["data.log_likelihood"] if log_likelihood is None else log_likelihood
)
self.index_origin = rcParams["data.index_origin"] if index_origin is None else index_origin
self.coords = coords
self.dims = dims
self.pred_dims = pred_dims
self.numpyro = numpyro
def arbitrary_element(dct):
return next(iter(dct.values()))
if posterior is not None:
samples = jax.device_get(self.posterior.get_samples(group_by_chain=True))
if not isinstance(samples, dict):
# handle the case we run MCMC with a general potential_fn
# (instead of a NumPyro model) whose args is not a dictionary
# (e.g. f(x) = x ** 2)
tree_flatten_samples = jax.tree_util.tree_flatten(samples)[0]
samples = {
"Param:{}".format(i): jax.device_get(v)
for i, v in enumerate(tree_flatten_samples)
}
self._samples = samples
self.nchains, self.ndraws = (
posterior.num_chains,
posterior.num_samples // posterior.thinning,
)
self.model = self.posterior.sampler.model
# model arguments and keyword arguments
self._args = self.posterior._args # pylint: disable=protected-access
self._kwargs = self.posterior._kwargs # pylint: disable=protected-access
else:
self.nchains = num_chains
get_from = None
if predictions is not None:
get_from = predictions
elif posterior_predictive is not None:
get_from = posterior_predictive
elif prior is not None:
get_from = prior
if get_from is None and constant_data is None and predictions_constant_data is None:
raise ValueError(
"When constructing InferenceData must have at least"
" one of posterior, prior, posterior_predictive or predictions."
)
if get_from is not None:
aelem = arbitrary_element(get_from)
self.ndraws = aelem.shape[0] // self.nchains
observations = {}
if self.model is not None:
# we need to use an init strategy to generate random samples for ImproperUniform sites
seeded_model = numpyro.handlers.substitute(
numpyro.handlers.seed(self.model, jax.random.PRNGKey(0)),
substitute_fn=numpyro.infer.init_to_sample,
)
trace = numpyro.handlers.trace(seeded_model).get_trace(*self._args, **self._kwargs)
observations = {
name: site["value"]
for name, site in trace.items()
if site["type"] == "sample" and site["is_observed"]
}
self.observations = observations if observations else None
@requires("posterior")
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
data = self._samples
return dict_to_dataset(
data,
library=self.numpyro,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
)
@requires("posterior")
def sample_stats_to_xarray(self):
"""Extract sample_stats from NumPyro posterior."""
rename_key = {
"potential_energy": "lp",
"adapt_state.step_size": "step_size",
"num_steps": "n_steps",
"accept_prob": "acceptance_rate",
}
data = {}
for stat, value in self.posterior.get_extra_fields(group_by_chain=True).items():
if isinstance(value, (dict, tuple)):
continue
name = rename_key.get(stat, stat)
value = value.copy()
data[name] = value
if stat == "num_steps":
data["tree_depth"] = np.log2(value).astype(int) + 1
return dict_to_dataset(
data,
library=self.numpyro,
dims=None,
coords=self.coords,
index_origin=self.index_origin,
)
@requires("posterior")
@requires("model")
def log_likelihood_to_xarray(self):
"""Extract log likelihood from NumPyro posterior."""
if not self.log_likelihood:
return None
data = {}
if self.observations is not None:
samples = self.posterior.get_samples(group_by_chain=False)
log_likelihood_dict = self.numpyro.infer.log_likelihood(
self.model, samples, *self._args, **self._kwargs
)
for obs_name, log_like in log_likelihood_dict.items():
shape = (self.nchains, self.ndraws) + log_like.shape[1:]
data[obs_name] = np.reshape(log_like.copy(), shape)
return dict_to_dataset(
data,
library=self.numpyro,
dims=self.dims,
coords=self.coords,
index_origin=self.index_origin,
skip_event_dims=True,
)
def translate_posterior_predictive_dict_to_xarray(self, dct, dims):
"""Convert posterior_predictive or prediction samples to xarray."""
data = {}
for k, ary in dct.items():
shape = ary.shape
if shape[0] == self.nchains and shape[1] == self.ndraws:
data[k] = ary
elif shape[0] == self.nchains * self.ndraws:
data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))
else:
data[k] = utils.expand_dims(ary)
_log.warning(
"posterior predictive shape not compatible with number of chains and draws. "
"This can mean that some draws or even whole chains are not represented."
)
return dict_to_dataset(
data,
library=self.numpyro,
coords=self.coords,
dims=dims,
index_origin=self.index_origin,
)
@requires("posterior_predictive")
def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(
self.posterior_predictive, self.dims
)
@requires("predictions")
def predictions_to_xarray(self):
"""Convert predictions to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(self.predictions, self.pred_dims)
def priors_to_xarray(self):
"""Convert prior samples (and if possible prior predictive too) to xarray."""
if self.prior is None:
return {"prior": None, "prior_predictive": None}
if self.posterior is not None:
prior_vars = list(self._samples.keys())
prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]
else:
prior_vars = self.prior.keys()
prior_predictive_vars = None
priors_dict = {}
for group, var_names in zip(
("prior", "prior_predictive"), (prior_vars, prior_predictive_vars)
):
priors_dict[group] = (
None
if var_names is None
else dict_to_dataset(
{k: utils.expand_dims(self.prior[k]) for k in var_names},
library=self.numpyro,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
)
)
return priors_dict
@requires("observations")
@requires("model")
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
return dict_to_dataset(
self.observations,
library=self.numpyro,
dims=self.dims,
coords=self.coords,
default_dims=[],
index_origin=self.index_origin,
)
@requires("constant_data")
def constant_data_to_xarray(self):
"""Convert constant_data to xarray."""
return dict_to_dataset(
self.constant_data,
library=self.numpyro,
dims=self.dims,
coords=self.coords,
default_dims=[],
index_origin=self.index_origin,
)
@requires("predictions_constant_data")
def predictions_constant_data_to_xarray(self):
"""Convert predictions_constant_data to xarray."""
return dict_to_dataset(
self.predictions_constant_data,
library=self.numpyro,
dims=self.pred_dims,
coords=self.coords,
default_dims=[],
index_origin=self.index_origin,
)
def to_inference_data(self):
"""Convert all available data to an InferenceData object.
        Note that if groups cannot be created (i.e., there is no `trace`, so
        the `posterior` and `sample_stats` cannot be extracted), then the InferenceData
        will not have those groups.
"""
return InferenceData(
**{
"posterior": self.posterior_to_xarray(),
"sample_stats": self.sample_stats_to_xarray(),
"log_likelihood": self.log_likelihood_to_xarray(),
"posterior_predictive": self.posterior_predictive_to_xarray(),
"predictions": self.predictions_to_xarray(),
**self.priors_to_xarray(),
"observed_data": self.observed_data_to_xarray(),
"constant_data": self.constant_data_to_xarray(),
"predictions_constant_data": self.predictions_constant_data_to_xarray(),
}
)
def from_numpyro(
posterior=None,
*,
prior=None,
posterior_predictive=None,
predictions=None,
constant_data=None,
predictions_constant_data=None,
log_likelihood=None,
index_origin=None,
coords=None,
dims=None,
pred_dims=None,
num_chains=1
):
"""Convert NumPyro data into an InferenceData object.
For a usage example read the
:ref:`Creating InferenceData section on from_numpyro <creating_InferenceData>`
Parameters
----------
posterior : numpyro.mcmc.MCMC
Fitted MCMC object from NumPyro
prior: dict
Prior samples from a NumPyro model
posterior_predictive : dict
Posterior predictive samples for the posterior
predictions: dict
Out of sample predictions
constant_data: dict
Dictionary containing constant data variables mapped to their values.
predictions_constant_data: dict
Constant data used for out-of-sample predictions.
index_origin : int, optional
coords : dict[str] -> list[str]
Map of dimensions to coordinates
dims : dict[str] -> list[str]
Map variable names to their coordinates
pred_dims: dict
Dims for predictions data. Map variable names to their coordinates.
num_chains: int
Number of chains used for sampling. Ignored if posterior is present.
"""
return NumPyroConverter(
posterior=posterior,
prior=prior,
posterior_predictive=posterior_predictive,
predictions=predictions,
constant_data=constant_data,
predictions_constant_data=predictions_constant_data,
log_likelihood=log_likelihood,
index_origin=index_origin,
coords=coords,
dims=dims,
pred_dims=pred_dims,
num_chains=num_chains,
).to_inference_data()
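# --- Hedged usage sketch (not part of the original module) ---
# The toy model, sample sizes and seed below are illustrative assumptions.
if __name__ == "__main__":
    import jax
    import numpyro
    import numpyro.distributions as dist
    from numpyro.infer import MCMC, NUTS

    def toy_model():
        numpyro.sample("mu", dist.Normal(0.0, 1.0))

    mcmc = MCMC(NUTS(toy_model), num_warmup=100, num_samples=200)
    mcmc.run(jax.random.PRNGKey(0))
    idata = from_numpyro(mcmc)  # InferenceData with posterior + sample_stats groups
    print(idata)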
| 36.703412 | 99 | 0.600329 |
acee5272e4689669aa7e0cd3f1c5cce67b7c046e | 5,017 | py | Python | sdks/python/http_client/v1/polyaxon_sdk/models/v1_pipeline.py | polyaxon/polyaxon | a835f2872a63f6cf5c27d2dd1125ad7c18eb849a | [
"Apache-2.0"
] | 3,200 | 2017-05-09T11:35:31.000Z | 2022-03-28T05:43:22.000Z | sdks/python/http_client/v1/polyaxon_sdk/models/v1_pipeline.py | polyaxon/polyaxon | a835f2872a63f6cf5c27d2dd1125ad7c18eb849a | [
"Apache-2.0"
] | 1,324 | 2017-06-29T07:21:27.000Z | 2022-03-27T12:41:10.000Z | sdks/python/http_client/v1/polyaxon_sdk/models/v1_pipeline.py | polyaxon/polyaxon | a835f2872a63f6cf5c27d2dd1125ad7c18eb849a | [
"Apache-2.0"
] | 341 | 2017-01-10T23:06:53.000Z | 2022-03-10T08:15:18.000Z | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.11.3
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Pipeline(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uuid': 'str',
'name': 'str',
'kind': 'V1PipelineKind'
}
attribute_map = {
'uuid': 'uuid',
'name': 'name',
'kind': 'kind'
}
def __init__(self, uuid=None, name=None, kind=None, local_vars_configuration=None): # noqa: E501
"""V1Pipeline - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._name = None
self._kind = None
self.discriminator = None
if uuid is not None:
self.uuid = uuid
if name is not None:
self.name = name
if kind is not None:
self.kind = kind
@property
def uuid(self):
"""Gets the uuid of this V1Pipeline. # noqa: E501
:return: The uuid of this V1Pipeline. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1Pipeline.
:param uuid: The uuid of this V1Pipeline. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this V1Pipeline. # noqa: E501
:return: The name of this V1Pipeline. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Pipeline.
:param name: The name of this V1Pipeline. # noqa: E501
:type: str
"""
self._name = name
@property
def kind(self):
"""Gets the kind of this V1Pipeline. # noqa: E501
:return: The kind of this V1Pipeline. # noqa: E501
:rtype: V1PipelineKind
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Pipeline.
:param kind: The kind of this V1Pipeline. # noqa: E501
:type: V1PipelineKind
"""
self._kind = kind
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Pipeline):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Pipeline):
return True
return self.to_dict() != other.to_dict()
| 26.405263 | 101 | 0.576639 |
acee52dd0566e323b48c1e25cfe5d44ad20b175b | 1,044 | py | Python | kubernetes/test/test_v1beta1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_subject_access_review import V1beta1SubjectAccessReview
class TestV1beta1SubjectAccessReview(unittest.TestCase):
""" V1beta1SubjectAccessReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1SubjectAccessReview(self):
"""
Test V1beta1SubjectAccessReview
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_subject_access_review.V1beta1SubjectAccessReview()
pass
if __name__ == '__main__':
unittest.main()
| 23.2 | 105 | 0.731801 |
acee534bc1f2a7feb18935e5d4caa5d3f8df88a2 | 1,059 | py | Python | dev/circuitpython/examples/motorkit_dc_test.py | scripsi/picodeebee | 0ec77e92f09fa8711705623482e57a5e0b702696 | [
"MIT"
] | 7 | 2021-03-15T10:06:20.000Z | 2022-03-23T02:53:15.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/motorkit_dc_test.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | 5 | 2021-04-27T18:21:11.000Z | 2021-05-02T14:17:14.000Z | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/motorkit_dc_test.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_motorkit import MotorKit
kit = MotorKit(i2c=board.I2C())
kit.motor1.throttle = 0
while True:
print("Forward!")
kit.motor1.throttle = 0.5
time.sleep(1)
print("Speed up...")
for i in range(0, 101):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(100, -1, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Backward!")
kit.motor1.throttle = -0.5
time.sleep(1)
print("Speed up...")
for i in range(0, -101, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(-100, 1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Stop!")
kit.motor1.throttle = 0
time.sleep(1)
| 22.0625 | 63 | 0.553352 |
acee549faf71128439c2729851a41a83e7483764 | 4,346 | py | Python | tests/rate_limit_test.py | phrfpeixoto/python-redis-rate-limit | 3421580f59426e88009f4f9e4c8ba14f6e1a3002 | [
"MIT"
] | null | null | null | tests/rate_limit_test.py | phrfpeixoto/python-redis-rate-limit | 3421580f59426e88009f4f9e4c8ba14f6e1a3002 | [
"MIT"
] | null | null | null | tests/rate_limit_test.py | phrfpeixoto/python-redis-rate-limit | 3421580f59426e88009f4f9e4c8ba14f6e1a3002 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import time
from redis_rate_limit import RateLimit, RateLimiter, TooManyRequests
class TestRedisRateLimit(unittest.TestCase):
def setUp(self):
"""
Initialises Rate Limit class and delete all keys from Redis.
"""
self.rate_limit = RateLimit(resource='test', client='localhost',
max_requests=10)
self.rate_limit._reset()
def _make_10_requests(self):
"""
Increments usage ten times.
"""
for x in range(0, 10):
with self.rate_limit:
pass
def test_limit_10_max_request(self):
"""
Should raise TooManyRequests Exception when trying to increment for the
eleventh time.
"""
self.assertEqual(self.rate_limit.get_usage(), 0)
self.assertEqual(self.rate_limit.has_been_reached(), False)
self._make_10_requests()
self.assertEqual(self.rate_limit.get_usage(), 10)
self.assertEqual(self.rate_limit.has_been_reached(), True)
with self.assertRaises(TooManyRequests):
with self.rate_limit:
pass
self.assertEqual(self.rate_limit.get_usage(), 11)
self.assertEqual(self.rate_limit.has_been_reached(), True)
def test_expire(self):
"""
Should not raise TooManyRequests Exception when trying to increment for
the eleventh time after the expire time.
"""
self._make_10_requests()
time.sleep(1)
with self.rate_limit:
pass
def test_not_expired(self):
"""
Should raise TooManyRequests Exception when the expire time has not
been reached yet.
"""
self.rate_limit = RateLimit(resource='test', client='localhost',
max_requests=10, expire=2)
self._make_10_requests()
time.sleep(1)
with self.assertRaises(TooManyRequests):
with self.rate_limit:
pass
def test_limit_10_using_rate_limiter(self):
"""
Should raise TooManyRequests Exception when trying to increment for the
eleventh time.
"""
self.rate_limit = RateLimiter(resource='test', max_requests=10,
expire=2).limit(client='localhost')
self.assertEqual(self.rate_limit.get_usage(), 0)
self.assertEqual(self.rate_limit.has_been_reached(), False)
self._make_10_requests()
self.assertEqual(self.rate_limit.get_usage(), 10)
self.assertEqual(self.rate_limit.has_been_reached(), True)
with self.assertRaises(TooManyRequests):
with self.rate_limit:
pass
self.assertEqual(self.rate_limit.get_usage(), 11)
self.assertEqual(self.rate_limit.has_been_reached(), True)
def test_wait_time_limit_reached(self):
"""
Should report wait time approximately equal to expire after reaching
the limit without delay between requests.
"""
self.rate_limit = RateLimit(resource='test', client='localhost',
max_requests=10, expire=1)
self._make_10_requests()
with self.assertRaises(TooManyRequests):
with self.rate_limit:
pass
self.assertAlmostEqual(self.rate_limit.get_wait_time(), 1, places=2)
def test_wait_time_limit_expired(self):
"""
Should report wait time equal to expire / max_requests before any
requests were made and after the limit has expired.
"""
self.rate_limit = RateLimit(resource='test', client='localhost',
max_requests=10, expire=1)
self.assertEqual(self.rate_limit.get_wait_time(), 1./10)
self._make_10_requests()
time.sleep(1)
self.assertEqual(self.rate_limit.get_wait_time(), 1./10)
def test_context_manager_returns_usage(self):
"""
Should return the usage when used as a context manager.
"""
self.rate_limit = RateLimit(resource='test', client='localhost',
max_requests=1, expire=1)
with self.rate_limit as usage:
self.assertEqual(usage, 1)
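def _guarded_call(fn, client_ip):
    """Hedged usage sketch (not part of the test suite): run ``fn`` under a
    100-requests-per-minute limit per client. The resource name, the limits and
    the 429 mapping are illustrative assumptions."""
    limiter = RateLimiter(resource='api', max_requests=100, expire=60)
    try:
        with limiter.limit(client=client_ip):
            return fn()
    except TooManyRequests:
        return None  # caller would map this to HTTP 429 or schedule a retry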
if __name__ == '__main__':
unittest.main()
| 34.492063 | 79 | 0.615278 |
acee550ec0a0731e342b86d8b890944d2f003b25 | 1,165 | py | Python | structural/flyweight_pattern.py | kkaushik24/python-design-patterns | 5453710a95398c06b388aa64ad0a785fc2fed8bd | [
"Apache-2.0"
] | 1 | 2016-02-05T15:22:10.000Z | 2016-02-05T15:22:10.000Z | structural/flyweight_pattern.py | kkaushik24/python-design-patterns | 5453710a95398c06b388aa64ad0a785fc2fed8bd | [
"Apache-2.0"
] | null | null | null | structural/flyweight_pattern.py | kkaushik24/python-design-patterns | 5453710a95398c06b388aa64ad0a785fc2fed8bd | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
from random import randint
class Shape:
__metaclass__ = ABCMeta
@abstractmethod
def draw(self):
pass
class Circle(Shape):
def __init__(self, color):
self.color = color
def set_x(self, x):
self.x = x
def set_y(self, y):
self.y = y
def set_radius(self, radius):
self.radius = radius
def draw(self):
print "Circle: Draw color:{0} x:{1},y:{2},radius:{3}".format(self.color, self.x, self.y, self.radius)
class ShapeFactory:
circle_dict = {}
def get_circle(self, color):
circle = ShapeFactory.circle_dict.get(color)
if circle is None:
circle = Circle(color)
ShapeFactory.circle_dict.update({color: circle})
print 'creating circle of color: ' + color
return circle
# flyweight demo
if __name__ == '__main__':
colors = ['red', 'green', 'blue', 'white', 'black']
for color in colors:
circle = ShapeFactory().get_circle(color)
circle.set_x(randint(1, 10))
circle.set_y(randint(1, 10))
circle.set_radius(100)
circle.draw()
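    # Hedged follow-up check (added for illustration): a repeated color must
    # return the same cached instance - that object sharing is the Flyweight win.
    red_a = ShapeFactory().get_circle('red')
    red_b = ShapeFactory().get_circle('red')
    assert red_a is red_b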
| 21.574074 | 109 | 0.601717 |
acee5572b0bc2f4a3b9668e9dbf1fc0c310fd2be | 8,961 | py | Python | nwb_conversion_tools/utils.py | sneakers-the-rat/nwb-conversion-tools | 46a242f01ba80e489a1d4e89c8612036c7f04f56 | [
"BSD-3-Clause"
] | null | null | null | nwb_conversion_tools/utils.py | sneakers-the-rat/nwb-conversion-tools | 46a242f01ba80e489a1d4e89c8612036c7f04f56 | [
"BSD-3-Clause"
] | null | null | null | nwb_conversion_tools/utils.py | sneakers-the-rat/nwb-conversion-tools | 46a242f01ba80e489a1d4e89c8612036c7f04f56 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T20:38:31.000Z | 2021-06-28T20:38:31.000Z | """Authors: Cody Baker, Ben Dichter and Luiz Tauffer."""
from datetime import datetime
import pkgutil
import importlib
from pathlib import Path
import typing
import inspect
import numpy as np
import pynwb
from .json_schema_utils import get_base_schema
def get_schema_from_hdmf_class(hdmf_class):
"""Get metadata schema from hdmf class."""
schema = get_base_schema()
schema['tag'] = hdmf_class.__module__ + '.' + hdmf_class.__name__
# Detect child-like (as opposed to link) fields
pynwb_children_fields = [f['name'] for f in hdmf_class.get_fields_conf() if f.get('child', False)]
# For MultiContainerInterface
if hasattr(hdmf_class, '__clsconf__'):
pynwb_children_fields.append(hdmf_class.__clsconf__['attr'])
# Temporary solution before this is solved: https://github.com/hdmf-dev/hdmf/issues/475
if 'device' in pynwb_children_fields:
pynwb_children_fields.remove('device')
docval = hdmf_class.__init__.__docval__
for docval_arg in docval['args']:
schema_arg = {docval_arg['name']: dict(description=docval_arg['doc'])}
# type float
if docval_arg['type'] == 'float' \
or (isinstance(docval_arg['type'], tuple)
and any([it in docval_arg['type'] for it in [float, 'float']])):
schema_arg[docval_arg['name']].update(type='number')
# type string
elif docval_arg['type'] is str \
or (isinstance(docval_arg['type'], tuple)
and str in docval_arg['type']):
schema_arg[docval_arg['name']].update(type='string')
# type datetime
elif docval_arg['type'] is datetime \
or (isinstance(docval_arg['type'], tuple)
and datetime in docval_arg['type']):
schema_arg[docval_arg['name']].update(type='string', format='date-time')
# if TimeSeries, skip it
elif docval_arg['type'] is pynwb.base.TimeSeries \
or (isinstance(docval_arg['type'], tuple)
and pynwb.base.TimeSeries in docval_arg['type']):
continue
# if PlaneSegmentation, skip it
elif docval_arg['type'] is pynwb.ophys.PlaneSegmentation or \
(isinstance(docval_arg['type'], tuple) and
pynwb.ophys.PlaneSegmentation in docval_arg['type']):
continue
else:
if not isinstance(docval_arg['type'], tuple):
docval_arg_type = [docval_arg['type']]
else:
docval_arg_type = docval_arg['type']
# if another nwb object (or list of nwb objects)
if any([hasattr(t, '__nwbfields__') for t in docval_arg_type]):
is_nwb = [hasattr(t, '__nwbfields__') for t in docval_arg_type]
item = docval_arg_type[np.where(is_nwb)[0][0]]
# if it is child
if docval_arg['name'] in pynwb_children_fields:
items = [get_schema_from_hdmf_class(item)]
schema_arg[docval_arg['name']].update(
type='array', items=items, minItems=1, maxItems=1
)
# if it is link
else:
target = item.__module__ + '.' + item.__name__
schema_arg[docval_arg['name']].update(
type='string',
target=target
)
else:
continue
# Check for default arguments
if 'default' in docval_arg:
if docval_arg['default'] is not None:
schema_arg[docval_arg['name']].update(default=docval_arg['default'])
else:
schema['required'].append(docval_arg['name'])
schema['properties'].update(schema_arg)
if 'allow_extra' in docval:
schema['additionalProperties'] = docval['allow_extra']
return schema
def get_schema_for_NWBFile():
schema = get_base_schema()
schema['tag'] = 'pynwb.file.NWBFile'
schema['required'] = ["session_description", "identifier", "session_start_time"]
schema['properties'] = {
"session_description": {
"type": "string",
"format": "long",
"description": "a description of the session where this data was generated"
},
"identifier": {
"type": "string",
"description": "a unique text identifier for the file"
},
"session_start_time": {
"type": "string",
"description": "the start date and time of the recording session",
"format": "date-time"
},
"experimenter": {
"type": "array",
"items": {"type": "string", "title": "experimenter"},
"description": "name of person who performed experiment"
},
"experiment_description": {
"type": "string",
"description": "general description of the experiment"
},
"session_id": {
"type": "string",
"description": "lab-specific ID for the session"
},
"institution": {
"type": "string",
"description": "institution(s) where experiment is performed"
},
"notes": {
"type": "string",
"description": "Notes about the experiment."
},
"pharmacology": {
"type": "string",
"description": "Description of drugs used, including how and when they were administered. Anesthesia(s), "
"painkiller(s), etc., plus dosage, concentration, etc."
},
"protocol": {
"type": "string",
"description": "Experimental protocol, if applicable. E.g., include IACUC protocol"
},
"related_publications": {
"type": "string",
"description": "Publication information.PMID, DOI, URL, etc. If multiple, concatenate together and describe"
" which is which. such as PMID, DOI, URL, etc"
},
"slices": {
"type": "string",
"description": "Description of slices, including information about preparation thickness, orientation, "
"temperature and bath solution"
},
"source_script": {
"type": "string",
"description": "Script file used to create this NWB file."
},
"source_script_file_name": {
"type": "string",
"description": "Name of the source_script file"
},
"data_collection": {
"type": "string",
"description": "Notes about data collection and analysis."
},
"surgery": {
"type": "string",
"description": "Narrative description about surgery/surgeries, including date(s) and who performed surgery."
},
"virus": {
"type": "string",
"description": "Information about virus(es) used in experiments, including virus ID, source, date made, "
"injection location, volume, etc."
},
"stimulus_notes": {
"type": "string",
"description": "Notes about stimuli, such as how and where presented."
},
"lab": {
"type": "string",
"description": "lab where experiment was performed"
}
}
return schema
def _recurse_subclasses(cls, leaves_only=True) -> list:
"""
Given some class, find its subclasses recursively
See: https://stackoverflow.com/a/17246726/13113166
Args:
leave_only (bool): If True, only include classes that have no further subclasses,
if False, return all subclasses.
Returns:
list of subclasses
"""
all_subclasses = []
for subclass in cls.__subclasses__():
if leaves_only:
if len(subclass.__subclasses__()) == 0:
all_subclasses.append(subclass)
else:
all_subclasses.append(subclass)
all_subclasses.extend(_recurse_subclasses(subclass))
return all_subclasses
def _recursive_import(module_name:str) -> typing.List[str]:
"""
Given some path in a python package, import all modules beneath it
Args:
module_name (str): name of module to recursively import
Returns:
list of all modules that were imported
"""
# iterate through modules, importing
# see https://codereview.stackexchange.com/a/70282
# import module (shouldnt hurt if it has already)
base_mod = importlib.import_module(module_name)
pkg_dir = Path(inspect.getsourcefile(base_mod)).resolve().parent
loaded_modules = []
for (module_loader, name, ispkg) in pkgutil.walk_packages([pkg_dir], base_mod.__package__+'.'):
if not ispkg:
importlib.import_module(name)
loaded_modules.append(name)
return loaded_modules | 35.844 | 120 | 0.576387 |
acee5577c38dce689f14ecba2f89067274fac9cd | 894 | py | Python | tests/stdlib/test_socket_ssl.py | li-caspar/eventlet_0.30.2 | a431842e29c26e46cfcfff60c93ca92e07663044 | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/eventlet-0.24.1/tests/stdlib/test_socket_ssl.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/eventlet-0.24.1/tests/stdlib/test_socket_ssl.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #!/usr/bin/env python
from eventlet import patcher
from eventlet.green import socket
# enable network resource
import test.test_support
i_r_e = test.test_support.is_resource_enabled
def is_resource_enabled(resource):
if resource == 'network':
return True
else:
return i_r_e(resource)
test.test_support.is_resource_enabled = is_resource_enabled
try:
socket.ssl
socket.sslerror
except AttributeError:
raise ImportError("Socket module doesn't support ssl")
patcher.inject('test.test_socket_ssl', globals())
test_basic = patcher.patch_function(test_basic)
test_rude_shutdown = patcher.patch_function(test_rude_shutdown)
def test_main():
if not hasattr(socket, "ssl"):
raise test_support.TestSkipped("socket module has no ssl support")
test_rude_shutdown()
test_basic()
test_timeout()
if __name__ == "__main__":
test_main()
| 22.35 | 74 | 0.747204 |
acee565f1301161cc81c304abca90fe76eb3088c | 1,150 | py | Python | intensio/core/utils/intensio_error.py | CrackerCat/Intensio-Obfuscator | 8f2e8a072c1c7220c9fb4c5184586bcc5f3c91bc | [
"MIT"
] | 1 | 2020-06-28T20:31:57.000Z | 2020-06-28T20:31:57.000Z | intensio/core/utils/intensio_error.py | CrackerCat/Intensio-Obfuscator | 8f2e8a072c1c7220c9fb4c5184586bcc5f3c91bc | [
"MIT"
] | null | null | null | intensio/core/utils/intensio_error.py | CrackerCat/Intensio-Obfuscator | 8f2e8a072c1c7220c9fb4c5184586bcc5f3c91bc | [
"MIT"
] | 1 | 2019-10-07T19:00:10.000Z | 2019-10-07T19:00:10.000Z | # -*- coding: utf-8 -*-
#---------------------------------------------------------- [Lib] -----------------------------------------------------------#
import sys
#--------------------------------------------------------- [Global] ---------------------------------------------------------#
# -- !! These names exist only to make the code self-documenting !! -- #
# -- Name generated from winerror.h -- #
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
ERROR_FILE_NOT_FOUND = 1
ERROR_PATH_NOT_FOUND = 1
ERROR_BAD_ENVIRONMENT = 1
ERROR_INVALID_DATA = 1
ERROR_BAD_COMMAND = 1
ERROR_BAD_LENGTH = 1
ERROR_INVALID_PARAMETER = 1
ERROR_BAD_ARGUMENTS = 1
ERROR_BAD_FILE_TYPE = 1
ERROR_CANNOT_COPY = 1
ERROR_NOT_EMPTY = 1
ERROR_INVALID_FUNCTION = 1
# -- Name generated manually -- #
ERROR_CANNOT_REMOVE = 1
ERROR_FILE_EMPTY = 1
ERROR_DIR_EMPTY = 1
ERROR_NOT_DIR = 1
ERROR_NOT_FILE = 1
#--------------------------------------------------- [Function(s)/Class] ----------------------------------------------------#
# -- Not raised an error, only for breaking a loop -- #
class BreakLoop (Exception):
pass
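# --- Hedged usage sketch (not part of the original module) ---
# BreakLoop exits a nested loop in one jump; the grid and target are invented.
if __name__ == "__main__":
    try:
        for i in range(3):            # outer loop
            for j in range(3):        # inner loop
                if (i, j) == (1, 1):
                    raise BreakLoop   # leave both loops at once
    except BreakLoop:
        print("left both loops at i=1, j=1")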
| 28.75 | 126 | 0.453043 |
acee57b4c75d6a306b5181b219ebd68f581517cb | 9,343 | py | Python | zscaler_admin_app/utils.py | manicolas2/zscaler-admin-app | 0ceb1e9236c2f87282e828a598586a356601b622 | [
"MIT"
] | null | null | null | zscaler_admin_app/utils.py | manicolas2/zscaler-admin-app | 0ceb1e9236c2f87282e828a598586a356601b622 | [
"MIT"
] | 1 | 2021-11-10T15:00:48.000Z | 2021-11-12T13:04:50.000Z | zscaler_admin_app/utils.py | manicolas2/zscaler-admin-app | 0ceb1e9236c2f87282e828a598586a356601b622 | [
"MIT"
] | 1 | 2021-12-19T01:05:05.000Z | 2021-12-19T01:05:05.000Z | import json
from typing import Any, Optional
from typing import Dict
from typing import List
from zscaler_python_sdk import admin
from zscaler_python_sdk import url_categories
from zscaler_python_sdk import url_filtering_rules
from zscaler_python_sdk import users
from zscaler_python_sdk import utils
def fetch_adminroles(tenant: Optional[str] = None) -> Dict[str, Any]:
adminroles = admin.fetch_adminroles(tenant=tenant)
return adminroles
def fetch_adminrole_names(tenant: Optional[str] = None) -> Dict[str, List[str]]:
adminroles = admin.fetch_adminroles(tenant=tenant)
if tenant is not None:
role_names = [role["name"] for role in adminroles[tenant]]
return {tenant: role_names}
else:
all_tenant_results: Dict[str, List[str]] = {}
for tenant_name in adminroles.keys():
all_tenant_results[tenant_name] = [
role["name"] for role in adminroles[tenant_name]
]
return all_tenant_results
def fetch_adminusers(tenant: Optional[str] = None) -> Dict[str, Dict[str, Any]]:
adminusers = admin.fetch_adminusers(tenant=tenant)
return adminusers
def fetch_adminuser_names(tenant: Optional[str] = None) -> Dict[str, Dict[str, Any]]:
adminusers = admin.fetch_adminusers(tenant=tenant)
for tenant_name in adminusers.keys():
adminusers[tenant_name] = [
f"{user['userName']} ({user['loginName']})"
for user in adminusers[tenant_name]
]
return adminusers
def create_new_adminuser(
tenant: str,
source_file_path: Optional[str] = None,
login_name: Optional[str] = None,
user_name: Optional[List[str]] = None,
email: Optional[List[str]] = None,
password: Optional[str] = None,
role_name: Optional[str] = None,
) -> Dict[str, Any]:
if source_file_path is not None:
with open(source_file_path) as file:
parameters = json.load(file)
login_name = parameters["loginName"]
user_name = parameters["userName"]
email = parameters["email"]
password = parameters["password"]
role_name = parameters["role"]
message: str = admin.create_adminuser(
loginName=login_name,
userName=user_name,
email=email,
password=password,
rolename=role_name,
tenant=tenant,
)
return message
def fetch_users(tenant: Optional[str]):
user_list: Dict[str, Any] = users.fetch_users(tenant=tenant)
return user_list
def fetch_user_summary(tenant: Optional[str]):
users_summary: Dict[str, Any] = users.fetch_users(tenant=tenant)
    for tenant_name in users_summary.keys():
        # reset per tenant so one tenant's summary does not leak into the next
        user_summary: List[str] = []
        for user in users_summary[tenant_name]:
username: str = user["name"]
email: str = user["email"]
groups: List[str] = [group["name"] for group in user["groups"]]
department: str = (
user["department"]["name"] if "department" in user.keys() else None
)
user_summary.append(
f"{username} (Mail: {email}, Group: {groups}, Department: {department})"
)
users_summary[tenant_name] = user_summary
return users_summary
def fetch_departments(tenant: Optional[str]):
departments: Dict[str, Any] = users.fetch_departments(tenant=tenant)
return departments
def fetch_department_summary(tenant: Optional[str]):
departments: Dict[str, Any] = users.fetch_departments(tenant=tenant)
for tenant_name in departments.keys():
dpt_name: List[str] = [dpt["name"] for dpt in departments[tenant_name]]
departments[tenant_name] = dpt_name
return departments
def fetch_groups(tenant: Optional[str]):
groups: Dict[str, Any] = users.fetch_groups(tenant=tenant)
return groups
def fetch_group_summary(tenant: Optional[str]):
groups: Dict[str, Any] = users.fetch_groups(tenant=tenant)
for tenant_name in groups.keys():
group: List[str] = [group["name"] for group in groups[tenant_name]]
groups[tenant_name] = group
return groups
def fetch_url_categories(tenant: Optional[str] = None) -> Dict[Any, Any]:
response: List[Dict[Any, Any]] = url_categories.fetch_url_categories(tenant=tenant)
return response
def fetch_url_category_names(tenant: Optional[str] = None) -> Dict[str, List[str]]:
response: Dict[str, Any] = url_categories.fetch_url_categories(tenant=tenant)
for tenant_name in response.keys():
category_list: List[str] = []
for category in response[tenant_name]:
if "configuredName" not in category.keys():
category_list.append(category["id"])
else:
category_list.append(f"{category['id']} ({category['configuredName']})")
response[tenant_name] = category_list
return response
def create_custom_url_category(
tenant: str,
source_file_path: Optional[str] = None,
configured_name: Optional[str] = None,
urls: Optional[List[str]] = None,
db_categorized_urls: Optional[List[str]] = None,
description: Optional[str] = None,
) -> str:
if source_file_path:
with open(source_file_path) as file:
parameters = json.load(file)
configured_name = parameters["configuredName"]
urls = parameters["urls"]
db_categorized_urls = parameters["dbCategorizedUrls"]
description = parameters["description"]
message = url_categories.create_custom_url_category(
configured_name,
urls,
db_categorized_urls,
description,
tenant=tenant,
)
return message
def fetch_urlfiltering_rule_names(tenant: str) -> Dict[str, List[str]]:
url_filter_rules = url_filtering_rules.fetch_all_url_filering_rules(tenant=tenant)
for tenant_name in url_filter_rules.keys():
names_of_urlfilter_rules = [
(
f"{rule['name']} (Order: {rule['order']}, Action: {rule['action']}, "
f"Status: {rule['state']})"
)
for rule in url_filter_rules[tenant_name]
]
url_filter_rules[tenant_name] = names_of_urlfilter_rules
return url_filter_rules
def fetch_urlfiltering_rule_details(tenant: str) -> Dict[str, Any]:
url_filter_rules = url_filtering_rules.fetch_all_url_filering_rules(tenant=tenant)
return url_filter_rules
def create_urlfiltering_rule(
source_file_path: str,
tenant: str,
) -> Dict[str, Any]:
with open(source_file_path) as file:
parameters = json.load(file)
rule_name = parameters["name"]
order = parameters["order"]
protocols = parameters["protocols"]
locations = parameters["locations"]
groups = parameters["groups"]
departments = parameters["departments"]
users = parameters["users"]
url_categories = utils.translate_category_from_name_to_id(
parameters["urlCategories"], tenant
)
state = parameters["state"]
rank = parameters["rank"]
action = parameters["action"]
message: Dict[str, Any] = url_filtering_rules.create_url_filering_rules(
name=rule_name,
order=order,
protocols=protocols,
locations=locations,
groups=groups,
departments=departments,
users=users,
url_categories=url_categories,
state=state,
rank=rank,
action=action,
tenant=tenant,
)
return message
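# --- Hedged sketch (not from a real tenant) ---
# The shape of the JSON file create_urlfiltering_rule() expects; the keys
# mirror exactly what the function reads above, but every value is an
# illustrative assumption.
_EXAMPLE_RULE_JSON = """
{
    "name": "block-social-media",
    "order": 1,
    "protocols": ["ANY_RULE"],
    "locations": [],
    "groups": [],
    "departments": [],
    "users": [],
    "urlCategories": ["SOCIAL_NETWORKING"],
    "state": "ENABLED",
    "rank": 7,
    "action": "BLOCK"
}
"""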
def update_urlfiltering_rule(
tenant: str,
source_file_path: Optional[str] = None,
) -> Any:
if source_file_path is not None:
with open(source_file_path) as file:
parameters = json.load(file)
name = parameters["name"] if "name" in parameters.keys() else None
order = parameters["order"] if "order" in parameters.keys() else None
protocols = (
parameters["protocols"] if "protocols" in parameters.keys() else None
)
            # TODO: support these fields too; the SDK needs to be modified first
# locations = (
# parameters["locations"] if "locations" in parameters.keys() else None
# )
# groups = parameters["groups"] if "groups" in parameters.keys() else None
# departments = (
# parameters["departments"]
# if "departments" in parameters.keys()
# else None
# )
# users = parameters["users"] if "users" in parameters.keys() else None
# url_categories = utils.translate_category_from_name_to_id(
# parameters["urlCategories"]
# if "urlCategories" in parameters.keys()
# else None,
# tenant,
# )
state = parameters["state"] if "state" in parameters.keys() else None
rank = parameters["rank"] if "rank" in parameters.keys() else None
action = parameters["action"] if "action" in parameters.keys() else None
message = url_filtering_rules.update_url_filtering_rule(
rule_name=name,
tenant=tenant,
order=order,
rank=rank,
state=state,
protocols=protocols,
action=action,
)
return f"[{message.status_code}]{message.text}"
| 34.732342 | 88 | 0.638767 |
acee57cb654bd15daeb52b50c1f9b27852d11f46 | 2,589 | py | Python | fcn_models/pre/tf_loadable_chain.py | pfnet-research/nips17-adversarial-attack | b10f37d9d315c3c5367a575765d56ea6505f85f1 | [
"MIT"
] | 153 | 2017-11-02T02:34:11.000Z | 2021-12-22T00:53:11.000Z | fcn_models/pre/tf_loadable_chain.py | jacksonjack/nips17-adversarial-attack | b10f37d9d315c3c5367a575765d56ea6505f85f1 | [
"MIT"
] | 1 | 2019-03-24T12:22:32.000Z | 2019-03-24T12:22:32.000Z | fcn_models/pre/tf_loadable_chain.py | jacksonjack/nips17-adversarial-attack | b10f37d9d315c3c5367a575765d56ea6505f85f1 | [
"MIT"
] | 30 | 2017-11-02T03:10:29.000Z | 2021-12-15T11:33:13.000Z | import chainer
import chainer.links as L
class TFLoadableChain(chainer.Chain):
"""Chain class that support load_tf_checkpoint() method."""
def __init__(self, final_layer_name=''):
super(TFLoadableChain, self).__init__()
self.final_layer_name = final_layer_name
def load_tf_checkpoint(self, reader, path):
"""Load TF checkpoint.
It calls ``load_tf_checkpoint()`` of the direct children, or extract the weights and biases for ``Linear`` and
``Convolution2D`` chains. The final layer of the network is specially treated; the original models output 1,001
        way label scores including a "background" label, while our models written in Chainer output 1,000 way label
scores. The name of the final layer is specified by the ``final_layer_name`` attribute.
Args:
reader (CheckpointReader): Checkpoint reader. It can be created by
``tensorflow.python.pywrap_tensorflow.NewCheckpointReader(path)``.
path (str): Root object path.
"""
for child in self.children():
full_name = '{}/{}'.format(path, child.name)
if isinstance(child, (L.Convolution2D, L.Linear)):
try:
start_index = int(child.name == self.final_layer_name)
W = reader.get_tensor(full_name + '/weights')
if W.ndim == 4: # conv2d
W = W.transpose(3, 2, 0, 1)
else: # linear
W = W.T
child.W.data[...] = W[start_index:]
if hasattr(child, 'b'):
b = reader.get_tensor(full_name + '/biases')
child.b.data[...] = b[start_index:]
except Exception:
print('failed at', full_name)
raise
else:
child.load_tf_checkpoint(reader, full_name)
class TFLoadableRepeat(chainer.ChainList):
"""ChainList that support load_tf_checkpoint() method.
It corresponds to ``slim.repeat()``.
"""
def __init__(self, generator, count, genname):
super(TFLoadableRepeat, self).__init__()
for i in range(count):
self.add_link(generator())
self.genname = genname
def __call__(self, x):
for layer in self:
x = layer(x)
return x
def load_tf_checkpoint(self, reader, path):
for i, child in enumerate(self, 1):
child.load_tf_checkpoint(reader, '{}/{}_{}'.format(path, self.genname, i))
| 37.521739 | 119 | 0.577443 |
acee594cd6126586d06c53429f2ec1b22cb312af | 2,625 | py | Python | fabio/test/test_nexus.py | picca/fabio | bc3aae330bef6e1c983007562157edfe6d7daf91 | [
"Apache-2.0"
] | null | null | null | fabio/test/test_nexus.py | picca/fabio | bc3aae330bef6e1c983007562157edfe6d7daf91 | [
"Apache-2.0"
] | 2 | 2019-04-24T13:43:41.000Z | 2019-06-13T08:54:02.000Z | fabio/test/test_nexus.py | boesecke/fabio | 11350e445a6def4d02c6860aea3ae7f36652af6a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: X-ray image reader
# https://github.com/silx-kit/fabio
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Unit tests for nexus file reader
"""
from __future__ import print_function, with_statement, division, absolute_import
import unittest
import os
import logging
logger = logging.getLogger(__name__)
from .utilstest import UtilsTest
from .. import nexus
class TestNexus(unittest.TestCase):
def setUp(self):
if nexus.h5py is None:
self.skipTest("h5py library is not available. Skipping Nexus test")
def test_nexus(self):
"Test creation of Nexus files"
fname = os.path.join(UtilsTest.tempdir, "nexus.h5")
nex = nexus.Nexus(fname)
entry = nex.new_entry("entry")
nex.new_instrument(entry, "ID00")
nex.new_detector("camera")
self.assertEqual(len(nex.get_entries()), 2, "nexus file has 2 entries")
nex.close()
self.assertTrue(os.path.exists(fname))
os.unlink(fname)
def test_from_time(self):
fname = os.path.join(UtilsTest.tempdir, "nexus.h5")
nex = nexus.Nexus(fname)
entry = nex.new_entry("entry")
time1 = nexus.from_isotime(entry["start_time"].value)
entry["bad_time"] = [entry["start_time"].value] # this is a list
time2 = nexus.from_isotime(entry["bad_time"].value)
self.assertEqual(time1, time2, "start_time in list does not works !")
nex.close()
self.assertTrue(os.path.exists(fname))
os.unlink(fname)
def suite():
loadTests = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loadTests(TestNexus))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 32.8125 | 80 | 0.679619 |
acee59f6516b33efbcafa3e0f4dc9b2bcb4d676c | 2,507 | py | Python | datasets/cbm/__init__.py | kokikwbt/predictive-maintenance | 38b3abadca54a973e777a0b6fea1fe51fa1c7370 | [
"MIT"
] | 4 | 2021-11-09T05:56:24.000Z | 2022-03-17T11:33:27.000Z | datasets/cbm/__init__.py | kokikwbt/predictive-maintenance | 38b3abadca54a973e777a0b6fea1fe51fa1c7370 | [
"MIT"
] | null | null | null | datasets/cbm/__init__.py | kokikwbt/predictive-maintenance | 38b3abadca54a973e777a0b6fea1fe51fa1c7370 | [
"MIT"
] | 2 | 2022-02-28T03:41:05.000Z | 2022-03-06T22:27:05.000Z | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tqdm
import pandas as pd
def parse_feature_names(fn):
with open(fn) as f:
names, lines = [], f.readlines()
for line in lines:
names.append(line.split('-')[-1].lstrip().rstrip())
return names
def load_data(shorten_feature_names=True):
fp = os.path.dirname(__file__)
raw_data = np.loadtxt(fp + '/data.txt.gz')
features = parse_feature_names(fp + '/Features.txt')
if shorten_feature_names == True:
for i in range(len(features) - 2):
features[i] = features[i].split('(')[-1].split(')')[0].upper()
features[-2] = 'comp_decay_state'
features[-1] = 'turb_decay_state'
return pd.DataFrame(raw_data, columns=features)
def normalize(df):
df_norm = df.copy()
for i in range(df_norm.shape[1]):
if df_norm.iloc[:, i].max() - df_norm.iloc[:, i].min() > 0:
maxv = df_norm.iloc[:, i].max()
minv = df_norm.iloc[:, i].min()
df_norm.iloc[:, i] = (df_norm.iloc[:, i] - minv) / (maxv - minv)
else:
df_norm.iloc[:, i] = 0.
return df_norm
def load_clean_data():
return normalize(load_data())
def gen_summary(wd=400, outdir=None):
if outdir is None:
outdir = os.path.dirname(__file__)
os.makedirs(outdir, exist_ok=True)
data = normalize(load_data(shorten_feature_names=False))
with PdfPages(outdir + '/cbm_summary.pdf') as pp:
for st in tqdm.trange(0, data.shape[0], wd):
ed = st + wd
fig, ax = plt.subplots(9, figsize=(24, 15))
data.iloc[st:ed, 1].plot(legend=True, ax=ax[0])
data.iloc[st:ed, 2].plot(legend=True, ax=ax[1])
data.iloc[st:ed, 3:5].plot(legend=True, ax=ax[2])
data.iloc[st:ed, 5:7].plot(legend=True, ax=ax[3])
data.iloc[st:ed, 7:10].plot(legend=True, ax=ax[4])
data.iloc[st:ed, 10:14].plot(legend=True, ax=ax[5])
data.iloc[st:ed, 14].plot(legend=True, ax=ax[6])
data.iloc[st:ed, 15].plot(legend=True, ax=ax[7])
data.iloc[st:ed, [0, -2, -1]].plot(legend=True, ax=ax[8])
ax[0].set_title('Normalized Sensor/Label data')
ax[-1].set_xlabel('Time')
for axi in ax: axi.set_ylabel('Value')
fig.savefig(pp, bbox_inches='tight', format='pdf')
plt.clf()
plt.close()
| 30.204819 | 76 | 0.577982 |
acee5a587d6a5782a6d011d3ffb3cb00e7409683 | 2,598 | py | Python | qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1520A.py | ThorvaldLarsen/Qcodes | 230881f7a4b5378c504c6e52c964a211191bde2e | [
"MIT"
] | null | null | null | qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1520A.py | ThorvaldLarsen/Qcodes | 230881f7a4b5378c504c6e52c964a211191bde2e | [
"MIT"
] | null | null | null | qcodes/instrument_drivers/Keysight/keysightb1500/KeysightB1520A.py | ThorvaldLarsen/Qcodes | 230881f7a4b5378c504c6e52c964a211191bde2e | [
"MIT"
] | 1 | 2020-04-24T01:15:44.000Z | 2020-04-24T01:15:44.000Z | import re
from typing import Optional, TYPE_CHECKING, Tuple
from .KeysightB1500_module import B1500Module
from .message_builder import MessageBuilder
from . import constants
from .constants import ModuleKind, ChNr
if TYPE_CHECKING:
from .KeysightB1500 import KeysightB1500
_pattern = re.compile(
r"((?P<status>\w)(?P<chnr>\w)(?P<dtype>\w))?"
r"(?P<value>[+-]\d{1,3}\.\d{3,6}E[+-]\d{2})"
)
class B1520A(B1500Module):
"""
Driver for Keysight B1520A Capacitance Measurement Unit module for B1500
Semiconductor Parameter Analyzer.
Args:
parent: mainframe B1500 instance that this module belongs to
name: Name of the instrument instance to create. If `None`
(Default), then the name is autogenerated from the instrument
class.
slot_nr: Slot number of this module (not channel number)
"""
MODULE_KIND = ModuleKind.CMU
def __init__(self, parent: 'KeysightB1500', name: Optional[str], slot_nr,
**kwargs):
super().__init__(parent, name, slot_nr, **kwargs)
self.channels = (ChNr(slot_nr),)
self.add_parameter(
name="voltage_dc", set_cmd=self._set_voltage_dc, get_cmd=None
)
self.add_parameter(
name="voltage_ac", set_cmd=self._set_voltage_ac, get_cmd=None
)
self.add_parameter(
name="frequency", set_cmd=self._set_frequency, get_cmd=None
)
self.add_parameter(name="capacitance",
get_cmd=self._get_capacitance,
snapshot_value=False)
def _set_voltage_dc(self, value: float) -> None:
msg = MessageBuilder().dcv(self.channels[0], value)
self.write(msg.message)
def _set_voltage_ac(self, value: float) -> None:
msg = MessageBuilder().acv(self.channels[0], value)
self.write(msg.message)
def _set_frequency(self, value: float) -> None:
msg = MessageBuilder().fc(self.channels[0], value)
self.write(msg.message)
def _get_capacitance(self) -> Tuple[float, float]:
msg = MessageBuilder().tc(
chnum=self.channels[0], mode=constants.RangingMode.AUTO
)
response = self.ask(msg.message)
parsed = [item for item in re.finditer(_pattern, response)]
if (
len(parsed) != 2
or parsed[0]["dtype"] != "C"
or parsed[1]["dtype"] != "Y"
):
raise ValueError("Result format not supported.")
return float(parsed[0]["value"]), float(parsed[1]["value"])
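# Usage sketch (hypothetical station wiring; 'b1500' is an existing mainframe
# driver instance and slot 3 is assumed to hold the CMU card):
#   cmu = B1520A(b1500, name='cmu1', slot_nr=3)
#   cmu.voltage_ac(0.25)
#   cmu.frequency(1e5)
#   capacitance, dissipation = cmu.capacitance()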
| 30.209302 | 77 | 0.615473 |
acee5aaebe3fdd8437e967d981ebd81c90df1320 | 285 | py | Python | mSite/models.py | ejigma/countdownTimer-ejigma-website | 26dbc865b1627940268ccb5160cae45bc7a24e27 | [
"MIT"
] | 3 | 2019-07-06T07:24:35.000Z | 2020-01-15T19:15:27.000Z | mSite/models.py | ejigma/countdownTimer-ejigma-website | 26dbc865b1627940268ccb5160cae45bc7a24e27 | [
"MIT"
] | null | null | null | mSite/models.py | ejigma/countdownTimer-ejigma-website | 26dbc865b1627940268ccb5160cae45bc7a24e27 | [
"MIT"
] | null | null | null | from django.db import models
class FirstPage(models.Model):
title = models.CharField(max_length=255, null=True)
background_color = models.CharField(max_length=20, null=True)
opening_date = models.DateTimeField(null=True)
def __str__(self):
return self.title
| 25.909091 | 65 | 0.733333 |
acee5c03637fccbb40927178c5e904c5d54681bd | 7,418 | py | Python | fastapi_mqtt/fastmqtt.py | Turall/fastapi-mqtt | dcd8fe510f6344e8b6b4aa28ed4dabc7fa62a439 | [
"MIT"
] | null | null | null | fastapi_mqtt/fastmqtt.py | Turall/fastapi-mqtt | dcd8fe510f6344e8b6b4aa28ed4dabc7fa62a439 | [
"MIT"
] | null | null | null | fastapi_mqtt/fastmqtt.py | Turall/fastapi-mqtt | dcd8fe510f6344e8b6b4aa28ed4dabc7fa62a439 | [
"MIT"
] | null | null | null | import os
import ssl
import uuid
import asyncio
from ssl import SSLContext
from functools import partial
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, Optional, Type, Union
from gmqtt import Message
from gmqtt import Client as MQTTClient
from gmqtt.mqtt.constants import MQTTv311,MQTTv50
from .config import MQQTConfig
try:
from uvicorn.config import logger
log_info = logger
except ImportError:
import logging
log_info = logging.getLogger()
class FastMQTT:
'''
    FastMQTT client object that establishes the connection parameters before connecting and manipulates the MQTT service.
    The class object holds the session information necessary to connect to the MQTT broker.
```
param :: config : "Config parameters for gmqtt.Client"
type :: config: MQQTConfig
```
```
    param :: client_id : client_id "should be a unique identifier for the connection to the MQTT broker"
type :: client_id: Any
```
```
param :: clean_session : "The clean session flag tells the broker whether the client wants to establish \
a persistent session or not. In a persistent session clean_session = False, the broker stores all subscriptions for the client and \
all missed messages for the client that subscribed with a Quality of Service (QoS) level 1 or 2. \
    If the session is not persistent (clean_session = True), the broker does not store anything for the client and \
    purges all information from any previous persistent session. The client_id that the client provides when it establishes the connection to the broker identifies the session."
type :: clean_session: bool
```
```
param :: optimistic_acknowledgement : #TODO more info needed
type :: optimistic_acknowledgement: bool
```
'''
def __init__(
self,
config: MQQTConfig,
*,
client_id: Optional[Type[str]] = None,
clean_session: bool = True,
optimistic_acknowledgement: bool = True,
**kwargs: Any
) -> None:
if not client_id: client_id = uuid.uuid4().hex
self.client: MQTTClient = MQTTClient(client_id)
        self.config: MQQTConfig = config
self.client._clean_session = clean_session
self.client._username: Optional[str] = config.username or None
self.client._password: Optional[str] = config.password or None
self.client._host: str = config.host
self.client._port: int = config.port
self.client._keepalive: int = config.keepalive
self.client._ssl: Union[bool,SSLContext] = config.ssl
self.client.optimistic_acknowledgement: bool = optimistic_acknowledgement
self.client._connect_properties: Any = kwargs
self.executor = ThreadPoolExecutor()
self.loop = asyncio.get_event_loop()
if self.config.will_message_topic and self.config.will_message_payload and self.config.will_delay_interval:
self.client._will_message = Message(
self.config.will_message_topic,
self.config.will_message_payload,
self.config.will_delay_interval
)
log_info.debug(f"topic -> {self.config.will_message_topic} \n payload -> {self.config.will_message_payload} \n will_delay_interval -> {self.config.will_delay_interval}")
log_info.info("WILL MESSAGE INITIALIZED")
async def connection(self) -> None:
if self.client._username:
self.client.set_auth_credentials(self.client._username, self.client._password)
log_info.info("user is authenticated")
await self.__set_connetion_config()
version = self.config.version or MQTTv50
log_info.warning(f"Used broker version is {version}")
await self.client.connect(self.client._host,self.client._port,self.client._ssl,self.client._keepalive,version)
log_info.info("connected to broker..")
async def __set_connetion_config(self) -> None:
'''
        By default, a connected MQTT client will always try to reconnect when the connection is lost.
        The number of reconnect attempts is unlimited. If you want to change this behaviour, pass reconnect_retries and reconnect_delay with your values.
For more info: # https://github.com/wialon/gmqtt#reconnects
'''
if self.config.reconnect_retries:
self.client.set_config(reconnect_retries=self.config.reconnect_retries)
if self.config.reconnect_delay:
self.client.set_config(reconnect_delay=self.config.reconnect_delay)
def on_message(self):
"""
        Decorator method used to subscribe to messages from all topics.
"""
def message_handler(handler: Callable) -> Callable:
log_info.info("on_message handler accepted")
self.client.on_message = handler
return handler
return message_handler
async def publish(self, message_or_topic, payload=None, qos=0, retain=False, **kwargs):
'''
        publish method
        param :: message_or_topic : topic name, or an already built gmqtt Message
        type :: message_or_topic: str or Message
        param :: payload : message payload
        type :: payload: Any
        param :: qos : quality-of-service level (0, 1 or 2)
        type :: qos: int
        param :: retain : whether the broker should retain the message
        type :: retain: bool
'''
loop = asyncio.get_event_loop()
func = partial(self.client.publish, message_or_topic, payload=payload, qos=qos, retain=retain, **kwargs)
log_info.info("publish")
return await loop.run_in_executor(self.executor, func)
async def unsubscribe(self, topic: str, **kwargs):
'''
        unsubscribe method
        param :: topic : topic to unsubscribe from
        type :: topic: str
'''
func = partial(self.client.unsubscribe, topic, **kwargs)
log_info.info("unsubscribe")
return await self.loop.run_in_executor(self.executor, func)
def on_connect(self):
"""
        Decorator method used to handle the connection to MQTT.
"""
def connect_handler(handler: Callable) -> Callable:
log_info.info("handler accepted")
self.client.on_connect = handler
return handler
return connect_handler
def on_subscribe(self):
"""
        Decorator method used to obtain subscribed topics and properties.
"""
def subscribe_handler(handler: Callable):
log_info.info("on_subscribe handler accepted")
self.client.on_subscribe = handler
return handler
return subscribe_handler
def on_disconnect(self):
"""
        Decorator method used to wrap the disconnect callback.
"""
def disconnect_handler(handler: Callable) -> Callable:
log_info.info("on_disconnect handler accepted")
self.client.on_disconnect = handler
return handler
return disconnect_handler | 36.541872 | 204 | 0.62591 |
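# Usage sketch with FastAPI (illustrative host/port; the FastAPI wiring is an
# assumption, not part of this module):
#   from fastapi import FastAPI
#   app = FastAPI()
#   mqtt = FastMQTT(config=MQQTConfig(host="localhost", port=1883))
#   @app.on_event("startup")
#   async def startup():
#       await mqtt.connection()
#   @mqtt.on_message()
#   async def handle(client, topic, payload, qos, properties):
#       print(topic, payload.decode())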
acee5cfcaffe86cd29226bd50a0470eb2e719313 | 417 | py | Python | Python/07. Collections/01. collections.Counter()/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | Python/07. Collections/01. collections.Counter()/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | Python/07. Collections/01. collections.Counter()/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | # collections.Counter()
# Problem Link: https://www.hackerrank.com/challenges/collections-counter/problem
from collections import Counter
x = int(input())
shoe = map(int, input().split())
cust = int(input())
shoeCollection = Counter(shoe)
money = 0
for i in range(cust):
size, price = map(int, input().split())
if shoeCollection[size]:
money += price
shoeCollection[size] -= 1
print(money)
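# Counter quick check (illustrative): Counter([2, 2, 3])[2] == 2, and a missing
# size yields 0, so `if shoeCollection[size]:` safely skips sold-out sizes.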
| 23.166667 | 81 | 0.681055 |
acee5d36ea5dba8d0cbc6d759f90ac8364b5a7fd | 5,776 | py | Python | toko/resources/proposta_resource.py | erickotsuka/sistema-contratacao-backend | 8c9d2e8b076e4701cb14a12eb60634b79bbe34c5 | [
"MIT"
] | null | null | null | toko/resources/proposta_resource.py | erickotsuka/sistema-contratacao-backend | 8c9d2e8b076e4701cb14a12eb60634b79bbe34c5 | [
"MIT"
] | null | null | null | toko/resources/proposta_resource.py | erickotsuka/sistema-contratacao-backend | 8c9d2e8b076e4701cb14a12eb60634b79bbe34c5 | [
"MIT"
] | null | null | null | from flask_restful import Resource, reqparse, abort
from flask import request
from toko.models.proposta_model import PropostaModel
from toko.schemas.proposta_schema import PropostaSchema
from datetime import datetime
class PropostaResource(Resource):
parser = reqparse.RequestParser()
parser.add_argument("id_demanda",
type=int,
required=True,
                        help="The proposal ID cannot be blank."
)
parser.add_argument("prazo",
type=lambda x: datetime.strptime(x,'%Y-%m-%dT%H:%M:%S'),
required=True,
                        help="The deadline cannot be blank."
)
parser.add_argument('orcamento',
type=float,
required=True,
                        help="The proposal budget cannot be blank."
)
parser.add_argument('id_integrador',
type=int,
required=True,
                        help="The proposal integrator ID cannot be blank."
)
parser.add_argument('titulo',
type=str,
required=True,
                        help="The proposal title cannot be blank."
)
parser.add_argument('descricao',
type=str,
required=True,
                        help="The proposal description cannot be blank."
)
parser.add_argument('status',
type=str,
required=True,
                        help="The proposal status cannot be blank."
)
def get(self,id):
json = ''
try:
proposta = PropostaModel.encontrar_pelo_id(id)
print(proposta)
if proposta:
schema = PropostaSchema()
json = schema.dump(proposta).data
else:
                return {"message": "Proposal {} does not exist".format(id)}, 404
except Exception as e:
print(e)
            return {"message": "Error processing the request"}, 500
return json,200
def delete(self,id):
json = []
try:
proposta = PropostaModel.encontrar_pelo_id(id)
if proposta:
proposta.remover()
lista = PropostaModel.listar()
schema = PropostaSchema(many=True,exclude=['listas'])
json = schema.dump(lista).data
else:
                return {"message": "Proposal {} is not in the list".format(id)}, 404
except Exception as e:
print(e)
return json, 201
def post(self):
try:
data = PropostaResource.parser.parse_args()
if not data:
                return {"message": "Request has no JSON body"}, 400
if PropostaModel.encontrar_pelo_titulo(data['titulo']):
                return {"message": "Proposal already exists"}, 400
else:
proposta = PropostaModel(data['id_demanda'],
data['prazo'],
data['orcamento'],
data['id_integrador'],
data['titulo'],
data['descricao'],
data['status'],
)
proposta.adicionar()
proposta = PropostaModel.encontrar_pelo_titulo(data['titulo'])
user_schema = PropostaSchema()
json = user_schema.dump(proposta).data
return json, 201
except Exception as ex:
print(ex)
            return {"message": "error"}, 500
def put(self):
json = ''
try:
data = PropostaResource.parser.parse_args()
id_demanda = data['id_demanda']
prazo = data['prazo']
orcamento = data['orcamento']
id_integrador = data['id_integrador']
titulo = data['titulo']
descricao = data['descricao']
status = data['status']
proposta = PropostaModel.encontrar_pelo_titulo(titulo)
if proposta:
                return {"message": "Proposal {} is already in the list".format(proposta.titulo)}, 200
else:
proposta = PropostaModel(
id_demanda=id_demanda,
prazo=prazo,
orcamento=orcamento,
id_integrador=id_integrador,
titulo=titulo,
descricao=descricao,
status=status
)
proposta.adicionar()
schema = PropostaSchema(many=True)
proposta = PropostaModel.encontrar_pelo_titulo(titulo)
json = schema.dump(proposta).data
except Exception as e:
print(e)
return json, 201
class PropostasResource(Resource):
def get(self):
json = ""
try:
            if request.args.get('id_demanda'):
                propostas = PropostaModel.listar_pela_demanda(request.args['id_demanda'])
            else:
                propostas = PropostaModel.listar()
schema = PropostaSchema(many=True)
json = schema.dump(propostas).data
except Exception as e:
print(e)
            return {"message": "An error occurred while fetching the list of proposals."}, 500
return json, 200
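# Wiring sketch (hypothetical; the route paths are assumptions):
#   api.add_resource(PropostaResource, '/proposals/<int:id>')
#   api.add_resource(PropostasResource, '/proposals')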
| 37.751634 | 96 | 0.484072 |
acee5e4431b18d3db5910ef2a564bdf1694bc6a0 | 827 | bzl | Python | third_party/tf_runtime/workspace.bzl | ssnd/tensorflow | 5c245f4e49bd575e78478592df5f63e60a686852 | [
"Apache-2.0"
] | null | null | null | third_party/tf_runtime/workspace.bzl | ssnd/tensorflow | 5c245f4e49bd575e78478592df5f63e60a686852 | [
"Apache-2.0"
] | null | null | null | third_party/tf_runtime/workspace.bzl | ssnd/tensorflow | 5c245f4e49bd575e78478592df5f63e60a686852 | [
"Apache-2.0"
] | null | null | null | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "8df6f6934173524a3fcd1c956dbb24fd32af72d7"
TFRT_SHA256 = "b5de8f658865a125de10489385f62ce481bf5bb5351092fffc4613d09dcd281a"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| 39.380952 | 124 | 0.698912 |
acee5eacd2f401c3e43bf2770bee80823d76364a | 869 | py | Python | JWTAuth/migrations/0001_initial.py | rafimuhammad01/mtf-hackathon | 83ab410239a93ff04e57d7ceb2d1d292ba365866 | [
"Unlicense"
] | null | null | null | JWTAuth/migrations/0001_initial.py | rafimuhammad01/mtf-hackathon | 83ab410239a93ff04e57d7ceb2d1d292ba365866 | [
"Unlicense"
] | null | null | null | JWTAuth/migrations/0001_initial.py | rafimuhammad01/mtf-hackathon | 83ab410239a93ff04e57d7ceb2d1d292ba365866 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2 on 2021-04-23 05:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pewiraMilesBalance', models.FloatField(default=0)),
('status', models.IntegerField(choices=[(0, 'Staff'), (1, 'Admin'), (2, 'Super Admin')], default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.185185 | 118 | 0.629459 |
acee5ee1d002ae7c064db9da390b61a70016fc06 | 5,482 | py | Python | examples/QMIX/qmix_agent.py | lp2333/PARL | e4bde1f5b7e69c5f8d3ee3a90a647dfe12204bd3 | [
"ECL-2.0",
"Apache-2.0"
] | 3,172 | 2018-05-22T02:02:29.000Z | 2022-03-31T09:14:56.000Z | examples/QMIX/qmix_agent.py | BKBK00/PARL | f508bc6085420431b504441c7ff129e64826603e | [
"Apache-2.0"
] | 422 | 2018-05-17T16:58:45.000Z | 2022-03-31T02:03:25.000Z | examples/QMIX/qmix_agent.py | BKBK00/PARL | f508bc6085420431b504441c7ff129e64826603e | [
"Apache-2.0"
] | 794 | 2018-05-21T18:33:19.000Z | 2022-03-30T13:38:09.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import parl
import numpy as np
from utils import AvailableActionsSampler
import os
class QMixAgent(parl.Agent):
def __init__(self, algorithm, exploration_start, min_exploration,
exploration_decay, update_target_interval):
self.alg = algorithm
self.global_step = 0
self.exploration = exploration_start
self.min_exploration = min_exploration
self.exploration_decay = exploration_decay
self.target_update_count = 0
self.update_target_interval = update_target_interval
def save(self, save_dir, agent_model_name, qmixer_model_name):
if not os.path.exists(save_dir):
os.mkdir(save_dir)
agent_model_path = os.path.join(save_dir, agent_model_name)
qmixer_model_path = os.path.join(save_dir, qmixer_model_name)
paddle.save(self.alg.agent_model.state_dict(), agent_model_path)
paddle.save(self.alg.qmixer_model.state_dict(), qmixer_model_path)
print('save model successfully!')
def restore(self, save_dir, agent_model_name, qmixer_model_name):
if not os.path.exists(save_dir):
os.mkdir(save_dir)
agent_model_path = os.path.join(save_dir, agent_model_name)
qmixer_model_path = os.path.join(save_dir, qmixer_model_name)
self.alg.agent_model.set_state_dict(paddle.load(agent_model_path))
self.alg.qmixer_model.set_state_dict(paddle.load(qmixer_model_path))
print('restore model successfully!')
def reset_agent(self, batch_size=1):
self.alg._init_hidden_states(batch_size)
def sample(self, obs, available_actions):
""" sample actions via epsilon-greedy
Args:
obs (np.ndarray): (n_agents, obs_shape)
available_actions (np.ndarray): (n_agents, n_actions)
Returns:
actions (np.ndarray): (n_agents, )
"""
epsilon = np.random.random()
if epsilon > self.exploration:
actions = self.predict(obs, available_actions)
else:
actions = AvailableActionsSampler(available_actions).sample()
self.exploration = max(self.min_exploration,
self.exploration - self.exploration_decay)
return actions
def predict(self, obs, available_actions):
""" take greedy actions
Args:
obs (np.ndarray): (n_agents, obs_shape)
available_actions (np.ndarray): (n_agents, n_actions)
Returns:
actions (np.ndarray): (n_agents, )
"""
obs = paddle.to_tensor(obs, dtype='float32')
available_actions = paddle.to_tensor(available_actions, dtype='int32')
agents_q, self.alg.hidden_states = self.alg.predict_local_q(
obs, self.alg.hidden_states)
# mask unavailable actions
unavailable_actions_mask = (available_actions == 0).cast('float32')
agents_q -= 1e8 * unavailable_actions_mask
actions = paddle.argmax(agents_q, axis=-1).detach().cpu().numpy()
return actions
def learn(self, state_batch, actions_batch, reward_batch, terminated_batch,
obs_batch, available_actions_batch, filled_batch):
'''
Args:
state (np.ndarray): (batch_size, T, state_shape)
actions (np.ndarray): (batch_size, T, n_agents)
reward (np.ndarray): (batch_size, T, 1)
terminated (np.ndarray): (batch_size, T, 1)
obs (np.ndarray): (batch_size, T, n_agents, obs_shape)
available_actions_batch (np.ndarray): (batch_size, T, n_agents, n_actions)
filled_batch (np.ndarray): (batch_size, T, 1)
Returns:
mean_loss (float): train loss
mean_td_error (float): train TD error
'''
if self.global_step % self.update_target_interval == 0:
self.alg.sync_target()
self.target_update_count += 1
self.global_step += 1
state_batch = paddle.to_tensor(state_batch, dtype='float32')
actions_batch = paddle.to_tensor(actions_batch, dtype='int64')
reward_batch = paddle.to_tensor(reward_batch, dtype='float32')
terminated_batch = paddle.to_tensor(terminated_batch, dtype='float32')
obs_batch = paddle.to_tensor(obs_batch, dtype='float32')
available_actions_batch = paddle.to_tensor(
available_actions_batch, dtype='int64')
filled_batch = paddle.to_tensor(filled_batch, dtype='float32')
mean_loss, mean_td_error = self.alg.learn(
state_batch, actions_batch, reward_batch, terminated_batch,
obs_batch, available_actions_batch, filled_batch)
return mean_loss, mean_td_error
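# Training-loop sketch (hypothetical shapes; `algorithm` is a QMIX algorithm
# instance and the batches come from an episode replay buffer):
#   agent = QMixAgent(algorithm, exploration_start=1.0, min_exploration=0.1,
#                     exploration_decay=1e-4, update_target_interval=1000)
#   agent.reset_agent(batch_size=1)
#   actions = agent.sample(obs, available_actions)
#   loss, td = agent.learn(state_b, act_b, rew_b, term_b, obs_b, avail_b, filled_b)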
| 44.934426 | 86 | 0.651587 |
acee5fbdbde3e5cdcfbf6ed2ea730e021da47300 | 2,905 | py | Python | tests/test_12_context.py | Roomored/JWTConnect-Python-OidcMsg | 3b7d667c0c36cd85e34db56cb745ef77eaeda140 | [
"Apache-2.0"
] | null | null | null | tests/test_12_context.py | Roomored/JWTConnect-Python-OidcMsg | 3b7d667c0c36cd85e34db56cb745ef77eaeda140 | [
"Apache-2.0"
] | null | null | null | tests/test_12_context.py | Roomored/JWTConnect-Python-OidcMsg | 3b7d667c0c36cd85e34db56cb745ef77eaeda140 | [
"Apache-2.0"
] | 1 | 2020-11-08T21:22:55.000Z | 2020-11-08T21:22:55.000Z | import copy
import shutil
import pytest
from oidcmsg.context import OidcContext
KEYDEF = [
{"type": "EC", "crv": "P-256", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["enc"]}
]
JWKS = {
"keys": [
{
"n":
'zkpUgEgXICI54blf6iWiD2RbMDCOO1jV0VSff1MFFnujM4othfMsad7H1kRo50YM5S'
'_X9TdvrpdOfpz5aBaKFhT6Ziv0nhtcekq1eRl8mjBlvGKCE5XGk-0LFSDwvqgkJoFY'
'Inq7bu0a4JEzKs5AyJY75YlGh879k1Uu2Sv3ZZOunfV1O1Orta-NvS-aG_jN5cstVb'
'CGWE20H0vFVrJKNx0Zf-u-aA-syM4uX7wdWgQ-owoEMHge0GmGgzso2lwOYf_4znan'
'LwEuO3p5aabEaFoKNR4K6GjQcjBcYmDEE4CtfRU9AEmhcD1kleiTB9TjPWkgDmT9MX'
'sGxBHf3AKT5w',
"e": "AQAB", "kty": "RSA", "kid": "rsa1"
},
{
"k":
'YTEyZjBlMDgxMGI4YWU4Y2JjZDFiYTFlZTBjYzljNDU3YWM0ZWNiNzhmNmFlYTNkNTY0NzMzYjE',
"kty": "oct"
},
]
}
def test_context():
c = OidcContext({})
assert c.keyjar is not None
class TestContext(object):
@pytest.fixture(autouse=True)
def setup(self):
try:
shutil.rmtree('db')
except FileNotFoundError:
pass
self.conf = {
'issuer': 'https://example.com',
'db_conf': {
'keyjar': {
'handler': 'oidcmsg.storage.abfile.LabeledAbstractFileSystem',
'fdir': 'db/keyjar',
'key_conv': 'oidcmsg.storage.converter.QPKey',
'value_conv': 'cryptojwt.serialize.item.KeyIssuer',
'label': 'foo'
},
'default': {
'handler': 'oidcmsg.storage.abfile.AbstractFileSystem',
'fdir': 'db',
'key_conv': 'oidcmsg.storage.converter.QPKey',
'value_conv': 'oidcmsg.storage.converter.JSON'
}
}
}
def test_context_with_entity_id_no_keys(self):
c = OidcContext(self.conf, entity_id='https://example.com')
assert c.keyjar.owners() == []
def test_context_with_entity_id_and_keys(self):
conf = copy.deepcopy(self.conf)
conf['keys'] = {'key_defs': KEYDEF}
c = OidcContext(conf, entity_id='https://example.com')
assert set(c.keyjar.owners()) == {'', 'https://example.com'}
def test_context_with_entity_id_and_jwks(self):
conf = copy.deepcopy(self.conf)
conf['jwks'] = JWKS
c = OidcContext(conf, entity_id='https://example.com')
assert set(c.keyjar.owners()) == {'', 'https://example.com'}
assert len(c.keyjar.get('sig', 'RSA')) == 1
assert len(c.keyjar.get('sig', 'RSA', issuer_id='https://example.com')) == 1
assert len(c.keyjar.get('sig', 'oct')) == 1
assert len(c.keyjar.get('sig', 'oct', issuer_id='https://example.com')) == 1
| 33.390805 | 94 | 0.553873 |
acee5fcb1f83370858ff9aee66a2ed05aede9f9a | 1,466 | py | Python | discounting.py | jusdesoja/Preference_Fusion | a30b864de244cfac8a4d9161b089c39952f47141 | [
"Unlicense"
] | 1 | 2019-08-17T08:52:33.000Z | 2019-08-17T08:52:33.000Z | discounting.py | jusdesoja/PreferenceFusion | a30b864de244cfac8a4d9161b089c39952f47141 | [
"Unlicense"
] | null | null | null | discounting.py | jusdesoja/PreferenceFusion | a30b864de244cfac8a4d9161b089c39952f47141 | [
"Unlicense"
] | null | null | null | import numpy as np
import math
from exceptions import IllegalMassSizeError
def discounting(massIn, alpha):
    """Discount masses with the given factors
    Parameters
    -----------
    massIn: ndarray of 2 dimensions
        a matrix containing multiple bba vectors.
        attention: each bba is represented in a column, and each row represents one focal element
    alpha: float or ndarray of 1 dimension
        discounting factor; a float, or a vector whose length equals the number of bba vectors
"""
massIn = massIn.copy()
alpha = np.array(alpha)
if len(massIn.shape) == 1: # massIn is a 1-D matrix (vector)
massIn = massIn.reshape(massIn.size, 1)
nbFE,nbMass = massIn.shape # nbFE : the number of focal elements
    atoms = round(math.log(nbFE, 2))  # n: number of elements in the discernment frame
    if (nbFE != math.pow(2, atoms) or nbFE == 2):
        raise IllegalMassSizeError('The number of focal elements should be 2^n (n>1), with n the number of elements in the discernment frame\n')
if alpha.size == 1: # complete the alpha vector
alpha = np.full(nbMass,alpha, dtype = float)
if alpha.size == nbMass:
alpha_mat = np.repeat(alpha[:,np.newaxis], nbFE, axis = 1).T
massOut = np.multiply(alpha_mat, massIn)
massOut[-1, :] = 1 - np.apply_along_axis(np.sum, 0, massOut[0:-1,:])
return massOut
else:
raise IllegalMassSizeError("Accident: in discounting the size of alpha is incorrect\n")
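# Worked example (illustrative values): Shafer discounting scales every focal
# element by alpha and moves the removed mass onto the full frame (last row):
#   m'(A) = alpha * m(A) for A != Theta, and m'(Theta) = 1 - sum of the others.
#   m = np.array([[0.0], [0.6], [0.3], [0.1]])   # one column bba over 2^{a,b}
#   discounting(m, 0.9)
#   # rows become [0.0, 0.54, 0.27, 1 - 0.81 = 0.19]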
| 40.722222 | 143 | 0.653479 |
acee61769570c168c72189b26eebd2029d4dc0da | 5,057 | py | Python | neat_src/wann_ind.py | andrewhu/wann-nlp | 13e5388dc3f2ea61510759ec1cf29d91f51c330b | [
"Apache-2.0"
] | 1 | 2021-06-17T06:37:07.000Z | 2021-06-17T06:37:07.000Z | neat_src/wann_ind.py | andrewhu/wann-nlp | 13e5388dc3f2ea61510759ec1cf29d91f51c330b | [
"Apache-2.0"
] | null | null | null | neat_src/wann_ind.py | andrewhu/wann-nlp | 13e5388dc3f2ea61510759ec1cf29d91f51c330b | [
"Apache-2.0"
] | 2 | 2020-05-14T12:20:29.000Z | 2020-05-16T03:59:32.000Z | import numpy as np
import copy
from .ind import *
from .ann import getLayer, getNodeOrder
from utils import listXor
class WannInd(Ind):
"""Individual class: genes, network, and fitness
"""
  def __init__(self, conn, node):
    """Initialize individual with given genes
Args:
conn - [5 X nUniqueGenes]
[0,:] == Innovation Number
[1,:] == Source
[2,:] == Destination
[3,:] == Weight
[4,:] == Enabled?
node - [3 X nUniqueGenes]
[0,:] == Node Id
[1,:] == Type (1=input, 2=output 3=hidden 4=bias)
[2,:] == Activation function (as int)
Attributes:
node - (np_array) - node genes (see args)
conn - (np_array) - conn genes (see args)
nInput - (int) - number of inputs
nOutput - (int) - number of outputs
wMat - (np_array) - weight matrix, one row and column for each node
[N X N] - rows: connection from; cols: connection to
wVec - (np_array) - wMat as a flattened vector
[N**2 X 1]
aVec - (np_array) - activation function of each node (as int)
[N X 1]
nConn - (int) - number of connections
fitness - (double) - fitness averaged over all trials (higher better)
fitMax - (double) - best fitness over all trials (higher better)
rank - (int) - rank in population (lower better)
birth - (int) - generation born
species - (int) - ID of species
"""
Ind.__init__(self,conn,node)
self.fitMax = [] # Best fitness over trials
def createChild(self, p, innov, gen=0):
"""Create new individual with this individual as a parent
Args:
p - (dict) - algorithm hyperparameters (see p/hypkey.txt)
innov - (np_array) - innovation record
[5 X nUniqueGenes]
[0,:] == Innovation Number
[1,:] == Source
[2,:] == Destination
[3,:] == New Node?
[4,:] == Generation evolved
gen - (int) - (optional) generation (for innovation recording)
Returns:
child - (Ind) - newly created individual
innov - (np_array) - updated innovation record
"""
child = WannInd(self.conn, self.node)
child, innov = child.topoMutate(p,innov,gen)
return child, innov
# -- 'Single Weight Network' topological mutation ------------------------ -- #
def topoMutate(self, p, innov,gen):
"""Randomly alter topology of individual
Note: This operator forces precisely ONE topological change
Args:
child - (Ind) - individual to be mutated
.conns - (np_array) - connection genes
[5 X nUniqueGenes]
[0,:] == Innovation Number (unique Id)
[1,:] == Source Node Id
[2,:] == Destination Node Id
[3,:] == Weight Value
[4,:] == Enabled?
.nodes - (np_array) - node genes
[3 X nUniqueGenes]
[0,:] == Node Id
[1,:] == Type (1=input, 2=output 3=hidden 4=bias)
[2,:] == Activation function (as int)
innov - (np_array) - innovation record
[5 X nUniqueGenes]
[0,:] == Innovation Number
[1,:] == Source
[2,:] == Destination
[3,:] == New Node?
[4,:] == Generation evolved
Returns:
child - (Ind) - newly created individual
innov - (np_array) - innovation record
"""
# Readability
nConn = np.shape(self.conn)[1]
connG = np.copy(self.conn)
nodeG = np.copy(self.node)
# Choose topological mutation
topoRoulette = np.array((p['prob_addConn'], p['prob_addNode'], \
p['prob_enable'] , p['prob_mutAct']))
spin = np.random.rand()*np.sum(topoRoulette)
slot = topoRoulette[0]
choice = topoRoulette.size
for i in range(1,topoRoulette.size):
if spin < slot:
choice = i
break
else:
slot += topoRoulette[i]
# Add Connection
    if choice == 1:
connG, innov = self.mutAddConn(connG, nodeG, innov, gen, p)
# Add Node
    elif choice == 2:
connG, nodeG, innov = self.mutAddNode(connG, nodeG, innov, gen, p)
# Enable Connection
    elif choice == 3:
disabled = np.where(connG[4,:] == 0)[0]
if len(disabled) > 0:
enable = np.random.randint(len(disabled))
connG[4,disabled[enable]] = 1
# Mutate Activation
    elif choice == 4:
start = 1+self.nInput + self.nOutput
end = nodeG.shape[1]
if start != end:
mutNode = np.random.randint(start,end)
newActPool = listXor([int(nodeG[2,mutNode])], list(p['ann_actRange']))
nodeG[2,mutNode] = int(newActPool[np.random.randint(len(newActPool))])
child = WannInd(connG, nodeG)
child.birth = gen
return child, innov
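# Usage sketch (hypothetical hyperparameter dict `p` and innovation record
# `innov`; both come from the surrounding NEAT/WANN framework):
#   child, innov = parent.createChild(p, innov, gen=10)
#   # exactly one topological change is applied: add-conn, add-node,
#   # enable-conn, or mutate-activation, chosen by the prob_* roulette in p.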
| 33.490066 | 79 | 0.536682 |
acee628b81db0d859a587c1057836fa2df022702 | 230 | py | Python | procurement_customized/procurement_customized/doctype/procurement_customized_settings/test_procurement_customized_settings.py | vr-greycube/procurement_customized | 31880ceca15b7ae612c27d10ee6834c8b53b079e | [
"MIT"
] | 1 | 2021-12-28T08:12:48.000Z | 2021-12-28T08:12:48.000Z | procurement_customized/procurement_customized/doctype/procurement_customized_settings/test_procurement_customized_settings.py | vr-greycube/procurement_customized | 31880ceca15b7ae612c27d10ee6834c8b53b079e | [
"MIT"
] | null | null | null | procurement_customized/procurement_customized/doctype/procurement_customized_settings/test_procurement_customized_settings.py | vr-greycube/procurement_customized | 31880ceca15b7ae612c27d10ee6834c8b53b079e | [
"MIT"
] | 1 | 2021-12-28T08:12:49.000Z | 2021-12-28T08:12:49.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Greycube and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestProcurementCustomizedSettings(unittest.TestCase):
pass
| 20.909091 | 59 | 0.782609 |
acee62b0df8f1c1d8dda2b587e6ad734d43d1537 | 2,482 | py | Python | stickbugml/base.py | aaronduino/stick-bug-ml | 53ae0ebb9047a9e4dde56cd7d73ecabb28496e3f | [
"Apache-2.0"
] | 3 | 2017-08-07T14:26:18.000Z | 2018-02-10T09:20:34.000Z | stickbugml/base.py | aaronjanse/stick-bug-ml | 53ae0ebb9047a9e4dde56cd7d73ecabb28496e3f | [
"Apache-2.0"
] | 1 | 2018-09-28T02:11:32.000Z | 2018-09-28T05:27:03.000Z | stickbugml/base.py | aaronduino/stick-bug-ml | 53ae0ebb9047a9e4dde56cd7d73ecabb28496e3f | [
"Apache-2.0"
] | null | null | null | __copyright__ = 'Copyright 2017 Aaron Janse'
__license__ = 'Apache 2.0'
from sklearn.model_selection import train_test_split
import sklearn.metrics
import pandas as pd
from ._util import FrameworkManager
def train(model_name, params):
# Add in features
_, valid_amnt, test_amnt = FrameworkManager.train_valid_test_splits
f_train_valid, _ = train_test_split(FrameworkManager.features, test_size=test_amnt, random_state=137)
f_train, f_valid = train_test_split(f_train_valid, test_size=valid_amnt/(1-test_amnt), random_state=137)
train_X = pd.concat([FrameworkManager.train['X'], f_train], axis=1)
validation_X = pd.concat([FrameworkManager.validation['X'], f_valid], axis=1)
train_data = {'X': train_X.copy(), 'y': FrameworkManager.train['y'].copy()}
validation_data = {'X': validation_X.copy(), 'y': FrameworkManager.validation['y'].copy()}
# Train model
model = FrameworkManager.models[model_name]
FrameworkManager.models[model_name]['model'] = model['train'](model['model'], params, train_data, validation_data)
def evaluate(model_name, all_classes=None):
_, _, test_amnt = FrameworkManager.train_valid_test_splits
# Add in features
_, f_test = train_test_split(FrameworkManager.features, test_size=test_amnt, random_state=137)
test_X = pd.concat([FrameworkManager.test['X'], f_test], axis=1)
test_data = {'X': test_X.copy(), 'y': FrameworkManager.test['y'].copy()}
# Make predictions
model = FrameworkManager.models[model_name]
predictions = model['predict'](model['model'], test_data['X'])
if all_classes is None:
labels_arg = {}
else:
labels_arg = {'labels': all_classes}
# Calculate log_loss score
return sklearn.metrics.log_loss(list(test_data['y']), predictions, **labels_arg)
# Used for applying preprocessing and adding features to data not in the training dataset (never-before-seen data)
def process(raw_data):
X = FrameworkManager.preprocess_func(raw_data)
features = pd.DataFrame(index=X.index.copy())
for func in FrameworkManager.feature_funcs:
feature_output = pd.DataFrame(func(X=X.copy()), index=features.index)
features = features.join(feature_output)
return pd.concat([X, features], axis=1) # join preprocessed X with its features
def predict(model_name, processed_X):
model = FrameworkManager.models[model_name]
predictions = model['predict'](model['model'], processed_X)
return predictions
| 38.78125 | 118 | 0.727639 |
acee645e5356ce0ffa6439fdc5199b2ac38bbbe1 | 38 | py | Python | spea/minimum_clique_cover/__init__.py | heyaroom/spea_echo | fd05285aaa55d358bde4458cc73f4e3d39058b68 | [
"MIT"
] | null | null | null | spea/minimum_clique_cover/__init__.py | heyaroom/spea_echo | fd05285aaa55d358bde4458cc73f4e3d39058b68 | [
"MIT"
] | null | null | null | spea/minimum_clique_cover/__init__.py | heyaroom/spea_echo | fd05285aaa55d358bde4458cc73f4e3d39058b68 | [
"MIT"
] | null | null | null | from .clique_cover import clique_cover | 38 | 38 | 0.894737 |
acee65da6c951eb61a753513745467cc865a6f48 | 440 | py | Python | products/migrations/0006_auto_20180302_2002.py | minaeid90/ecommerce | 8b14db5426ded6c7b208f98aa5b4fce425a818b9 | [
"MIT"
] | null | null | null | products/migrations/0006_auto_20180302_2002.py | minaeid90/ecommerce | 8b14db5426ded6c7b208f98aa5b4fce425a818b9 | [
"MIT"
] | null | null | null | products/migrations/0006_auto_20180302_2002.py | minaeid90/ecommerce | 8b14db5426ded6c7b208f98aa5b4fce425a818b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-02 18:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_product_slug'),
]
operations = [
migrations.AlterField(
model_name='product',
name='slug',
field=models.SlugField(blank=True),
),
]
| 20.952381 | 47 | 0.609091 |
acee66913461e8e11b250b33258b092ee7715cb5 | 4,784 | py | Python | docs/conf.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | [
"MIT"
] | 18 | 2018-03-22T21:24:45.000Z | 2021-11-28T15:52:33.000Z | docs/conf.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | [
"MIT"
] | null | null | null | docs/conf.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | [
"MIT"
] | 3 | 2020-04-08T07:28:10.000Z | 2020-11-13T01:29:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mm documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 16 21:03:43 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../mm'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mm'
copyright = '2018, Leon Nguyen'
author = 'Leon Nguyen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mm.tex', 'mm Documentation',
'Leon Nguyen', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mm', 'mm Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mm', 'mm Documentation',
author, 'mm', 'One line description of project.',
'Miscellaneous'),
]
| 29.9 | 79 | 0.677676 |
acee676043bc40328a06ed6c514266d48d764981 | 872 | py | Python | src/plugins/nat/extras/nat_1Ms.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 52 | 2016-09-20T15:08:46.000Z | 2020-12-22T23:03:25.000Z | src/plugins/nat/extras/nat_1Ms.py | fantastic2085/vpp | c599c6f001bc28e1023fb5e74a27db37b1aae847 | [
"Apache-2.0"
] | 63 | 2018-06-11T09:48:35.000Z | 2021-01-05T09:11:03.000Z | src/plugins/nat/extras/nat_1Ms.py | fantastic2085/vpp | c599c6f001bc28e1023fb5e74a27db37b1aae847 | [
"Apache-2.0"
] | 36 | 2016-07-21T11:20:33.000Z | 2022-01-16T15:55:45.000Z | from trex_stl_lib.api import *
class STLS1(object):
def create_stream (self):
base_pkt = Ether()/IP(dst="2.2.0.1")/UDP(dport=12)
pad = Padding()
if len(base_pkt) < 64:
pad_len = 64 - len(base_pkt)
pad.load = '\x00' * pad_len
vm = STLVM()
vm.tuple_var(name="tuple", ip_min="10.0.0.3", ip_max="10.0.39.18", port_min=1025, port_max=1124, limit_flows = 1000000)
vm.write(fv_name="tuple.ip", pkt_offset="IP.src")
vm.fix_chksum()
vm.write(fv_name="tuple.port", pkt_offset="UDP.sport")
pkt = STLPktBuilder(pkt=base_pkt/pad, vm=vm)
return STLStream(packet=pkt, mode=STLTXCont())
def get_streams (self, direction = 0, **kwargs):
return [self.create_stream()]
# dynamic load - used for trex console or simulator
def register():
return STLS1()
| 24.222222 | 127 | 0.604358 |
acee67a6d89ae1a7556dbc1cb4e1f3fc57aa298e | 10,642 | py | Python | pythran/tests/scikit-image/_colormixer.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/scikit-image/_colormixer.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/scikit-image/_colormixer.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | """Color Mixer
NumPy does not do overflow checking when adding or multiplying
integers, so currently the only way to clip results efficiently
(without making copies of the data) is with an extension such as this
one.
"""
import numpy as np
# pythran export add(uint8[:,:,:], uint8[:,:,:], int, int)
# pythran export multiply(uint8[:,:,:], uint8[:,:,:], int, float)
# pythran export brightness(uint8[:,:,:], uint8[:,:,:], float, int)
# pythran export sigmoid_gamma(uint8[:,:,:], uint8[:,:,:], float, float)
# pythran export gamma(uint8[:,:,:], uint8[:,:,:], float)
# pythran export py_hsv_2_rgb(float, float, float)
# pythran export py_rgb_2_hsv(int, int, int)
# pythran export hsv_add (uint8[:,:,:], uint8[:,:,:], float, float, float)
def add(img, stateimg, channel, amount):
"""Add a given amount to a color channel of `stateimg`, and
store the result in `img`. Overflow is clipped.
Parameters
----------
img : (M, N, 3) ndarray of uint8
Output image.
stateimg : (M, N, 3) ndarray of uint8
Input image.
channel : int
Channel (0 for "red", 1 for "green", 2 for "blue").
amount : int
Value to add.
"""
height, width, _ = img.shape
k = channel
n = amount
lut = [np.uint8()] * 256
for l in range(256):
op_result = l + n
if op_result > 255:
op_result = 255
elif op_result < 0:
op_result = 0
lut[l] = np.uint8(op_result)
for i in range(height):
for j in range(width):
img[i, j, k] = lut[stateimg[i, j, k]]
def multiply(img, stateimg, channel, amount):
"""Multiply a color channel of `stateimg` by a certain amount, and
store the result in `img`. Overflow is clipped.
Parameters
----------
img : (M, N, 3) ndarray of uint8
Output image.
stateimg : (M, N, 3) ndarray of uint8
Input image.
channel : int
Channel (0 for "red", 1 for "green", 2 for "blue").
amount : float
Multiplication factor.
"""
height, width, _ = img.shape
k = channel
n = amount
lut = [np.uint8()] * 256
for l in range(256):
op_result = l * n
if op_result > 255:
op_result = 255
elif op_result < 0:
op_result = 0
lut[l] = np.uint8(op_result)
for i in range(height):
for j in range(width):
img[i, j, k] = lut[stateimg[i, j, k]]
def brightness(img, stateimg, factor, offset):
"""Modify the brightness of an image.
'factor' is multiplied to all channels, which are
then added by 'amount'. Overflow is clipped.
Parameters
----------
img : (M, N, 3) ndarray of uint8
Output image.
stateimg : (M, N, 3) ndarray of uint8
Input image.
factor : float
Multiplication factor.
offset : int
        Amount to add to each channel.
"""
height, width, _ = img.shape
lut = [np.uint8()] * 256
for k in range(256):
op_result = k * factor + offset
if op_result > 255:
op_result = 255
elif op_result < 0:
op_result = 0
lut[k] = np.uint8(op_result)
for i in range(height):
for j in range(width):
img[i, j, 0] = lut[stateimg[i, j, 0]]
img[i, j, 1] = lut[stateimg[i, j, 1]]
img[i, j, 2] = lut[stateimg[i, j, 2]]
def sigmoid_gamma(img,
stateimg,
alpha, beta):
height, width, _ = img.shape
c1 = 1 / (1 + np.exp(beta))
c2 = 1 / (1 + np.exp(beta - alpha)) - c1
lut = [np.uint8()] * 256
# compute the lut
for k in range(256):
lut[k] = np.uint8(((1 / (1 + np.exp(beta - (k / 255.) * alpha)))
- c1) * 255 / c2)
for i in range(height):
for j in range(width):
img[i, j, 0] = lut[stateimg[i, j, 0]]
img[i, j, 1] = lut[stateimg[i, j, 1]]
img[i, j, 2] = lut[stateimg[i, j, 2]]
def gamma(img,
stateimg,
gamma):
height, width, _ = img.shape
lut = [np.uint8()] * 256
if gamma == 0:
gamma = 0.00000000000000000001
gamma = 1./gamma
# compute the lut
for k in range(256):
lut[k] = np.uint8((pow((k / 255.), gamma) * 255))
for i in range(height):
for j in range(width):
img[i, j, 0] = lut[stateimg[i, j, 0]]
img[i, j, 1] = lut[stateimg[i, j, 1]]
img[i, j, 2] = lut[stateimg[i, j, 2]]
def rgb_2_hsv(RGB, HSV):
R, G, B = RGB
if R > 255:
R = 255
elif R < 0:
R = 0
if G > 255:
G = 255
elif G < 0:
G = 0
if B > 255:
B = 255
elif B < 0:
B = 0
if R < G:
MIN = R
MAX = G
else:
MIN = G
MAX = R
if B < MIN:
MIN = B
elif B > MAX:
MAX = B
else:
pass
V = MAX / 255.
if MAX == MIN:
H = 0.
elif MAX == R:
H = (60 * (G - B) / (MAX - MIN) + 360) % 360
elif MAX == G:
H = 60 * (B - R) / (MAX - MIN) + 120
else:
H = 60 * (R - G) / (MAX - MIN) + 240
if MAX == 0:
S = 0
else:
S = 1 - MIN / MAX
HSV[0] = H
HSV[1] = S
HSV[2] = V
def hsv_2_rgb(HSV, RGB):
H, S, V = HSV
if H > 360:
H = H % 360
elif H < 0:
H = 360 - ((-1 * H) % 360)
else:
pass
if S > 1:
S = 1
elif S < 0:
S = 0
else:
pass
if V > 1:
V = 1
elif V < 0:
V = 0
else:
pass
hi = int(H / 60.) % 6
f = (H / 60.) - int(H / 60.)
p = V * (1 - S)
q = V * (1 - f * S)
t = V * (1 - (1 - f) * S)
if hi == 0:
r = V
g = t
b = p
elif hi == 1:
r = q
g = V
b = p
elif hi == 2:
r = p
g = V
b = t
elif hi == 3:
r = p
g = q
b = V
elif hi == 4:
r = t
g = p
b = V
else:
r = V
g = p
b = q
RGB[0] = r
RGB[1] = g
RGB[2] = b
def py_hsv_2_rgb(H, S, V):
'''Convert an HSV value to RGB.
Automatic clipping.
Parameters
----------
H : float
From 0. - 360.
S : float
From 0. - 1.
V : float
From 0. - 1.
Returns
-------
out : (R, G, B) ints
Each from 0 - 255
conversion convention from here:
http://en.wikipedia.org/wiki/HSL_and_HSV
'''
HSV = [H, S, V]
RGB = [0] * 3
hsv_2_rgb(HSV, RGB)
R = int(RGB[0] * 255)
G = int(RGB[1] * 255)
B = int(RGB[2] * 255)
return R, G, B
def py_rgb_2_hsv(R, G, B):
'''Convert an HSV value to RGB.
Automatic clipping.
Parameters
----------
R : int
From 0. - 255.
G : int
From 0. - 255.
B : int
From 0. - 255.
Returns
-------
out : (H, S, V) floats
Ranges (0...360), (0...1), (0...1)
conversion convention from here:
http://en.wikipedia.org/wiki/HSL_and_HSV
'''
RGB = [float(R), float(G), float(B)]
HSV = [0.] * 3
rgb_2_hsv(RGB, HSV)
return HSV
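# Round-trip sketch (illustrative; _hsv_round_trip is not part of the
# original API): pure red survives RGB -> HSV -> RGB conversion.
def _hsv_round_trip():
    H, S, V = py_rgb_2_hsv(255, 0, 0)  # (0.0, 1.0, 1.0)
    return py_hsv_2_rgb(H, S, V)  # (255, 0, 0)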
def hsv_add(img, stateimg, h_amt, s_amt, v_amt):
"""Modify the image color by specifying additive HSV Values.
Since the underlying images are RGB, all three values HSV
must be specified at the same time.
The RGB triplet in the image is converted to HSV, the operation
is applied, and then the HSV triplet is converted back to RGB
HSV values are scaled to H(0. - 360.), S(0. - 1.), V(0. - 1.)
then the operation is performed and any overflow is clipped, then the
reverse transform is performed. Those are the ranges to keep in mind,
when passing in values.
Parameters
----------
img : (M, N, 3) ndarray of uint8
Output image.
stateimg : (M, N, 3) ndarray of uint8
Input image.
h_amt : float
        Amount to add to H channel.
    s_amt : float
        Amount to add to S channel.
    v_amt : float
        Amount to add to V channel.
"""
height, width, _ = img.shape
HSV = [0.] * 3
RGB = [0.] * 3
for i in range(height):
for j in range(width):
RGB[0] = stateimg[i, j, 0]
RGB[1] = stateimg[i, j, 1]
RGB[2] = stateimg[i, j, 2]
rgb_2_hsv(RGB, HSV)
# Add operation
HSV[0] += h_amt
HSV[1] += s_amt
HSV[2] += v_amt
hsv_2_rgb(HSV, RGB)
RGB[0] *= 255
RGB[1] *= 255
RGB[2] *= 255
img[i, j, 0] = RGB[0]
img[i, j, 1] = RGB[1]
img[i, j, 2] = RGB[2]
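# Example call (illustrative): rotate hue by 30 degrees while leaving S and V
# unchanged -- hsv_add(out_img, src_img, 30., 0., 0.)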
# pythran export hsv_multiply(uint8[:,:,:], uint8[:,:,:], float, float, float)
def hsv_multiply(img,
stateimg,
h_amt, s_amt, v_amt):
"""Modify the image color by specifying multiplicative HSV Values.
Since the underlying images are RGB, all three values HSV
must be specified at the same time.
The RGB triplet in the image is converted to HSV, the operation
is applied, and then the HSV triplet is converted back to RGB
HSV values are scaled to H(0. - 360.), S(0. - 1.), V(0. - 1.)
then the operation is performed and any overflow is clipped, then the
reverse transform is performed. Those are the ranges to keep in mind,
when passing in values.
Note that since hue is in degrees, it makes no sense to multiply
that channel, thus an add operation is performed on the hue. And the
values given for h_amt, should be the same as for hsv_add
Parameters
----------
img : (M, N, 3) ndarray of uint8
Output image.
stateimg : (M, N, 3) ndarray of uint8
Input image.
h_amt : float
        Amount to add to H channel.
    s_amt : float
        Amount by which to multiply S channel.
    v_amt : float
        Amount by which to multiply V channel.
"""
height, width, _ = img.shape
HSV = [0.] * 3
RGB = [0.] * 3
for i in range(height):
for j in range(width):
RGB[0] = stateimg[i, j, 0]
RGB[1] = stateimg[i, j, 1]
RGB[2] = stateimg[i, j, 2]
rgb_2_hsv(RGB, HSV)
# Multiply operation
HSV[0] += h_amt
HSV[1] *= s_amt
HSV[2] *= v_amt
hsv_2_rgb(HSV, RGB)
RGB[0] *= 255
RGB[1] *= 255
RGB[2] *= 255
img[i, j, 0] = RGB[0]
img[i, j, 1] = RGB[1]
img[i, j, 2] = RGB[2]
| 22.12474 | 78 | 0.496805 |
acee6a41c34e13606265a066c923c4bee60b64b3 | 3,120 | py | Python | assets/baekjoon/1260_DFS_BFS/python_1260.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null | assets/baekjoon/1260_DFS_BFS/python_1260.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null | assets/baekjoon/1260_DFS_BFS/python_1260.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null |
from collections import deque
def DFS(graph, root): # function that performs DFS
    visited = [] # visited nodes are collected here
    stack = [root] # the stack starts with the root node
    while stack: # run until the stack is empty
        n = stack.pop() # pop one element from the stack
        if n not in visited: # if the popped element is not yet in visited
            visited.append(n) # add it to visited
        if n in graph: # if the popped element exists in the graph
            temp = list(set(graph[n]) - set(visited)) # collect the popped element's unvisited neighbors in temp
            temp.sort(reverse=True) # push larger numbers first so smaller numbers are explored first
            stack += temp # push all of temp onto the stack
    #return " ".join(str(i) for i in visited) # print visited as a string with " " between items
    return visited
def BFS(graph, root): # function that performs BFS
    visited = [] # visited nodes are collected here
    queue = deque([root]) # the queue starts with the root node
    while queue: # run until the queue is empty
        n = queue.popleft() # pop one element from the queue
        if n not in visited: # if the popped element is not yet in visited
            visited.append(n) # add it to visited
        if n in graph: # if the popped element exists in the graph
            temp = list(set(graph[n]) - set(visited)) # collect the popped element's unvisited neighbors in temp
            temp.sort() # sort in ascending order
            queue += temp # push all of temp onto the queue
    #return " ".join(str(i) for i in visited) # print visited as a string with " " between items
    return visited
graph = {} # create the graph as a dictionary
#Node N, Edge M, Start Node V
N, M, V = map(int, input().split()) # each value is read into its variable as an integer # when node names are integers
''' # when node names are strings
n = input().split()
N = int(n[0])
M = int(n[1])
V = n[2]
'''
for i in range(M):
    n1, n2 = map(int, input().split()) # when node names are integers
    ''' # when node names are strings
    m = input().split()
    n1, n2 = [i for i in m]
    '''
    if n1 not in graph: # if the node is not registered yet
        graph[n1] = [n2] # register the node and add the connected node
    elif n2 not in graph[n1]: # if the node exists but the edge is not registered
        graph[n1].append(n2) # add the connected node to this node
    if n2 not in graph: # for an undirected graph, register both directions as well
        graph[n2] = [n1] # same as above
    elif n1 not in graph[n2]:
        graph[n2].append(n1)
#print(DFS(graph, V))
#print(BFS(graph, V))
print(*DFS(graph, V))
print(*BFS(graph, V))
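# Quick check with the sample from the problem statement: for input
#   4 5 1
#   1 2 / 1 3 / 1 4 / 2 4 / 3 4  (one pair per line)
# this program prints "1 2 4 3" (DFS) and "1 2 3 4" (BFS).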
"""
N, M, V = map(int, input().split())
matrix = [[0] * (N + 1) for _ in range(N + 1)]
for _ in range(M):
link = list(map(int, input().split()))
matrix[link[0]][link[1]] = 1
matrix[link[1]][link[0]] = 1
def dfs(current_node, row, foot_prints):
foot_prints += [current_node]
for search_node in range(len(row[current_node])):
if row[current_node][search_node] and search_node not in foot_prints:
foot_prints = dfs(search_node, row, foot_prints)
return foot_prints
def bfs(start):
queue = [start]
foot_prints = [start]
while queue:
current_node = queue.pop(0)
for search_node in range(len(matrix[current_node])):
if matrix[current_node][search_node] and search_node not in foot_prints:
foot_prints += [search_node]
queue += [search_node]
return foot_prints
print(*dfs(V, matrix, []))
print(*bfs(V))
""" | 31.2 | 91 | 0.559615 |
acee6bb232433b1b34b0360476ac590a81b49a3e | 1,097 | py | Python | nodes/mid_reynolds.py | MarianoOG/Swam | 9a0fcabd6c74212c80319cea93ef27ed7256d426 | [
"MIT"
] | null | null | null | nodes/mid_reynolds.py | MarianoOG/Swam | 9a0fcabd6c74212c80319cea93ef27ed7256d426 | [
"MIT"
] | null | null | null | nodes/mid_reynolds.py | MarianoOG/Swam | 9a0fcabd6c74212c80319cea93ef27ed7256d426 | [
"MIT"
] | 1 | 2020-03-05T19:42:05.000Z | 2020-03-05T19:42:05.000Z | #!/usr/bin/env python
import rospy
from swarm.msg import QuadStamped, QuadState
def des_callback(q_des):
global des
des = q_des
# rospy.loginfo("des = [%f, %f, %f - %f]", des.x, des.y, des.z, des.yaw)
def info_callback(state):
global pub, quad, des
quad = state
quad.pos.x = des.x
quad.pos.y = des.y
quad.pos.z = des.z
quad.pos.yaw = des.yaw
# Publish:
try:
quad.header.stamp = rospy.Time.now()
pub.publish(quad)
except rospy.ROSException:
pass
if __name__ == '__main__':
rospy.init_node('mid_reynolds', anonymous=True)
rospy.loginfo("Node %s started!", rospy.get_name())
des = QuadStamped()
pub = rospy.Publisher('next_generation', QuadState, queue_size=10)
quad = QuadState()
try:
rospy.sleep(1)
rospy.Subscriber('quad_state', QuadState, info_callback)
rospy.Subscriber('mid_state', QuadStamped, des_callback)
rospy.loginfo("Node %s start spining!", rospy.get_name())
rospy.spin()
except rospy.ROSInterruptException:
pass
finally:
quad.header.stamp = rospy.Time.now()
pub.publish(quad)
rospy.loginfo("Node %s finished!", rospy.get_name())
| 21.94 | 73 | 0.701003 |
acee6bc69299ea2580a7f7b5ef3801abec0b513b | 4,915 | py | Python | Tests/DASTests/SiddhiAppManagerTests.py | grainier/PySiddhi | df02e6c398d9d2cde0a1295d8a6f842492dfc441 | [
"Apache-2.0"
] | null | null | null | Tests/DASTests/SiddhiAppManagerTests.py | grainier/PySiddhi | df02e6c398d9d2cde0a1295d8a6f842492dfc441 | [
"Apache-2.0"
] | null | null | null | Tests/DASTests/SiddhiAppManagerTests.py | grainier/PySiddhi | df02e6c398d9d2cde0a1295d8a6f842492dfc441 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import logging
from time import sleep
from PySiddhi4.das.DASClient import DASClient
from PySiddhi4.das.SiddhiAppManagement.SiddhiAppManagementClient import UpdateAppStatusResponse
logging.basicConfig(level=logging.INFO)
resources_path = os.path.join(os.path.dirname(__file__), "Resources")
class SiddhiAppManagementTests(unittest.TestCase):
def setUp(self):
self.hostUrl = "http://localhost:9090"
logging.info("Prior to launching tests, make sure DAS 4 is running at " + self.hostUrl)
def tearDown(self):
sleep(5) # Sleep to provide sufficient time for DAS 4.0 to update status
def testRetrieveSiddhiAppStatus(self):
logging.info("Test1: Retrieving a Siddhi App Status")
dasPythonClient = DASClient(self.hostUrl)
siddhiAppManagementClient = dasPythonClient.getSiddhiAppManagementClient()
status = siddhiAppManagementClient.retrieveStatusSiddhiApp("TestSiddhiApp")
self.assertEqual(status, "active")
def testRetrieveSiddhiApp(self):
logging.info("Test1: Retrieving a Siddhi App")
dasPythonClient = DASClient(self.hostUrl)
siddhiAppManagementClient = dasPythonClient.getSiddhiAppManagementClient()
app = siddhiAppManagementClient.retrieveSiddhiApp("TestSiddhiApp")
lines = []
with open(resources_path + "/TestSiddhiApp.siddhi", "rb") as f:
lines = [line.decode() for line in f.readlines()]
target_app = "".join(lines)
logging.info(target_app)
logging.info(app)
self.assertEqual(app, target_app)
def testListSiddhiApps(self):
logging.info("Test1: List Siddhi Apps")
dasPythonClient = DASClient(self.hostUrl)
siddhiAppManagementClient = dasPythonClient.getSiddhiAppManagementClient()
lines = []
with open(resources_path + "/TestSiddhiApp1.siddhi", "rb") as f:
lines = [line.decode() for line in f.readlines()]
siddhiApp = "".join(lines)
result = siddhiAppManagementClient.saveSiddhiApp(siddhiApp)
self.assertTrue(result)
sleep(5)
apps = siddhiAppManagementClient.listSiddhiApps()
logging.info(apps)
self.assertTrue("TestSiddhiApp1" in apps)
logging.info(apps)
apps = siddhiAppManagementClient.listSiddhiApps(isActive=True)
self.assertTrue("TestSiddhiApp1" in apps)
logging.info(apps)
apps = siddhiAppManagementClient.listSiddhiApps(isActive=False)
self.assertTrue("TestSiddhiApp1" not in apps)
logging.info(apps)
result = siddhiAppManagementClient.deleteSiddhiApp("TestSiddhiApp1")
self.assertTrue(result)
def testSaveAndDeleteSiddhiApp(self):
logging.info("Test1: Save and Delete Siddhi App")
dasPythonClient = DASClient(self.hostUrl)
siddhiAppManagerClient = dasPythonClient.getSiddhiAppManagementClient()
lines = []
with open(resources_path + "/TestSiddhiApp1.siddhi", "rb") as f:
lines = [line.decode() for line in f.readlines()]
siddhiApp = "".join(lines)
result = siddhiAppManagerClient.saveSiddhiApp(siddhiApp)
self.assertTrue(result)
sleep(5)
result = siddhiAppManagerClient.deleteSiddhiApp("TestSiddhiApp1")
self.assertTrue(result)
def testUpdateAndDeleteSiddhiApp(self):
logging.info("Test: Update and Delete Siddhi App")
dasPythonClient = DASClient(self.hostUrl)
siddhiAppManagerClient = dasPythonClient.getSiddhiAppManagementClient()
lines = []
with open(resources_path + "/TestSiddhiApp1.siddhi", "rb") as f:
lines = [line.decode() for line in f.readlines()]
siddhiApp = "".join(lines)
result = siddhiAppManagerClient.updateSiddhiApp(siddhiApp)
self.assertTrue(result.name == UpdateAppStatusResponse.savedNew.name)
sleep(5)
result = siddhiAppManagerClient.updateSiddhiApp(siddhiApp)
self.assertTrue(result.name == UpdateAppStatusResponse.updated.name)
sleep(5)
result = siddhiAppManagerClient.deleteSiddhiApp("TestSiddhiApp1")
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| 33.209459 | 95 | 0.697253 |
acee6c1cc90bb2dcd671fe038509fe8b496fc097 | 436 | py | Python | todo/migrations/0006_todoitem_deadline.py | AnimeAllstar/To-Do-App | 795bef61e9dcace7e86b67a188558be76459c19b | [
"MIT"
] | 1 | 2020-09-22T10:36:40.000Z | 2020-09-22T10:36:40.000Z | todo/migrations/0006_todoitem_deadline.py | AnimeAllstar/To-Do-App | 795bef61e9dcace7e86b67a188558be76459c19b | [
"MIT"
] | 2 | 2020-09-22T08:11:35.000Z | 2021-06-10T20:12:59.000Z | todo/migrations/0006_todoitem_deadline.py | AnimeAllstar/to-do-app | ec342a0d5032c0a240d212ba6af1919396761efc | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-30 14:56
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('todo', '0005_auto_20200920_0125'),
]
operations = [
migrations.AddField(
model_name='todoitem',
name='deadline',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| 21.8 | 74 | 0.630734 |
acee6c6b406ec9322ce8568dddd7453a1bcd65fe | 4,257 | py | Python | src/Modbus/sunspec/core/smdx.py | jakkaj/SmartMeterMonitor | 9868ed5cea3dbcd110c0803404bb9041e803340f | [
"MIT"
] | 1 | 2021-06-01T07:08:17.000Z | 2021-06-01T07:08:17.000Z | src/Modbus/sunspec/core/smdx.py | jakkaj/SmartMeterMonitor | 9868ed5cea3dbcd110c0803404bb9041e803340f | [
"MIT"
] | null | null | null | src/Modbus/sunspec/core/smdx.py | jakkaj/SmartMeterMonitor | 9868ed5cea3dbcd110c0803404bb9041e803340f | [
"MIT"
] | null | null | null |
"""
Copyright (C) 2018 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import sunspec.core.suns as suns
SMDX_ROOT = 'sunSpecModels'
SMDX_MODEL = 'model'
SMDX_BLOCK = 'block'
SMDX_POINT = 'point'
SMDX_ATTR_ID = 'id'
SMDX_ATTR_LEN = 'len'
SMDX_ATTR_NAME = 'name'
SMDX_ATTR_TYPE = 'type'
SMDX_ATTR_TYPE_FIXED = 'fixed'
SMDX_ATTR_TYPE_REPEATING = 'repeating'
SMDX_ATTR_OFFSET = 'offset'
SMDX_ATTR_MANDATORY = 'mandatory'
SMDX_ATTR_ACCESS = 'access'
SMDX_ATTR_SF = 'sf'
SMDX_ATTR_UNITS = 'units'
SMDX_SYMBOL = 'symbol'
SMDX_STRINGS = 'strings'
SMDX_LABEL = 'label'
SMDX_DESCRIPTION = 'description'
SMDX_NOTES = 'notes'
SMDX_TYPE_INT16 = 'int16'
SMDX_TYPE_UINT16 = 'uint16'
SMDX_TYPE_COUNT = 'count'
SMDX_TYPE_ACC16 = 'acc16'
SMDX_TYPE_ENUM16 = 'enum16'
SMDX_TYPE_BITFIELD16 = 'bitfield16'
SMDX_TYPE_PAD = 'pad'
SMDX_TYPE_INT32 = 'int32'
SMDX_TYPE_UINT32 = 'uint32'
SMDX_TYPE_ACC32 = 'acc32'
SMDX_TYPE_ENUM32 = 'enum32'
SMDX_TYPE_BITFIELD32 = 'bitfield32'
SMDX_TYPE_IPADDR = 'ipaddr'
SMDX_TYPE_INT64 = 'int64'
SMDX_TYPE_UINT64 = 'uint64'
SMDX_TYPE_ACC64 = 'acc64'
SMDX_TYPE_IPV6ADDR = 'ipv6addr'
SMDX_TYPE_FLOAT32 = 'float32'
SMDX_TYPE_STRING = 'string'
SMDX_TYPE_SUNSSF = 'sunssf'
SMDX_TYPE_EUI48 = 'eui48'
SMDX_ACCESS_R = 'r'
SMDX_ACCESS_RW = 'rw'
SMDX_MANDATORY_FALSE = 'false'
SMDX_MANDATORY_TRUE = 'true'
smdx_access_types = {
SMDX_ACCESS_R: suns.SUNS_ACCESS_R,
SMDX_ACCESS_RW: suns.SUNS_ACCESS_RW
}
smdx_mandatory_types = {
SMDX_MANDATORY_FALSE: suns.SUNS_MANDATORY_FALSE,
SMDX_MANDATORY_TRUE: suns.SUNS_MANDATORY_TRUE
}
# map SMDX block types to SunSpec block types
smdx_block_types = {
SMDX_ATTR_TYPE_FIXED: suns.SUNS_BLOCK_FIXED,
SMDX_ATTR_TYPE_REPEATING: suns.SUNS_BLOCK_REPEATING
}
# map SMDX point types to SunSpec point types
smdx_point_types = {
SMDX_TYPE_INT16: suns.SUNS_TYPE_INT16,
SMDX_TYPE_UINT16: suns.SUNS_TYPE_UINT16,
SMDX_TYPE_COUNT: suns.SUNS_TYPE_COUNT,
SMDX_TYPE_ACC16: suns.SUNS_TYPE_ACC16,
SMDX_TYPE_ENUM16: suns.SUNS_TYPE_ENUM16,
SMDX_TYPE_BITFIELD16: suns.SUNS_TYPE_BITFIELD16,
SMDX_TYPE_PAD: suns.SUNS_TYPE_PAD,
SMDX_TYPE_INT32: suns.SUNS_TYPE_INT32,
SMDX_TYPE_UINT32: suns.SUNS_TYPE_UINT32,
SMDX_TYPE_ACC32: suns.SUNS_TYPE_ACC32,
SMDX_TYPE_ENUM32: suns.SUNS_TYPE_ENUM32,
SMDX_TYPE_BITFIELD32: suns.SUNS_TYPE_BITFIELD32,
SMDX_TYPE_IPADDR: suns.SUNS_TYPE_IPADDR,
SMDX_TYPE_INT64: suns.SUNS_TYPE_INT64,
SMDX_TYPE_UINT64: suns.SUNS_TYPE_UINT64,
SMDX_TYPE_ACC64: suns.SUNS_TYPE_ACC64,
SMDX_TYPE_IPV6ADDR: suns.SUNS_TYPE_IPV6ADDR,
SMDX_TYPE_FLOAT32: suns.SUNS_TYPE_FLOAT32,
SMDX_TYPE_STRING: suns.SUNS_TYPE_STRING,
SMDX_TYPE_SUNSSF: suns.SUNS_TYPE_SUNSSF,
SMDX_TYPE_EUI48: suns.SUNS_TYPE_EUI48
}
def model_id_to_filename(model_id):
return 'smdx_%05d.xml' % (int(model_id))
def model_filename_to_id(filename):
model_id = None
if filename[0:5] == 'smdx_' and filename[-4:] == '.xml':
try:
model_id = int(filename[5:-4])
        except ValueError:  # non-numeric model id; fall through to None
            pass
return model_id
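# Round-trip example: model_id_to_filename(101) -> 'smdx_00101.xml' and
# model_filename_to_id('smdx_00101.xml') -> 101; filenames that do not
# match the smdx_*.xml pattern yield None.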
| 32.007519 | 81 | 0.737374 |
acee6ce668f5076853e6bbafe89401d7f7992c08 | 2,941 | py | Python | app/config.py | snakeoiljosh/Webster | 8fa56c009dafa8ba00fd5c2b462b2e88afe2bd97 | [
"MIT"
] | null | null | null | app/config.py | snakeoiljosh/Webster | 8fa56c009dafa8ba00fd5c2b462b2e88afe2bd97 | [
"MIT"
] | null | null | null | app/config.py | snakeoiljosh/Webster | 8fa56c009dafa8ba00fd5c2b462b2e88afe2bd97 | [
"MIT"
] | null | null | null | import secrets
from typing import Any, Dict, List, Optional, Union
from pydantic import AnyHttpUrl, BaseSettings, EmailStr, PostgresDsn, validator, ValidationError, AnyUrl
class Settings(BaseSettings):
DEBUG: bool = False
MOCK_CLASSIFIERS: bool = False
API_V1_STR: str = "/api/v1"
SECRET_KEY: str = secrets.token_urlsafe(32)
CSRF_KEY: str = secrets.token_urlsafe(32)
SESSION_COOKIE: str = 'session'
SESSION_EXPIRE_SECONDS: int = 60 * 60 * 24 * 10
SESSION_SAME_SITE: str = 'lax' # lax, strict, or none
# ACCESS_TOKEN_EXPIRE_MINUTES: int = 30 # 60 * 24 * 8
SERVER_NAME: str
SERVER_HOST: AnyHttpUrl = 'http://localhost:8000'
ALLOWED_HOSTS: List[str]
# BACKEND_CORS_ORIGINS is a JSON-formatted list of origins
# e.g: '["http://localhost", "http://localhost:4200", "http://localhost:3000", \
# "http://localhost:8080", "http://local.dockertoolbox.tiangolo.com"]'
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = ['http://localhost:8080']
@validator("BACKEND_CORS_ORIGINS", pre=True)
def assemble_cors_origins(cls, v: Union[str, List[str]]) -> Union[List[str], str]:
if isinstance(v, str) and not v.startswith("["):
return [i.strip() for i in v.split(",")]
elif isinstance(v, (list, str)):
return v
raise ValueError(v)
PROJECT_NAME: str
# TODO: Create a validator for this. Pydantic provides a PostgresDsn, but
# not other DSN types. We want to at least support sqlite here, so just
# making this a non-validated string for now.
SQLALCHEMY_DATABASE_URI: str = None
SMTP_TLS: bool = True
SMTP_PORT: Optional[int] = None
SMTP_HOST: Optional[str] = None
SMTP_USER: Optional[str] = None
SMTP_PASSWORD: Optional[str] = None
EMAILS_FROM_EMAIL: Optional[EmailStr] = None
EMAILS_FROM_NAME: Optional[str] = None
# OAUTH
OAUTH2_ACCESS_TOKEN_TIMEOUT_SECONDS: int = 30 # 300
OAUTH2_REFRESH_TOKEN_TIMEOUT_SECONDS: int = 600
@validator("EMAILS_FROM_NAME")
def get_project_name(cls, v: Optional[str], values: Dict[str, Any]) -> str:
if not v:
return values["PROJECT_NAME"]
return v
EMAIL_RESET_TOKEN_EXPIRE_HOURS: int = 48
EMAIL_TEMPLATES_DIR: str = "templates/email"
EMAILS_ENABLED: bool = False
@validator("EMAILS_ENABLED", pre=True)
def get_emails_enabled(cls, v: bool, values: Dict[str, Any]) -> bool:
return bool(
values.get("SMTP_HOST")
and values.get("SMTP_PORT")
and values.get("EMAILS_FROM_EMAIL")
)
EMAIL_TEST_USER: EmailStr = "test@example.com" # type: ignore
USERS_OPEN_REGISTRATION: bool = False
DOCSET: str = 'full' # some docs are flagged only to show in full mode
class Config:
#env_file = '.env'
case_sensitive = False # Has no effect on Windows ∴ not recommended
env_prefix = 'WEBSTER_'
settings = Settings()
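# With env_prefix = 'WEBSTER_', fields are read from environment variables,
# e.g. (illustrative values):
#   WEBSTER_SERVER_NAME=webster
#   WEBSTER_BACKEND_CORS_ORIGINS=http://localhost,http://localhost:8080
# Complex fields such as ALLOWED_HOSTS are parsed from JSON by pydantic;
# BACKEND_CORS_ORIGINS additionally accepts the comma-separated form via
# assemble_cors_origins above.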
| 36.7625 | 104 | 0.66712 |
acee6cf532e4e9eb39967311c4d9eda884fc6409 | 2,170 | py | Python | migration/ams_migrator.py | eclissi91/brahma | 5ef9f8e9d9d3871c21aa9832962ee9ee3800c9a9 | [
"MIT"
] | null | null | null | migration/ams_migrator.py | eclissi91/brahma | 5ef9f8e9d9d3871c21aa9832962ee9ee3800c9a9 | [
"MIT"
] | null | null | null | migration/ams_migrator.py | eclissi91/brahma | 5ef9f8e9d9d3871c21aa9832962ee9ee3800c9a9 | [
"MIT"
] | null | null | null | #
# Copyright (c) Ionplus AG and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for details.
#
from migration import mappings
from migration import queries
from migration.migrator import Migrator
class AmsMigrator(Migrator):
def __init__(self, db_session, source_schema, target_schema, isotope_number):
super().__init__(db_session, '_ams_', source_schema, target_schema)
self.isotope_number = isotope_number
def migrate_customer(self):
return self._map(mappings.customer)
def migrate_project_advisor(self):
return self._execute(self._prepare(queries.migrate_project_advisor))
def migrate_project(self):
return self._map(mappings.project)
def migrate_sample(self):
return self._map(mappings.sample, (mappings.isotope_number(self.isotope_number),))
def migrate_preparation(self):
return self._map(mappings.preparation, (mappings.isotope_number(self.isotope_number),))
def migrate_target(self):
return self._map(mappings.target, (mappings.isotope_number(self.isotope_number),))
def migrate_magazine(self):
return self._execute(self._prepare(queries.migrate_magazine))
def associate_magazine(self):
self._execute(queries.disable_target_triggers)
result = self._execute(self._prepare(queries.update_target_set_magazine))
self._execute(queries.enable_target_triggers)
return result
def migrate_measurement_sequence(self):
self._execute(queries.disable_measurement_sequence_triggers)
result = self._execute(self._prepare(queries.migrate_measurement_sequence))
self._execute(queries.enable_measurement_sequence_triggers)
return result
def migrate_calculation_set(self):
return self._map(mappings.calculation_set)
def migrate_calculation_correction(self):
return self._map(mappings.calculation_correction)
def migrate_calculation_sample(self):
return self._map(mappings.calculation_sample, (mappings.isotope_number(self.isotope_number),))
| 38.75 | 103 | 0.734562 |
acee6fc18d4119b52658d11a27ff71781ee8bb97 | 3,688 | py | Python | backend/tests/baserow/contrib/database/ws/test_ws_rows_signals.py | ericderace/baserow | 7b35e81f75166d914d07ef4ad0c30c625b6bb396 | [
"MIT"
] | 1 | 2021-04-13T16:27:58.000Z | 2021-04-13T16:27:58.000Z | backend/tests/baserow/contrib/database/ws/test_ws_rows_signals.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | 6 | 2021-04-08T22:03:06.000Z | 2022-01-13T03:38:17.000Z | backend/tests/baserow/contrib/database/ws/test_ws_rows_signals.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | null | null | null | import pytest
from unittest.mock import patch
from baserow.contrib.database.rows.handler import RowHandler
@pytest.mark.django_db(transaction=True)
@patch('baserow.ws.registries.broadcast_to_channel_group')
def test_row_created(mock_broadcast_to_channel_group, data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
field = data_fixture.create_text_field(table=table)
row = RowHandler().create_row(user=user, table=table, values={
f'field_{field.id}': 'Test'
})
mock_broadcast_to_channel_group.delay.assert_called_once()
args = mock_broadcast_to_channel_group.delay.call_args
assert args[0][0] == f'table-{table.id}'
assert args[0][1]['type'] == 'row_created'
assert args[0][1]['table_id'] == table.id
assert args[0][1]['row']['id'] == row.id
assert args[0][1]['before_row_id'] is None
assert args[0][1]['row'][f'field_{field.id}'] == 'Test'
row_2 = RowHandler().create_row(user=user, table=table, before=row, values={
f'field_{field.id}': 'Test2'
})
args = mock_broadcast_to_channel_group.delay.call_args
assert args[0][0] == f'table-{table.id}'
assert args[0][1]['type'] == 'row_created'
assert args[0][1]['table_id'] == table.id
assert args[0][1]['row']['id'] == row_2.id
assert args[0][1]['before_row_id'] == row.id
assert args[0][1]['row'][f'field_{field.id}'] == 'Test2'
@pytest.mark.django_db(transaction=True)
@patch('baserow.ws.registries.broadcast_to_channel_group')
def test_row_updated(mock_broadcast_to_channel_group, data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
field = data_fixture.create_text_field(table=table)
field_2 = data_fixture.create_text_field(table=table)
row = table.get_model().objects.create()
RowHandler().update_row(user=user, table=table, row_id=row.id, values={
f'field_{field.id}': 'Test'
})
mock_broadcast_to_channel_group.delay.assert_called_once()
args = mock_broadcast_to_channel_group.delay.call_args
assert args[0][0] == f'table-{table.id}'
assert args[0][1]['type'] == 'row_updated'
assert args[0][1]['table_id'] == table.id
assert args[0][1]['row']['id'] == row.id
assert args[0][1]['row'][f'field_{field.id}'] == 'Test'
assert args[0][1]['row'][f'field_{field_2.id}'] is None
row.refresh_from_db()
setattr(row, f'field_{field_2.id}', 'Second')
row.save()
RowHandler().update_row(user=user, table=table, row_id=row.id, values={
f'field_{field.id}': 'First'
})
args = mock_broadcast_to_channel_group.delay.call_args
assert args[0][0] == f'table-{table.id}'
assert args[0][1]['type'] == 'row_updated'
assert args[0][1]['table_id'] == table.id
assert args[0][1]['row']['id'] == row.id
assert args[0][1]['row'][f'field_{field.id}'] == 'First'
assert args[0][1]['row'][f'field_{field_2.id}'] == 'Second'
@pytest.mark.django_db(transaction=True)
@patch('baserow.ws.registries.broadcast_to_channel_group')
def test_row_deleted(mock_broadcast_to_channel_group, data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
row = table.get_model().objects.create()
row_id = row.id
RowHandler().delete_row(user=user, table=table, row_id=row_id)
mock_broadcast_to_channel_group.delay.assert_called_once()
args = mock_broadcast_to_channel_group.delay.call_args
assert args[0][0] == f'table-{table.id}'
assert args[0][1]['type'] == 'row_deleted'
assert args[0][1]['row_id'] == row_id
assert args[0][1]['table_id'] == table.id
| 40.527473 | 80 | 0.687364 |
acee7001e3cbe07beaf11c44d119705773ca263f | 7,827 | py | Python | metric/modeling/backbones/resnet_wider.py | jireh-father/pymetric | bb3fd85f872da7bf867cb92b0eb17ad22cc5f96e | [
"MIT"
] | 62 | 2020-08-26T11:06:37.000Z | 2022-03-29T03:26:00.000Z | metric/modeling/backbones/resnet_wider.py | ym547559398/pycls | f7c4f354f87969142263c87e1fb33499b7b2d62a | [
"MIT"
] | 2 | 2021-06-02T10:19:53.000Z | 2021-12-06T05:41:23.000Z | metric/modeling/backbones/resnet_wider.py | ym547559398/pycls | f7c4f354f87969142263c87e1fb33499b7b2d62a | [
"MIT"
] | 11 | 2020-09-14T12:26:17.000Z | 2021-10-04T06:29:35.000Z | import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
        self.downsample = downsample  # hack: assign downsample first so the submodule registration order is correct
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, width_mult=1):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64 * width_mult
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64 * width_mult, layers[0])
self.layer2 = self._make_layer(block, 128 * width_mult, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256 * width_mult, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512 * width_mult, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
#self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
#self.fc = nn.Linear(512 * block.expansion * width_mult, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
#x = self.avgpool(x)
#x = torch.flatten(x, 1)
#x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def resnet50x1(**kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=1)
def resnet50x2(**kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=2)
def resnet50x4(**kwargs):
return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=4)
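# Illustrative smoke test (a minimal sketch, not part of the original file;
# the function name and the 224x224 input size are assumptions):
def _example_forward_pass():
    model = resnet50x1()
    x = torch.randn(1, 3, 224, 224)  # dummy input batch
    feats = model(x)  # (1, 2048, 7, 7): avgpool/fc are commented out above
    return feats.shape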
def resnet152x2(**kwargs):
return ResNet(Bottleneck, [3, 8, 36, 3], width_mult=2)
def resnet152x1(**kwargs):
return ResNet(Bottleneck, [3, 8, 36, 3], width_mult=1)
| 36.404651 | 106 | 0.599719 |
acee702c2f2cefa983c50bfafcd3617f9ca926ec | 2,608 | py | Python | LeapMotionBlender/Properties/record_properties.py | ALucatero03/Blender-Puppet-Motion | ce3beea7d681966a17aeaf6382424efaa3f05f5d | [
"MIT"
] | 21 | 2020-11-05T23:11:28.000Z | 2022-03-22T16:01:08.000Z | LeapMotionBlender/Properties/record_properties.py | ALucatero03/Blender-Puppet-Motion | ce3beea7d681966a17aeaf6382424efaa3f05f5d | [
"MIT"
] | 6 | 2019-10-08T20:55:22.000Z | 2019-12-17T07:05:22.000Z | LeapMotionBlender/Properties/record_properties.py | ALucatero03/Blender-Puppet-Motion | ce3beea7d681966a17aeaf6382424efaa3f05f5d | [
"MIT"
] | 5 | 2019-10-09T18:08:44.000Z | 2019-12-24T10:46:43.000Z | import bpy
from bpy.props import IntProperty,BoolProperty, StringProperty
from bpy.types import PropertyGroup
from ..general_helpers import RegisterMixin
from ..bone_mover import move_bones
from ..communicator import message_queue as mq
from ..communicator import clear_queue
from .leap_bone_properties import Leap2BoneProperty
class RecordProperties(RegisterMixin, PropertyGroup):
def record_toggle(self, context):
clear_queue()
props = context.scene.RecordProperties
new_verb = "Stop" if props.recording else "Start"
props.icon = "CANCEL" if props.recording else "VIEW_CAMERA"
props.start_frame = context.scene.frame_current if props.recording else 0
props.button_text = f"{new_verb} Recording"
def move_toggle(self, context):
clear_queue()
props = context.scene.RecordProperties
bpy.ops.pose.user_transforms_clear()
if not bpy.app.timers.is_registered(move_bones) and props.move_bones:
bpy.app.timers.register(move_bones, first_interval=1, persistent=False)
if not props.move_bones and bpy.app.timers.is_registered(move_bones):
bpy.app.timers.unregister(move_bones)
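    # bpy.app.timers drives the polling loop: registering move_bones starts
    # the periodic bone updates from incoming data, and unregistering it
    # stops them (move_bones itself lives in ..bone_mover).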
move_bones : BoolProperty(
name="Move Bones",
description="Should the bones be moved with incoming data?",
default = False,
update = move_toggle
)
button_text : StringProperty(
name="Name used in the operator",
default="Start Recording"
)
icon : StringProperty(
name="Icon used",
default="VIEW_CAMERA"
)
recording: BoolProperty(
name="Record",
description="Should new keyframes be inserted from the data acquired?",
default= False,
update=record_toggle
)
start_frame : IntProperty(
name="Start frame",
description="Frame offset used in the keyframe insertion",
default= 0
)
framerate : IntProperty(
name="Sample Rate",
description="How many frames per second should be sampled while moving the bone?",
default = 24,
soft_max = 60,
soft_min = 12
)
record_rate : IntProperty(
name="Record rate",
description="How many movement samples must be taken before inserting a keyframe?",
default = 12,
soft_max = 60,
soft_min = 12
)
frame_counter : IntProperty(
name="Frame counter",
description="Internal counter used to insert frames at every record_rate",
default = 1,
soft_min = 1
) | 31.804878 | 91 | 0.656058 |
acee70bc3633a206dee0512de75e22fd1978b767 | 4,555 | py | Python | Metallicity_Stack_Commons/__init__.py | jefferycline1/Metallicity_Stack_Commons | b726609692bccc623d8a7bf1dde4559caa196720 | [
"MIT"
] | null | null | null | Metallicity_Stack_Commons/__init__.py | jefferycline1/Metallicity_Stack_Commons | b726609692bccc623d8a7bf1dde4559caa196720 | [
"MIT"
] | 67 | 2019-12-23T02:23:41.000Z | 2021-06-22T23:17:50.000Z | Metallicity_Stack_Commons/__init__.py | jefferycline1/Metallicity_Stack_Commons | b726609692bccc623d8a7bf1dde4559caa196720 | [
"MIT"
] | 1 | 2020-11-30T21:47:25.000Z | 2020-11-30T21:47:25.000Z | from logging import Logger
from typing import Union
from chun_codes.cardelli import cardelli
import astropy.units as u
from datetime import date
import os
import getpass
import numpy as np
from .logging import log_stdout, log_verbose
version = "1.4.9"
lambda0 = [3726.18, 4101.73, 4340.46, 4363.21, 4861.32, 4958.91, 5006.84]
line_type = ['Oxy2', 'Balmer', 'Balmer', 'Single', 'Balmer', 'Single', 'Single']
line_name = ['OII_3727', 'HDELTA', 'HGAMMA', 'OIII_4363', 'HBETA', 'OIII_4958',
'OIII_5007']
line_name_short = {
"OII": line_name[0],
"4363": line_name[3],
"HB": line_name[4],
"OIII": line_name[-1],
"HG": line_name[2],
"HD": line_name[1]
}
fitting_lines_dict = {
"lambda0": lambda0,
"line_type": line_type,
"line_name": line_name
}
all_lambda0 = [lambda0[0]] + [3728.91] + lambda0[1:]
all_line_name = ['OII_3726', 'OII_3729'] + line_name[1:]
wavelength_dict = dict(zip(all_line_name, all_lambda0))
fitspath_dict = {
'reagenleimbach': '/Users/reagenleimbach/GoogleDrive/Research/',
'carol': 'C:/Users/carol/Google Drive/',
'cly': '/Users/cly/GoogleDrive/Research/',
'travis': '/home/travis/',
'runner': '/home/runner/'
}
scalefact = 1e-17
# Flux ratio of [OIII]5007 to [OIII]4959
OIII_r = 3.1
# Define k values for dust attenuation
k_values = cardelli(lambda0 * u.Angstrom)
k_dict = dict(zip(line_name, k_values))
def exclude_outliers(objno: Union[list, np.ndarray], verbose: bool = False,
log: Logger = log_stdout()) -> np.ndarray:
"""
Exclude spectra that are identified as outliers.
Generally this is because the spectra have very high S/N on the continuum.
    :param objno: Array of eight-digit identifiers
    :param verbose: Write verbose message to stdout. Default: file only
    :param log: logging.Logger object
    :return: Array of zeros (not flagged) and ones (flagged)
"""
log_verbose(log, "starting ...", verbose=verbose)
flag = np.zeros(len(objno), dtype=int)
bad_data = np.array(['32007727', '32101412', '42006031',
'32035286', '14023705'])
for ii in range(len(bad_data)):
idx = [xx for xx in range(len(objno)) if
bad_data[ii] in str(objno[xx])]
flag[idx] = 1
log_verbose(log, "finished.", verbose=verbose)
return flag
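# Example: exclude_outliers(np.array(['32007727', '99999999'])) returns
# array([1, 0]) -- only the first ID appears in the hard-coded bad_data list.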
def dir_date(folder_name: str, path_init: str = '', year: bool = False,
verbose: bool = False, log: Logger = log_stdout()) \
-> str:
"""
This function finds and returns the path to a directory named after the
current date (MMDDYYYY). If the directory doesn't exist yet, it creates
a new directory named after the current date in the provided
``folder_name`` directory.
Originally from https://github.com/rafia37/Evolution-of-Galaxies/blob/master/general.py
Usage:
fitspath = dir_date(folder_name, year=True)
:param folder_name: Directory for date subdirectory will be in
:param path_init: root path. Default: empty string
:param year: Indicate whether to include year in date folder. Default: False
:param verbose: Write verbose message to stdout. Default: file only
:param log: logging.Logger object
:return: Full path to the date directory
"""
log_verbose(log, "starting ...", verbose=verbose)
today = date.today()
list_path = [path_init, folder_name,
f"{today.month:02d}{today.day:02d}", '']
if year:
list_path[-2] = f"{today.year:d}" + list_path[-2]
fitspath = os.path.join(*list_path)
try:
os.makedirs(fitspath)
except OSError:
log.warning(f"Path already exists : {fitspath}")
log_verbose(log, "finished.", verbose=verbose)
return fitspath
def get_user(username: Union[None, str] = None,
verbose: bool = False, log: Logger = log_stdout()) -> str:
"""
Get the corresponding path for a given ``username``
:param username: Optional input for username
:param verbose: Write verbose message to stdout. Default: file only
:param log: logging.Logger object
    :return: Root path corresponding to the given username
"""
log_verbose(log, "starting ...", verbose=verbose)
if username is None:
username = getpass.getuser()
if username in fitspath_dict.keys():
fitspath = fitspath_dict[username]
else:
log.warning("Incorrect username input")
raise ValueError("Incorrect username input")
log_verbose(log, "finished.", verbose=verbose)
return fitspath
| 29.967105 | 91 | 0.656422 |
acee70ea82878964543c5f9f4be9ecba8dde4c2d | 27,427 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_local_network_gateways_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_local_network_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_local_network_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LocalNetworkGatewaysOperations:
"""LocalNetworkGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "_models.LocalNetworkGateway",
**kwargs: Any
) -> "_models.LocalNetworkGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "_models.LocalNetworkGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.LocalNetworkGateway"]:
"""Creates or updates a local network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local network gateway operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.LocalNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs: Any
) -> "_models.LocalNetworkGateway":
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LocalNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.LocalNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
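    # Illustrative call pattern for the operation above (not generated code;
    # resource names are placeholders):
    #
    #     gateway = await network_client.local_network_gateways.get(
    #         "my-resource-group", "my-local-gateway")
    #     print(gateway.name)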
async def _delete_initial(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
local_network_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.LocalNetworkGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
local_network_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.LocalNetworkGateway"]:
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to update local network gateway tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LocalNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.LocalNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LocalNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LocalNetworkGatewayListResult"]:
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocalNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.LocalNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocalNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LocalNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'} # type: ignore
| 51.651601 | 209 | 0.681919 |
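Editor's note: the generated operations above are normally reached through the service client rather than instantiated directly. A minimal driving sketch follows, assuming the public aio client surface of azure-mgmt-network; the import paths and every angle-bracket name are illustrative assumptions, not taken from this file.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def main():
    credential = DefaultAzureCredential()
    async with NetworkManagementClient(credential, "<subscription-id>") as client:
        # begin_update_tags returns AsyncLROPoller[LocalNetworkGateway]
        poller = await client.local_network_gateways.begin_update_tags(
            "<resource-group>", "<gateway-name>", {"tags": {"env": "test"}}
        )
        updated = await poller.result()
        # list returns an AsyncItemPaged; consume it with async for
        async for gw in client.local_network_gateways.list("<resource-group>"):
            print(gw.name)
        # begin_delete polls until the gateway is gone (200/202/204 accepted)
        delete_poller = await client.local_network_gateways.begin_delete(
            "<resource-group>", "<gateway-name>"
        )
        await delete_poller.result()
    await credential.close()

asyncio.run(main())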
acee712ab106b4fca705d27857c4fc0884c57209 | 1,658 | py | Python | recipes/Python/577155_iterexcept__useful_variant/recipe-577155.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577155_iterexcept__useful_variant/recipe-577155.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577155_iterexcept__useful_variant/recipe-577155.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | def iter_except(func, exception, start=None):
'Call func repeatedly, yielding its results, until exception is raised'
try:
if start is not None:
yield start()
while 1:
yield func()
except exception:
pass
### Examples ####################################################
if __name__ == '__main__':
# Example using BSDDB's last() and next() methods
import bsddb
db = bsddb.btopen('/tmp/spam.db', 'c')
for i in range(10):
db['%d'%i] = '%d'% (i*i)
for k, v in iter_except(db.next, bsddb.error, start=db.first):
print k, v
# Example of fetching tasks from a priority queue
from random import random
from heapq import heappush, heappop
from functools import partial
pq = []
for i in range(10):
heappush(pq, (random(), 'task %d' % i))
for priority, task in iter_except(partial(heappop, pq), IndexError):
print priority, task
# Example of atomic, destructive reads from a dictionary
d = dict(enumerate('abcdefghi'))
for k, v in iter_except(d.popitem, KeyError):
print k, v
# Example of atomic, destructive reads from a deque
import collections
d = collections.deque('abcdefghi')
for v in iter_except(d.popleft, IndexError):
print v
# Example of iterating over a producer Queue:
import Queue
q = Queue.Queue()
for i in range(10):
q.put('*' * i)
for v in iter_except(q.get_nowait, Queue.Empty):
print v
# Example of iterating destructively over a set
s = set('abracadabra')
for elem in iter_except(s.pop, KeyError):
print elem
| 26.741935 | 72 | 0.598311 |
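Editor's note: the recipe above is Python 2 (print statements, bsddb, Queue). A Python 3 sketch of the same pattern, assumed equivalent and not part of the original recipe:
def iter_except(func, exception, start=None):
    """Call func repeatedly, yielding results, until exception is raised."""
    try:
        if start is not None:
            yield start()
        while True:
            yield func()
    except exception:
        pass

d = dict(enumerate("abcdefghi"))
for k, v in iter_except(d.popitem, KeyError):  # atomic, destructive reads
    print(k, v)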
acee7209cd35d08f8bee4bd3410a13761decf251 | 160 | py | Python | project/university/urls.py | minaee/cd557 | 4db87e1412709d1dff70a5d10642814e35a251f1 | [
"MIT"
] | null | null | null | project/university/urls.py | minaee/cd557 | 4db87e1412709d1dff70a5d10642814e35a251f1 | [
"MIT"
] | null | null | null | project/university/urls.py | minaee/cd557 | 4db87e1412709d1dff70a5d10642814e35a251f1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.uni_index, name='uni_index'),
]
| 16 | 48 | 0.68125 |
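Editor's note: views.py is not part of this record; a hypothetical minimal uni_index view that the urlconf above could resolve to:
from django.http import HttpResponse

def uni_index(request):
    return HttpResponse("university index")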
acee7218430df9bac1dc8fb89536fb811b473c88 | 627 | py | Python | 05_communication_thread/decoratorSet.py | 837477/Python_Parallel | 7590ff38e6a2b1dbec139e9571b6564b5bbf276f | [
"MIT"
] | 67 | 2017-08-27T18:00:46.000Z | 2022-03-01T11:54:16.000Z | 05_communication_thread/decoratorSet.py | 837477/python_parallel_programming_study | 2294f4a3de9e9f5b16b6772a3a8d96c37eb63c0f | [
"MIT"
] | 8 | 2018-10-18T17:15:45.000Z | 2021-12-24T01:58:05.000Z | 05_communication_thread/decoratorSet.py | 837477/python_parallel_programming_study | 2294f4a3de9e9f5b16b6772a3a8d96c37eb63c0f | [
"MIT"
] | 49 | 2017-08-18T12:53:30.000Z | 2022-02-06T09:40:50.000Z | from threading import Lock  # was missing; Lock() is used in __init__ below
def locked_method(method):
"""Method decorator. Requires a lock object at self._lock"""
def newmethod(self, *args, **kwargs):
with self._lock:
return method(self, *args, **kwargs)
return newmethod
class DecoratorLockedSet(set):
def __init__(self, *args, **kwargs):
self._lock = Lock()
super(DecoratorLockedSet, self).__init__(*args, **kwargs)
@locked_method
def add(self, elem):
return super(DecoratorLockedSet, self).add(elem)
@locked_method
def remove(self, elem):
return super(DecoratorLockedSet, self).remove(elem)
| 33 | 65 | 0.650718 |
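Editor's note: a small concurrency sketch (not in the source file) exercising DecoratorLockedSet as fixed above; every add() now runs under self._lock.
from threading import Thread

shared = DecoratorLockedSet()
threads = [Thread(target=shared.add, args=(n,)) for n in range(8)]
for th in threads:
    th.start()
for th in threads:
    th.join()
assert shared == set(range(8))  # all adds landed, none lost to a race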
acee721fac1c4d191d4fdc62607543c2de88ea4b | 3,510 | py | Python | creat_seg_map.py | zhonghao-hub/DeepLabv3- | e2d1f8e34a03217eb90c359daf2711bb8cee2ee9 | [
"MIT"
] | null | null | null | creat_seg_map.py | zhonghao-hub/DeepLabv3- | e2d1f8e34a03217eb90c359daf2711bb8cee2ee9 | [
"MIT"
] | null | null | null | creat_seg_map.py | zhonghao-hub/DeepLabv3- | e2d1f8e34a03217eb90c359daf2711bb8cee2ee9 | [
"MIT"
] | null | null | null | import json
import os.path as p
import pandas as pd
import numpy as np
import cv2
from PIL import Image
from tqdm.notebook import tqdm
import os
import pycocotools
import matplotlib.pyplot as plt  # needed by decode_segmap(plot=True)
def decode_segmap(label_mask, plot=False):
r = label_mask.copy()
g = label_mask.copy()
b = label_mask.copy()
for ll in range(0, 21):
r[label_mask == ll] = label_colours[ll, 0]
g[label_mask == ll] = label_colours[ll, 1]
b[label_mask == ll] = label_colours[ll, 2]
rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
label_colours = np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]])
Map= [0, 15, 15, 2, 7, 5, 6, 11, 8, 9, 10, 0]
### Examine COCO format to see how it is done
## Conversion starts here
# set = 'train'
set = 'val'
VISDRONE_ANNOT_DIR = p.join('Visdrone/VisDrone2019-DET-train/annotations_origins')
VISDRONE_IM_DIR = p.join('Visdrone/VisDrone2019-DET-train/images')
# %%
OUTPUT_annot_black = p.join('Visdrone/VisDrone2019-DET-train/annotations_created')
OUTPUT_annot_color = p.join('Visdrone/VisDrone2019-DET-train/annotations_map')
# %%
#### Verify splitting func and CSV reading
# bla = '9999999_00877_d_0000402'.split('_')
# int(bla[0] + bla[1] + bla[3])
#
# csv = pd.read_csv(os.path.join(VISDRONE_ANNOT_DIR, '0000277_04401_d_0000560.txt'),
# names=['x', 'y', 'w', 'h', 'score', 'category', 'truncation', 'occlusion'])
#
# im = Image.open(p.join(VISDRONE_IM_DIR, '0000277_04401_d_0000560.jpg'))
# im_size = im.size
# im.close()
# img = np.zeros((im_size[1], im_size[0]))
# print(im_size[1], im_size[0])
# for r in csv.itertuples():
# print(r.y, r.x, r.h, r.w, r.y+r.h, r.x+r.w, r.category)
# img[r.y : r.y+r.h, r.x : r.x+r.w]=Map[r.category]
#
# cv2.imwrite(os.path.join(OUTPUT_annot_black, '0000277_04401_d_0000560.jpg'), img)
#
# i = cv2.imread(os.path.join(OUTPUT_annot_black, '0000277_04401_d_0000560.jpg'), cv2.IMREAD_GRAYSCALE)
#
# ou = decode_segmap(i, plot=False)
# cv2.imwrite(os.path.join(OUTPUT_annot_color, '0000277_04401_d_0000560.jpg'), ou)
## Convert!
# Iterate all existing CSV annotations
for root, dirs, files in os.walk(VISDRONE_ANNOT_DIR):
for f in tqdm(files):
(r, e) = p.splitext(f)
if e != '.txt':
continue
# Read VisDrone source annotation
print(f)
csv = pd.read_csv(p.join(root, f), names=['x', 'y', 'w', 'h', 'score', 'category', 'truncation', 'occlusion'])
# Get image size
im = Image.open(p.join(VISDRONE_IM_DIR, r + '.jpg'))
im_size = im.size
im.close()
img = np.zeros((im_size[1], im_size[0]))
for k in csv.itertuples():
img[k.y: k.y + k.h, k.x: k.x + k.w] = Map[k.category]
cv2.imwrite(os.path.join(OUTPUT_annot_black, r + '.jpg'), img)
i = cv2.imread(os.path.join(OUTPUT_annot_black, r + '.jpg'), cv2.IMREAD_GRAYSCALE)
ou = decode_segmap(i, plot=False)
cv2.imwrite(os.path.join(OUTPUT_annot_color, r + '.jpg'), ou)
| 28.770492 | 118 | 0.589744 |
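Editor's note: a quick sanity check (not in the source script) for decode_segmap; index 15 is just one entry of the 21-colour VOC-style palette defined above.
toy = np.array([[0, 15], [2, 7]], dtype=np.uint8)    # 2x2 label mask
rgb = decode_segmap(toy)                             # -> (2, 2, 3) colour array
assert rgb.shape == (2, 2, 3)
assert tuple(rgb[0, 1]) == tuple(label_colours[15])  # (192, 128, 128)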
acee72ebb1e20503fdc07f21f1046ac995953aaf | 2,147 | py | Python | tools/gyp/pylib/gyp/generator/dump_dependency_json.py | racker/node | 7338d9b66ee8b15aeb38d0bb3f03861f2458b10b | [
"BSD-2-Clause"
] | 140 | 2017-05-17T03:01:59.000Z | 2022-03-08T08:06:04.000Z | jni/v8core/v8/build/gyp/pylib/gyp/generator/dump_dependency_json.py | shangsony/weex_js_engine | 2bdf4b6f020c1fc99c63f649718f6faf7e27fdde | [
"Apache-2.0"
] | 72 | 2015-02-05T11:42:13.000Z | 2015-12-09T22:18:41.000Z | jni/v8core/v8/build/gyp/pylib/gyp/generator/dump_dependency_json.py | shangsony/weex_js_engine | 2bdf4b6f020c1fc99c63f649718f6faf7e27fdde | [
"Apache-2.0"
] | 48 | 2017-05-15T05:45:00.000Z | 2022-01-23T02:50:47.000Z | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import gyp
import gyp.common
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
filename = 'dump.json'
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
| 32.044776 | 77 | 0.725198 |
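Editor's note: a consumer sketch (not part of the gyp generator) for the dump.json written by GenerateOutput, inverting the target -> dependencies map:
import json

with open('dump.json') as fh:
    edges = json.load(fh)            # {target: [dependency, ...]}

dependents = {}
for target, deps in edges.items():
    for dep in deps:
        dependents.setdefault(dep, []).append(target)
# dependents now answers: "which targets break if this one changes?"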
acee737ad9665dfea5235ec2216921af6c076357 | 28,158 | py | Python | verification/flopy/mt3d/mtuzt.py | INTERA-Inc/mf6cts | 13967af777e88b112b1a9026b35841c322d34bf4 | [
"Unlicense"
] | 2 | 2022-02-23T00:05:14.000Z | 2022-03-14T18:42:24.000Z | verification/flopy/mt3d/mtuzt.py | INTERA-Inc/mf6cts | 13967af777e88b112b1a9026b35841c322d34bf4 | [
"Unlicense"
] | null | null | null | verification/flopy/mt3d/mtuzt.py | INTERA-Inc/mf6cts | 13967af777e88b112b1a9026b35841c322d34bf4 | [
"Unlicense"
] | null | null | null | __author__ = "emorway"
import numpy as np
from ..pakbase import Package
from ..utils import Util2d, Util3d, Transient2d
class Mt3dUzt(Package):
"""
MT3D-USGS Unsaturated-Zone Transport package class
Parameters
----------
model : model object
The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which
this package will be added.
icbcuz : int
Is the unit number to which unsaturated-zone concentration will be
written out.
iet : int
Is a flag that indicates whether or not ET is being simulated in the
UZF1 flow package (=0 indicates that ET is not being simulated).
If ET is not being simulated, IET informs FMI package not to look
for UZET and GWET arrays in the flow-transport link file.
iuzfbnd : array of ints
Specifies which row/column indices variably-saturated transport will
be simulated in.
>0 indicates variably-saturated transport will be simulated;
=0 indicates variably-saturated transport will not be simulated;
<0 Corresponds to IUZFBND < 0 in the UZF1 input package, meaning
that user-supplied values for FINF are specified recharge and
therefore transport through the unsaturated zone is not
simulated.
incuzinf : int
(This value is repeated for each stress period as explained next) A
flag indicating whether an array containing the concentration of
infiltrating water (FINF) for each simulated species (ncomp) will be
read for the current stress period. If INCUZINF >= 0, an array
containing the concentration of infiltrating flux for each species
will be read. If INCUZINF < 0, the concentration of infiltrating flux
will be reused from the previous stress period. If INCUZINF < 0 is
specified for the first stress period, then by default the
concentration of positive infiltrating flux (source) is set equal to
zero. There is no possibility of a negative infiltration flux being
specified. If infiltrating water is rejected due to an infiltration
rate exceeding the vertical hydraulic conductivity, or because
saturation is reached in the unsaturated zone and the water table is
therefore at land surface, the concentration of the runoff will be
equal to CUZINF specified next. The runoff is routed if IRNBND is
specified in the MODFLOW simulation.
cuzinf : array of floats
Is the concentration of the infiltrating flux for a particular species.
An array for each species will be read.
incuzet : int
(This value is repeated for each stress period as explained next) A
flag indicating whether an array containing the concentration of
evapotranspiration flux originating from the unsaturated zone will be
read for the current stress period. If INCUZET >= 0, an array
containing the concentration of evapotranspiration flux originating
from the unsaturated zone for each species will be read. If
INCUZET < 0, the concentration of evapotranspiration flux for each
species will be reused from the last stress period. If INCUZET < 0
is specified for the first stress period, then by default, the
concentration of negative evapotranspiration flux (sink) is set
equal to the aquifer concentration, while the concentration of
positive evapotranspiration flux (source) is set to zero.
cuzet : array of floats
Is the concentration of ET fluxes originating from the unsaturated
zone. As a default, this array is set equal to 0 and only overridden
if the user specifies INCUZET > 1. If empirical evidence suggest
volatilization of simulated constituents from the unsaturated zone,
this may be one mechanism for simulating this process, though it would
depend on the amount of simulated ET originating from the unsaturated
zone. An array for each species will be read.
incgwet : int
(This value is repeated for each stress period as explained next) Is
a flag indicating whether an array containing the concentration of
evapotranspiration flux originating from the saturated zone will be
read for the current stress period. If INCGWET >= 0, an array
containing the concentration of evapotranspiration flux originating
from the saturated zone for each species will be read. If
INCGWET < 0, the concentration of evapotranspiration flux for each
species will be reused from the last stress period. If INCUZET < 0
is specified for the first stress period, then by default, the
concentration of negative evapotranspiration flux (sink) is set to
the aquifer concentration, while the concentration of positive
evapotranspiration flux (source) is set to zero.
cgwet : array of floats
Is the concentration of ET fluxes originating from the saturated zone.
As a default, this array is set equal to 0 and only overridden if the
user specifies INCGWET > 1. An array for each species will be read.
extension : string
Filename extension (default is 'uzt')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the uzf output name will be created using
the model name and uzf concentration observation extension
(for example, modflowtest.cbc and modflowtest.uzcobs.out), if icbcuz
is a number greater than zero. If a single string is passed the
package will be set to the string and uzf concentration observation
output name will be created using the model name and .uzcobs.out
extension, if icbcuz is a number greater than zero. To define the
names for all package files (input and output) the length of the list
of strings should be 2. Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> datadir = 'examples/data/mt3d_test/mfnwt_mt3dusgs/keat_uzf'
>>> mt = flopy.mt3d.Mt3dms.load(
... 'Keat_UZF_mt.nam', exe_name='mt3d-usgs_1.0.00.exe',
... model_ws=datadir, load_only='btn')
>>> uzt = flopy.mt3d.Mt3dUzt.load('Keat_UZF.uzt', mt)
"""
def __init__(
self,
model,
icbcuz=None,
iet=0,
iuzfbnd=None,
cuzinf=None,
cuzet=None,
cgwet=None,
extension="uzt",
unitnumber=None,
filenames=None,
**kwargs,
):
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = Mt3dUzt._defaultunit()
elif unitnumber == 0:
unitnumber = Mt3dUzt._reservedunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
for idx in range(len(filenames), 2):
filenames.append(None)
if icbcuz is not None:
fname = filenames[1]
extension = "uzcobs.out"
model.add_output_file(
icbcuz,
fname=fname,
extension=extension,
binflag=False,
package=Mt3dUzt._ftype(),
)
else:
icbcuz = 0
# Fill namefile items
name = [Mt3dUzt._ftype()]
units = [unitnumber]
extra = [""]
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(
self,
model,
extension=extension,
name=name,
unit_number=units,
extra=extra,
filenames=fname,
)
# Set dimensions
nrow = model.nrow
ncol = model.ncol
nlay = model.nlay
ncomp = model.ncomp
mcomp = model.mcomp
# Set package specific parameters
self.heading1 = "# UZT for MT3D-USGS, generated by Flopy"
self.icbcuz = icbcuz
self.iet = iet
if iuzfbnd is not None:
self.iuzfbnd = Util2d(
self.parent,
(nrow, ncol),
np.int32,
iuzfbnd,
name="iuzfbnd",
locat=self.unit_number[0],
)
# set iuzfbnd based on UZF input file
else:
arr = np.zeros((nrow, ncol), dtype=np.int32)
self.iuzfbnd = Util2d(
self.parent,
(nrow, ncol),
np.int32,
arr,
name="iuzfbnd",
locat=self.unit_number[0],
)
# Note: list is used for multi-species, NOT for stress periods!
if cuzinf is not None:
self.cuzinf = []
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
cuzinf,
name="cuzinf1",
locat=self.unit_number[0],
)
self.cuzinf.append(t2d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
val = 0.0
name = f"cuzinf{icomp}"
if name in list(kwargs.keys()):
val = kwargs.pop(name)
else:
print(
"UZT: setting cuzinf for component "
f"{icomp} to zero. kwarg name {name}"
)
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
val,
name=name,
locat=self.unit_number[0],
)
self.cuzinf.append(t2d)
if cuzet is not None:
self.cuzet = []
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
cuzet,
name="cuzet1",
locat=self.unit_number[0],
)
self.cuzet.append(t2d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
val = 0.0
name = f"cuzet{icomp}"
if name in list(kwargs.keys()):
val = kwargs.pop(name)
else:
print(
"UZT: setting cuzet for component "
f"{icomp} to zero. kwarg name {name}"
)
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
val,
name=name,
locat=self.unit_number[0],
)
self.cuzet.append(t2d)
if cgwet is not None:
self.cgwet = []
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
cgwet,
name="cgwet1",
locat=self.unit_number[0],
)
self.cgwet.append(t2d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
val = 0.0
name = f"cgwet{icomp}"
if name in list(kwargs.keys()):
val = kwargs.pop(name)
else:
print(
"UZT: setting cgwet for component "
f"{icomp} to zero. kwarg name {name}"
)
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
val,
name=name,
locat=self.unit_number[0],
)
self.cgwet.append(t2d)
self.parent.add_package(self)
return
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# Open file for writing
f_uzt = open(self.fn_path, "w")
# Write header
f_uzt.write(f"#{self.heading1}\n")
# Item 2
f_uzt.write(
f"{self.icbcuz:10d}{self.iet:10d} #ICBCUZ, IET\n"
)
# Item 3
f_uzt.write(self.iuzfbnd.get_file_entry())
# Items 4-9
# (Loop through each stress period and write uzt information)
nper = self.parent.nper
for kper in range(nper):
if f_uzt.closed:
f_uzt = open(f_uzt.name, "a")
# Concentrations associated with distributed stresses (Infil, ET)
if self.cuzinf is not None:
# If any species needs to be written, then all need to be
# written
incuzinf = -1
for t2d in self.cuzinf:
incuzinficomp, file_entry = t2d.get_kper_entry(kper)
incuzinf = max(incuzinf, incuzinficomp)
if incuzinf == 1:
break
f_uzt.write(
f"{incuzinf:10d} # INCUZINF - SP {kper + 1:5d}\n"
)
if incuzinf == 1:
for t2d in self.cuzinf:
u2d = t2d[kper]
file_entry = u2d.get_file_entry()
f_uzt.write(file_entry)
if self.iet != 0:
if self.cuzet is not None:
# If any species needs to be written, then all need to be
# written
incuzet = -1
for t2d in self.cuzet:
incuzeticomp, file_entry = t2d.get_kper_entry(kper)
incuzet = max(incuzet, incuzeticomp)
if incuzet == 1:
break
f_uzt.write(
f"{incuzet:10d} # INCUZET - SP {kper + 1:5d}\n"
)
if incuzet == 1:
for t2d in self.cuzet:
u2d = t2d[kper]
file_entry = u2d.get_file_entry()
f_uzt.write(file_entry)
if self.cgwet is not None:
# If any species needs to be written, then all need to be
# written
incgwet = -1
for t2d in self.cgwet:
incgweticomp, file_entry = t2d.get_kper_entry(kper)
incgwet = max(incgwet, incgweticomp)
if incgwet == 1:
break
f_uzt.write(
f"{incgwet:10d} # INCGWET - SP {kper + 1:5d}\n"
)
if incgwet == 1:
for t2d in self.cgwet:
u2d = t2d[kper]
file_entry = u2d.get_file_entry()
f_uzt.write(file_entry)
f_uzt.write("\n")
f_uzt.close()
return
@classmethod
def load(
cls,
f,
model,
nlay=None,
nrow=None,
ncol=None,
nper=None,
ncomp=None,
ext_unit_dict=None,
):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
uzt : Mt3dSsm object
Mt3dUzt object.
Examples
--------
>>> import flopy
>>> mt = flopy.mt3d.Mt3dms()
>>> uzt = flopy.mt3d.Mt3dUzt.load('test.uzt', mt)
"""
if model.verbose:
print("loading uzt package file...\n")
# Open file if necessary
openfile = not hasattr(f, "read")
if openfile:
filename = f
f = open(filename, "r")
# Set dimensions if necessary
if nlay is None:
nlay = model.nlay
if nrow is None:
nrow = model.nrow
if ncol is None:
ncol = model.ncol
if nper is None:
nper = model.nper
if ncomp is None:
ncomp = model.ncomp
# Item 1 (comments, must be preceded by '#')
if model.verbose:
print(" Reading off comment lines...")
i = 1
line = f.readline()
while line[0:1] == "#":
if model.verbose:
print(f" Comment Line {i}: {line.strip()}")
i += 1
line = f.readline()
# Item 2 (ICBCUZ, IET)
if line[0:1] != "#":
# Don't yet read the next line because the current line
# contains the values in item 2
m_arr = line.strip().split()
icbcuz = int(m_arr[0])
iet = int(m_arr[1])
# Item 3 [IUZFBND(NROW,NCOL) (one array for each layer)]
if model.verbose:
print(" loading IUZFBND...")
iuzfbnd = Util2d.load(
f, model, (nrow, ncol), np.int32, "iuzfbnd", ext_unit_dict
)
# kwargs needed to construct cuzinf2, cuzinf3, etc. for multispecies
kwargs = {}
cuzinf = None
# At least one species being simulated, so set up a place holder
t2d = Transient2d(
model, (nrow, ncol), np.float32, 0.0, name="cuzinf", locat=0
)
cuzinf = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cuzinf{icomp}"
t2d = Transient2d(
model, (nrow, ncol), np.float32, 0.0, name=name, locat=0
)
kwargs[name] = {0: t2d}
# Repeat cuzinf initialization procedure for cuzet only if iet != 0
if iet != 0:
cuzet = None
t2d = Transient2d(
model, (nrow, ncol), np.float32, 0.0, name="cuzet", locat=0
)
cuzet = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cuzet{icomp}"
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
0.0,
name=name,
locat=0,
)
kwargs[name] = {0: t2d}
# Repeat cuzinf initialization procedures for cgwet
cgwet = None
t2d = Transient2d(
model, (nrow, ncol), np.float32, 0.0, name="cgwet", locat=0
)
cgwet = {0: t2d}
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cgwet{icomp}"
t2d = Transient2d(
model,
(nrow, ncol),
np.float32,
0.0,
name=name,
locat=0,
)
kwargs[name] = {0: t2d}
elif iet == 0:
cuzet = None
cgwet = None
# Start of transient data
for iper in range(nper):
if model.verbose:
print(f" loading UZT data for kper {iper + 1:5d}")
# Item 4 (INCUZINF)
line = f.readline()
m_arr = line.strip().split()
incuzinf = int(m_arr[0])
# Item 5 (CUZINF)
if incuzinf >= 0:
if model.verbose:
print(f" Reading CUZINF array for kper {iper + 1:5d}")
t = Util2d.load(
f, model, (nrow, ncol), np.float32, "cuzinf", ext_unit_dict
)
cuzinf[iper] = t
# Load each multispecies array
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cuzinf{icomp}"
if model.verbose:
print(f" loading {name}...")
t = Util2d.load(
f,
model,
(nrow, ncol),
np.float32,
name,
ext_unit_dict,
)
cuzinficomp = kwargs[name]
cuzinficomp[iper] = t
elif incuzinf < 0 and iper == 0:
if model.verbose:
print(
" INCUZINF < 0 in first stress period. Setting "
"CUZINF to default value of 0.00 for all calls"
)
# This happens implicitly and is taken care of by
# existing functionality within flopy. This elif
# statement exists for the purpose of printing the message
# above
pass
elif incuzinf < 0 and iper > 0:
if model.verbose:
print(
f" Reusing CUZINF array from kper {iper:5d}"
f" in kper {iper + 1:5d}"
)
if iet != 0:
# Item 6 (INCUZET)
line = f.readline()
m_arr = line.strip().split()
incuzet = int(m_arr[0])
# Item 7 (CUZET)
if incuzet >= 0:
if model.verbose:
print(f" Reading CUZET array for kper {iper + 1:5d}")
t = Util2d.load(
f,
model,
(nrow, ncol),
np.float32,
"cuzet",
ext_unit_dict,
)
cuzet[iper] = t
# Load each multispecies array
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cuzet{icomp}"
if model.verbose:
print(f" loading {name}")
t = Util2d.load(
f,
model,
(nrow, ncol),
np.float32,
name,
ext_unit_dict,
)
cuzeticomp = kwargs[name]
cuzeticomp[iper] = t
elif incuzet < 0 and iper == 0:
if model.verbose:
print(
" INCUZET < 0 in first stress period. Setting "
"CUZET to default value of 0.00 for all calls"
)
# This happens implicitly and is taken care of by
# existing functionality within flopy. This elif
# statement exists for the purpose of printing the message
# above
pass
else:
if model.verbose:
print(
f" Reusing CUZET array from kper {iper:5d}"
f" in kper {iper + 1:5d}"
)
# Item 8 (INCGWET)
line = f.readline()
m_arr = line.strip().split()
incgwet = int(m_arr[0])
# Item 9 (CGWET)
if model.verbose:
if incgwet >= 0:
print(f" Reading CGWET array for kper {iper + 1:5d}")
t = Util2d.load(
f,
model,
(nrow, ncol),
np.float32,
"cgwet",
ext_unit_dict,
)
cgwet[iper] = t
# Load each multispecies array
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = f"cgwet{icomp}"
if model.verbose:
print(f" loading {name}...")
t = Util2d.load(
f,
model,
(nrow, ncol),
np.float32,
name,
ext_unit_dict,
)
cgweticomp = kwargs[name]
cgweticomp[iper] = t
elif incgwet < 0 and iper == 0:
if model.verbose:
print(
" INCGWET < 0 in first stress period. Setting "
"CGWET to default value of 0.00 for all calls"
)
# This happens implicitly and is taken care of by
# existing functionality within flopy. This elif
# statement exists for the purpose of printing the
# message above
pass
elif incgwet < 0 and iper > 0:
if model.verbose:
print(
f" Reusing CGWET array from kper {iper:5d}"
f" in kper {iper + 1:5d}"
)
if openfile:
f.close()
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = model.get_ext_dict_attr(
ext_unit_dict, filetype=Mt3dUzt._ftype()
)
if icbcuz > 0:
iu, filenames[1] = model.get_ext_dict_attr(
ext_unit_dict, unit=icbcuz
)
model.add_pop_key_list(icbcuz)
# Construct and return uzt package
return cls(
model,
icbcuz=icbcuz,
iet=iet,
iuzfbnd=iuzfbnd,
cuzinf=cuzinf,
cuzet=cuzet,
cgwet=cgwet,
unitnumber=unitnumber,
filenames=filenames,
**kwargs,
)
@staticmethod
def _ftype():
return "UZT2"
@staticmethod
def _defaultunit():
return 7
@staticmethod
def _reservedunit():
return 7
| 36.1 | 82 | 0.465765 |
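Editor's note: a construction sketch (not from the flopy source) showing the multi-species kwargs mechanism documented above — extra species arrays arrive as cuzinf2, cuzet2, cgwet2, and so on. Model name, unit numbers, and dimensions here are illustrative assumptions.
import flopy

mt = flopy.mt3d.Mt3dms(modelname="uzt_demo", version="mt3d-usgs")
# ... BTN/ADV/GCG packages (with ncomp=2) are assumed to be defined here ...
uzt = flopy.mt3d.Mt3dUzt(
    mt,
    icbcuz=55,      # unit number for unsaturated-zone concentration output
    iet=0,          # no unsaturated/saturated ET terms in the flow model
    iuzfbnd=1,      # variably-saturated transport active in every cell
    cuzinf=0.0,     # species 1 infiltration concentration (stress period 1)
    cuzinf2=1.0,    # species 2, picked up through **kwargs as shown above
)
uzt.write_file()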
acee74853caaa5c6073ae28b866cce725422467e | 472 | py | Python | utils/utils.py | atlashugs/dqn_pytorch | 131295448e9a993fd5e0a3a509bd76204f644396 | [
"MIT"
] | null | null | null | utils/utils.py | atlashugs/dqn_pytorch | 131295448e9a993fd5e0a3a509bd76204f644396 | [
"MIT"
] | null | null | null | utils/utils.py | atlashugs/dqn_pytorch | 131295448e9a993fd5e0a3a509bd76204f644396 | [
"MIT"
] | null | null | null | import os
import shutil
import numpy as np
import torch as t
def set_requires_grad(network, grad):
for param in network.parameters():
param.requires_grad = grad
def create_exp_logfile(logdir_path):
if not os.path.exists(logdir_path):
os.makedirs(logdir_path)
else:
shutil.rmtree(logdir_path)
os.makedirs(logdir_path)
f_path = logdir_path + '/results.csv'
return open(f_path, 'w+')
def performance_avg(episodes, k):
return np.mean(episodes[-k:])
| 21.454545 | 38 | 0.741525 |
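Editor's note: a usage sketch (not in the source file) for the helpers above in a DQN-style loop — freeze a target network and log per-episode returns; the Linear layer and log directory are placeholders.
import torch as t

net = t.nn.Linear(4, 2)                  # placeholder network for illustration
set_requires_grad(net, False)            # freeze every parameter
assert not any(p.requires_grad for p in net.parameters())

log = create_exp_logfile('./runs/exp1')  # fresh results.csv (old dir removed!)
returns = [1.0, 3.0, 2.0, 5.0]
log.write(f"{performance_avg(returns, k=2)}\n")  # mean of the last 2 episodes
log.close()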
acee749c0bb45488a667761943e73910896ee32c | 2,325 | py | Python | tests/test_parallel.py | appsplash99/sciris | bf09f18d53f7db17d92d1ef4792e3f5aa719cb99 | [
"MIT"
] | null | null | null | tests/test_parallel.py | appsplash99/sciris | bf09f18d53f7db17d92d1ef4792e3f5aa719cb99 | [
"MIT"
] | null | null | null | tests/test_parallel.py | appsplash99/sciris | bf09f18d53f7db17d92d1ef4792e3f5aa719cb99 | [
"MIT"
] | null | null | null | '''
Test parallelization. Not written as pytest tests because it conflicts with pytest's
built-in parallelization, and since functions-within-functions can't be pickled.
'''
import sciris as sc
import pylab as pl
torun = [
'simple',
'embarrassing',
'multiargs',
'noniterated',
'parallelcmd'
]
if 'doplot' not in locals(): doplot = True
if __name__ == '__main__':
#Example 1 -- simple usage as a shortcut to multiprocessing.map():
if 'simple' in torun:
def f(x):
return x*x
results = sc.parallelize(f, [1,2,3])
print(results)
#Example 2 -- simple usage for "embarrassingly parallel" processing:
if 'embarrassing' in torun:
def rnd():
import pylab as pl
return pl.rand()
results = sc.parallelize(rnd, 10)
print(results)
#Example 3 -- using multiple arguments:
if 'multiargs' in torun:
def f(x,y):
return x*y
results1 = sc.parallelize(func=f, iterarg=[(1,2),(2,3),(3,4)])
results2 = sc.parallelize(func=f, iterkwargs={'x':[1,2,3], 'y':[2,3,4]})
results3 = sc.parallelize(func=f, iterkwargs=[{'x':1, 'y':2}, {'x':2, 'y':3}, {'x':3, 'y':4}])
assert results1 == results2 == results3
print(results1)
#Example 4 -- using non-iterated arguments and dynamic load balancing:
if 'noniterated' in torun:
def myfunc(i, x, y):
xy = [x+i*pl.randn(100), y+i*pl.randn(100)]
return xy
xylist1 = sc.parallelize(myfunc, kwargs={'x':3, 'y':8}, iterarg=range(5), maxload=0.8, interval=0.2) # Use kwargs dict
xylist2 = sc.parallelize(myfunc, x=5, y=10, iterarg=[5,10,15]) # Supply kwargs directly
if doplot:
for p,xylist in enumerate([xylist1, xylist2]):
pl.subplot(2,1,p+1)
for i,xy in enumerate(reversed(xylist)):
pl.scatter(xy[0], xy[1], label='Run %i'%i)
pl.legend()
if 'parallelcmd' in torun:
const = 4
parfor = {'val':[3,5,9]}
returnval = 'result'
cmd = """
newval = val+const # Note that this can't be indented
result = newval**2
"""
results = sc.parallelcmd(cmd=cmd, parfor=parfor, returnval=returnval, const=const, maxload=0)
print(results)
| 26.420455 | 126 | 0.578925 |
acee7522c2a6d7b363adfbf537519a60ea381549 | 759 | py | Python | profiles_api/permissions.py | ProtoArkim/profiles-rest-api | 01b427856a7076fc8c77da2784eae0a0303e1896 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | ProtoArkim/profiles-rest-api | 01b427856a7076fc8c77da2784eae0a0303e1896 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | ProtoArkim/profiles-rest-api | 01b427856a7076fc8c77da2784eae0a0303e1896 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class updateOwnProfile(permissions.BasePermission):
"""Allow user to edit their own profile"""
def has_object_permission(self, request, view, obj):
"""Check user is trying to edit their own profile"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
"""Allow users to update their own status"""
def has_object_permission(self, request, view, obj):
"""Check the user is tying to update their own status"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
| 33 | 64 | 0.673254 |
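Editor's note: a wiring sketch (not part of this file) showing how the permission classes above are typically attached to a DRF viewset; ProfileViewSet and the omitted queryset/serializer are hypothetical.
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication

class ProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles."""
    authentication_classes = (TokenAuthentication,)
    # Safe methods (GET/HEAD/OPTIONS) pass; writes must target request.user
    permission_classes = (updateOwnProfile,)
    # queryset and serializer_class omitted from this sketch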