hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
906abee93fd7e753a2591443bd28f75813d6c5f4 | 1,239 | py | Python | 0/spiral_matrix2.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | 4 | 2018-03-07T02:56:03.000Z | 2021-06-15T05:43:31.000Z | 0/spiral_matrix2.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | null | null | null | 0/spiral_matrix2.py | IronCore864/leetcode | a62a4cdde9814ae48997176debcaad537f7ad01f | [
"Apache-2.0"
] | 1 | 2021-09-02T12:05:15.000Z | 2021-09-02T12:05:15.000Z | class Solution(object):
def generateMatrix(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
res = [[0 for _ in range(n)] for _ in range(n)]
i, j = 0, 0
di, dj = 0, 1
for num in range(1, n * n + 1):
res[i][j] = num
if di == 0 and dj == 1 and (j == n - 1 or res[i + di][j + dj] != 0):
di, dj = 1, 0
elif di == 1 and dj == 0 and (i == n - 1 or res[i + di][j + dj] != 0):
di, dj = 0, -1
elif di == 0 and dj == -1 and (i == 0 or res[i + di][j + dj] != 0):
di, dj = -1, 0
elif di == -1 and dj == 0 and (i == 0 or res[i + di][j + dj] != 0):
di, dj = 0, 1
i, j = i + di, j + dj
return res
def printMatrix(m):
    """Print matrix *m* one row per line.

    Uses the function-call form of print so the script runs under both
    Python 2 and Python 3 (the original ``print row`` statement is a
    SyntaxError under Python 3).
    """
    for row in m:
        print(row)
# Smoke-test the spiral generator across a range of matrix sizes (0..10),
# printing each result exactly as the individual calls did.
s = Solution()
for size in range(11):
    printMatrix(s.generateMatrix(size))
| 29.5 | 82 | 0.50686 |
ab00e82e6d65d6ed6998c68f04b3983bcc3154c1 | 17,420 | py | Python | src/bindings/python/src/openvino/runtime/ie_api.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | [
"Apache-2.0"
] | 2 | 2021-12-14T15:27:46.000Z | 2021-12-14T15:34:16.000Z | src/bindings/python/src/openvino/runtime/ie_api.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | [
"Apache-2.0"
] | 33 | 2021-09-23T04:14:30.000Z | 2022-01-24T13:21:32.000Z | src/bindings/python/src/openvino/runtime/ie_api.py | si-eun-kim/openvino | 1db4446e2a6ead55d066e0b4e718fa37f509353a | [
"Apache-2.0"
] | 11 | 2021-11-09T00:51:40.000Z | 2021-11-10T12:04:16.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from functools import singledispatch
from typing import Any, Union, Dict
import numpy as np
from openvino.pyopenvino import Model
from openvino.pyopenvino import Core as CoreBase
from openvino.pyopenvino import CompiledModel as CompiledModelBase
from openvino.pyopenvino import InferRequest as InferRequestBase
from openvino.pyopenvino import AsyncInferQueue as AsyncInferQueueBase
from openvino.pyopenvino import ConstOutput
from openvino.pyopenvino import Tensor
from openvino.pyopenvino import Type
from openvino.pyopenvino import Shape
def tensor_from_file(path: str) -> Tensor:
    """Create Tensor from file. Data will be read with dtype of uint8.

    :param path: Path to a file holding raw binary data.
    :return: Tensor wrapping the whole file contents as a flat uint8 array.
    """
    return Tensor(np.fromfile(path, dtype=np.uint8))
def set_scalar_tensor(
    request: InferRequestBase, tensor: Tensor, key: Union[str, int, ConstOutput] = None
) -> None:
    """Set *tensor* on *request*, choosing the setter that matches *key*.

    ``None`` targets the sole input, an ``int`` targets the input by index,
    and ``str``/``ConstOutput`` target a named tensor; anything else raises.
    """
    if key is None:
        request.set_input_tensor(tensor)
        return
    if isinstance(key, int):
        request.set_input_tensor(key, tensor)
        return
    if isinstance(key, (str, ConstOutput)):
        request.set_tensor(key, tensor)
        return
    raise TypeError(
        "Unsupported key type: {} for Tensor under key: {}".format(type(key), key)
    )
@singledispatch
def update_tensor(
    inputs: Union[np.ndarray, np.number, int, float],
    request: InferRequestBase,
    key: Union[str, int, ConstOutput] = None,
) -> None:
    # Fallback overload of the single-dispatch function: concrete handlers
    # are registered below for np.ndarray and for scalar number types; any
    # other input type lands here and is rejected.
    raise TypeError(
        "Incompatible input data of type {} under {} key!".format(type(inputs), key)
    )
@update_tensor.register(np.ndarray)
def _(
    inputs: np.ndarray,
    request: InferRequestBase,
    key: Union[str, int, ConstOutput] = None,
) -> None:
    """Copy a numpy array into the request's input tensor selected by *key*."""
    # If shape is "empty", assume this is a scalar value
    if not inputs.shape:
        set_scalar_tensor(request, Tensor(inputs), key)
    else:
        # Resolve the destination tensor the same way set_scalar_tensor
        # resolves its target: None -> sole input, int -> input index,
        # str/ConstOutput -> named tensor.
        if key is None:
            tensor = request.get_input_tensor()
        elif isinstance(key, int):
            tensor = request.get_input_tensor(key)
        elif isinstance(key, (str, ConstOutput)):
            tensor = request.get_tensor(key)
        else:
            raise TypeError(
                "Unsupported key type: {} for Tensor under key: {}".format(
                    type(key), key
                )
            )
        # Update shape if there is a mismatch
        if tensor.shape != inputs.shape:
            tensor.shape = inputs.shape
        # When copying, type should be up/down-casted automatically.
        tensor.data[:] = inputs[:]
@update_tensor.register(np.number)
@update_tensor.register(float)
@update_tensor.register(int)
def _(
    inputs: Union[np.number, float, int],
    request: InferRequestBase,
    key: Union[str, int, ConstOutput] = None,
) -> None:
    """Wrap a Python/numpy scalar into a 0-d Tensor and set it on the request."""
    # NOTE(review): np.ndarray(shape, dtype, buffer) is used here to build a
    # 0-d array viewing the scalar's buffer; this relies on type(inputs)
    # being a valid dtype spec and on the buffer layout matching — a plain
    # np.array(inputs) would be the conventional spelling. TODO confirm the
    # intended semantics before changing.
    set_scalar_tensor(
        request, Tensor(np.ndarray([], type(inputs), np.array(inputs))), key
    )
def normalize_inputs(request: InferRequestBase, inputs: dict) -> dict:
    """Helper function to prepare inputs for inference.

    Values of type `np.ndarray`, `np.number`, `int` or `float` are copied
    straight into the request's already-allocated device Tensors, while
    `Tensor` values are collected into a fresh dictionary so the caller's
    `inputs` mapping is never mutated or overwritten.
    """
    # Only Tensor-typed values travel onward to the inference call itself.
    normalized: Dict[Union[str, int, ConstOutput], Tensor] = {}
    for key, value in inputs.items():
        if not isinstance(key, (str, int, ConstOutput)):
            raise TypeError("Incompatible key type for input: {}".format(key))
        if isinstance(value, (np.ndarray, np.number, int, float)):
            # Raw numbers/arrays: write directly into the device tensor.
            update_tensor(value, request, key)
        elif isinstance(value, Tensor):
            # Ready-made Tensors are passed through untouched.
            normalized[key] = value
        else:
            raise TypeError(
                "Incompatible input data of type {} under {} key!".format(type(value), key)
            )
    return normalized
class InferRequest(InferRequestBase):
    """InferRequest class represents infer request which can be run in asynchronous or synchronous manners."""
    def infer(
        self, inputs: Union[dict, list, tuple, Tensor, np.ndarray] = None
    ) -> dict:
        """Infers specified input(s) in synchronous mode.

        Blocks all methods of InferRequest while request is running.
        Calling any method will lead to throwing exceptions.

        The allowed types of keys in the `inputs` dictionary are:

        (1) `int`
        (2) `str`
        (3) `openvino.runtime.ConstOutput`

        The allowed types of values in the `inputs` are:

        (1) `numpy.array`
        (2) `openvino.runtime.Tensor`

        Can be called with only one `openvino.runtime.Tensor` or `numpy.array`,
        it will work only with one-input models. When model has more inputs,
        function throws error.

        :param inputs: Data to be set on input tensors.
        :type inputs: Union[Dict[keys, values], List[values], Tuple[values], Tensor, numpy.array], optional
        :return: Dictionary of results from output tensors with ports as keys.
        :rtype: Dict[openvino.runtime.ConstOutput, numpy.array]
        """
        # If inputs are empty, pass empty dictionary.
        if inputs is None:
            return super().infer({})
        # If inputs are dict, normalize dictionary and call infer method.
        elif isinstance(inputs, dict):
            return super().infer(normalize_inputs(self, inputs))
        # If inputs are list or tuple, enumerate inputs and save them as dictionary.
        # It is an extension of above branch with dict inputs.
        elif isinstance(inputs, (list, tuple)):
            return super().infer(
                normalize_inputs(
                    self, {index: input for index, input in enumerate(inputs)}
                )
            )
        # If inputs are Tensor, call infer method directly.
        elif isinstance(inputs, Tensor):
            return super().infer(inputs)
        # If inputs are single numpy array or scalars, use helper function to copy them
        # directly to Tensor or create temporary Tensor to pass into the InferRequest.
        # Pass empty dictionary to infer method, inputs are already set by helper function.
        elif isinstance(inputs, (np.ndarray, np.number, int, float)):
            update_tensor(inputs, self)
            return super().infer({})
        else:
            raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
    def start_async(
        self,
        inputs: Union[dict, list, tuple, Tensor, np.ndarray] = None,
        userdata: Any = None,
    ) -> None:
        """Starts inference of specified input(s) in asynchronous mode.

        Returns immediately. Inference starts also immediately.
        Calling any method on the `InferRequest` object while the request is running
        will lead to throwing exceptions.

        The allowed types of keys in the `inputs` dictionary are:

        (1) `int`
        (2) `str`
        (3) `openvino.runtime.ConstOutput`

        The allowed types of values in the `inputs` are:

        (1) `numpy.array`
        (2) `openvino.runtime.Tensor`

        Can be called with only one `openvino.runtime.Tensor` or `numpy.array`,
        it will work only with one-input models. When model has more inputs,
        function throws error.

        :param inputs: Data to be set on input tensors.
        :type inputs: Union[Dict[keys, values], List[values], Tuple[values], Tensor, numpy.array], optional
        :param userdata: Any data that will be passed inside the callback.
        :type userdata: Any
        """
        # Branch structure mirrors infer() above; each branch forwards to the
        # C++ base class after normalizing the Python-side inputs.
        if inputs is None:
            super().start_async({}, userdata)
        elif isinstance(inputs, dict):
            super().start_async(normalize_inputs(self, inputs), userdata)
        elif isinstance(inputs, (list, tuple)):
            super().start_async(
                normalize_inputs(
                    self, {index: input for index, input in enumerate(inputs)}
                ),
                userdata,
            )
        elif isinstance(inputs, Tensor):
            super().start_async(inputs, userdata)
        elif isinstance(inputs, (np.ndarray, np.number, int, float)):
            update_tensor(inputs, self)
            return super().start_async({}, userdata)
        else:
            raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
class CompiledModel(CompiledModelBase):
    """CompiledModel class.

    CompiledModel represents Model that is compiled for a specific device by applying
    multiple optimization transformations, then mapping to compute kernels.
    """
    def create_infer_request(self) -> InferRequest:
        """Creates an inference request object used to infer the compiled model.

        The created request has allocated input and output tensors.

        :return: New InferRequest object.
        :rtype: openvino.runtime.InferRequest
        """
        return InferRequest(super().create_infer_request())
    def infer_new_request(
        self, inputs: Union[dict, list, tuple, Tensor, np.ndarray] = None
    ) -> dict:
        """Infers specified input(s) in synchronous mode.

        Blocks all methods of CompiledModel while request is running.

        Method creates new temporary InferRequest and run inference on it.
        It is advised to use a dedicated InferRequest class for performance,
        optimizing workflows, and creating advanced pipelines.

        The allowed types of keys in the `inputs` dictionary are:

        (1) `int`
        (2) `str`
        (3) `openvino.runtime.ConstOutput`

        The allowed types of values in the `inputs` are:

        (1) `numpy.array`
        (2) `openvino.runtime.Tensor`

        Can be called with only one `openvino.runtime.Tensor` or `numpy.array`,
        it will work only with one-input models. When model has more inputs,
        function throws error.

        :param inputs: Data to be set on input tensors.
        :type inputs: Union[Dict[keys, values], List[values], Tuple[values], Tensor, numpy.array], optional
        :return: Dictionary of results from output tensors with ports as keys.
        :rtype: Dict[openvino.runtime.ConstOutput, numpy.array]
        """
        # It returns wrapped python InferRequest and then call upon
        # overloaded functions of InferRequest class
        return self.create_infer_request().infer(inputs)
    def __call__(self, inputs: Union[dict, list] = None) -> dict:
        """Callable infer wrapper for CompiledModel.

        Take a look at `infer_new_request` for reference.
        """
        return self.infer_new_request(inputs)
class AsyncInferQueue(AsyncInferQueueBase):
    """AsyncInferQueue with pool of asynchronous requests.

    AsyncInferQueue represents helper that creates a pool of asynchronous
    InferRequests and provides synchronization functions to control flow of
    a simple pipeline.
    """
    def __getitem__(self, i: int) -> InferRequest:
        """Gets InferRequest from the pool with given i id.

        :param i: InferRequest id.
        :type i: int
        :return: InferRequests from the pool with given id.
        :rtype: openvino.runtime.InferRequest
        """
        return InferRequest(super().__getitem__(i))
    def start_async(
        self,
        inputs: Union[dict, list, tuple, Tensor, np.ndarray] = None,
        userdata: Any = None,
    ) -> None:
        """Run asynchronous inference using the next available InferRequest from the pool.

        The allowed types of keys in the `inputs` dictionary are:

        (1) `int`
        (2) `str`
        (3) `openvino.runtime.ConstOutput`

        The allowed types of values in the `inputs` are:

        (1) `numpy.array`
        (2) `openvino.runtime.Tensor`

        Can be called with only one `openvino.runtime.Tensor` or `numpy.array`,
        it will work only with one-input models. When model has more inputs,
        function throws error.

        :param inputs: Data to be set on input tensors of the next available InferRequest.
        :type inputs: Union[Dict[keys, values], List[values], Tuple[values], Tensor, numpy.array], optional
        :param userdata: Any data that will be passed to a callback.
        :type userdata: Any, optional
        """
        if inputs is None:
            super().start_async({}, userdata)
        elif isinstance(inputs, dict):
            super().start_async(
                normalize_inputs(self[self.get_idle_request_id()], inputs), userdata
            )
        elif isinstance(inputs, (list, tuple)):
            # BUG FIX: the call operator used to sit on its own line, so
            # `super().start_async` was evaluated as a bare attribute and the
            # parenthesized "arguments" formed a discarded tuple expression —
            # inference was never actually started for list/tuple inputs.
            super().start_async(
                normalize_inputs(
                    self[self.get_idle_request_id()],
                    {index: input for index, input in enumerate(inputs)},
                ),
                userdata,
            )
        elif isinstance(inputs, Tensor):
            super().start_async(inputs, userdata)
        elif isinstance(inputs, (np.ndarray, np.number, int, float)):
            update_tensor(inputs, self[self.get_idle_request_id()])
            super().start_async({}, userdata)
        else:
            raise TypeError(f"Incompatible inputs of type: {type(inputs)}")
class Core(CoreBase):
    """Core class represents OpenVINO runtime Core entity.

    User applications can create several Core class instances, but in this
    case, the underlying plugins are created multiple times and not shared
    between several Core instances. The recommended way is to have a single
    Core instance per application.
    """
    def compile_model(
        self, model: Union[Model, str], device_name: str = None, config: dict = None
    ) -> CompiledModel:
        """Creates a compiled model.

        Creates a compiled model from a source Model object or
        reads model and creates a compiled model from IR / ONNX / PDPD file.
        This can be more efficient than using read_model + compile_model(model_in_memory_object) flow,
        especially for cases when caching is enabled and cached model is available.
        If device_name is not specified, the default OpenVINO device will be selected by AUTO plugin.
        Users can create as many compiled models as they need, and use them simultaneously
        (up to the limitation of the hardware resources).

        :param model: Model acquired from read_model function or a path to a model in IR / ONNX / PDPD format.
        :type model: Union[openvino.runtime.Model, str]
        :param device_name: Optional. Name of the device to load the model to. If not specified,
                            the default OpenVINO device will be selected by AUTO plugin.
        :type device_name: str
        :param config: Optional dict of pairs:
                       (property name, property value) relevant only for this load operation.
        :type config: dict, optional
        :return: A compiled model.
        :rtype: openvino.runtime.CompiledModel
        """
        # The pybind11 base exposes two overloads; pick the one matching
        # whether a device was requested, and always pass a concrete dict.
        if device_name is None:
            return CompiledModel(
                super().compile_model(model, {} if config is None else config)
            )
        return CompiledModel(
            super().compile_model(model, device_name, {} if config is None else config)
        )
    def import_model(
        self, model_stream: bytes, device_name: str, config: dict = None
    ) -> CompiledModel:
        """Imports a compiled model from a previously exported one.

        :param model_stream: Input stream, containing a model previously exported, using export_model method.
        :type model_stream: bytes
        :param device_name: Name of device to which compiled model is imported.
                            Note: if device_name is not used to compile the original model,
                            an exception is thrown.
        :type device_name: str
        :param config: Optional dict of pairs:
                       (property name, property value) relevant only for this load operation.
        :type config: dict, optional
        :return: A compiled model.
        :rtype: openvino.runtime.CompiledModel

        :Example:

        .. code-block:: python

            user_stream = compiled.export_model()
            with open('./my_model', 'wb') as f:
                f.write(user_stream)
            # ...
            new_compiled = core.import_model(user_stream, "CPU")

        .. code-block:: python

            user_stream = io.BytesIO()
            compiled.export_model(user_stream)
            with open('./my_model', 'wb') as f:
                f.write(user_stream.getvalue()) # or read() if seek(0) was applied before
            # ...
            new_compiled = core.import_model(user_stream, "CPU")
        """
        return CompiledModel(
            super().import_model(
                model_stream, device_name, {} if config is None else config
            )
        )
def compile_model(model_path: str) -> CompiledModel:
    """Compact method to compile model with AUTO plugin.

    :param model_path: Path to file with model.
    :type model_path: str
    :return: A compiled model.
    :rtype: openvino.runtime.CompiledModel
    """
    # A throwaway Core is sufficient here; AUTO picks the device.
    return Core().compile_model(model_path, "AUTO")
| 38.034934 | 110 | 0.637313 |
398f25678be20df263441ac023cb7550c4aa6549 | 5,588 | py | Python | mrjob/tools/emr/mrboss.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 1,538 | 2015-01-02T10:22:17.000Z | 2022-03-29T16:42:33.000Z | mrjob/tools/emr/mrboss.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 1,027 | 2015-01-09T21:30:37.000Z | 2022-02-26T18:21:42.000Z | mrjob/tools/emr/mrboss.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 403 | 2015-01-06T15:49:44.000Z | 2022-03-29T16:42:34.000Z | # Copyright 2009-2012 Yelp
# Copyright 2015-2018 Yelp
# Copyright 2019 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a command on every node of a cluster. Store stdout and stderr for
results in OUTPUT_DIR.
Usage::
mrjob boss CLUSTER_ID [options] "command string"
Options::
-c CONF_PATHS, --conf-path CONF_PATHS
Path to alternate mrjob.conf file to read from
--no-conf Don't load mrjob.conf even if it's available
--ec2-endpoint EC2_ENDPOINT
Force mrjob to connect to EC2 on this endpoint (e.g.
ec2.us-west-1.amazonaws.com). Default is to infer this
from region.
--ec2-key-pair-file EC2_KEY_PAIR_FILE
Path to file containing SSH key for EMR
--emr-endpoint EMR_ENDPOINT
Force mrjob to connect to EMR on this endpoint (e.g.
us-west-1.elasticmapreduce.amazonaws.com). Default is
to infer this from region.
-h, --help show this help message and exit
-o OUTPUT_DIR, --output-dir OUTPUT_DIR
Specify an output directory (default: CLUSTER_ID)
-q, --quiet Don't print anything to stderr
--region REGION GCE/AWS region to run Dataproc/EMR jobs in.
--s3-endpoint S3_ENDPOINT
Force mrjob to connect to S3 on this endpoint (e.g. s3
-us-west-1.amazonaws.com). You usually shouldn't set
this; by default mrjob will choose the correct
endpoint for each S3 bucket based on its location.
--ssh-bin SSH_BIN Name/path of ssh binary. Arguments are allowed (e.g.
--ssh-bin 'ssh -v')
-v, --verbose print more messages to stderr
"""
from __future__ import print_function
import os
from argparse import ArgumentParser
from mrjob.emr import EMRJobRunner
from mrjob.job import MRJob
from mrjob.options import _add_basic_args
from mrjob.options import _add_runner_args
from mrjob.options import _alphabetize_actions
from mrjob.options import _filter_by_role
from mrjob.py2 import to_unicode
from mrjob.util import shlex_split
def main(cl_args=None):
    """Entry point for ``mrjob boss``: parse CLI args, then run the given
    command string on every node of the target EMR cluster.

    :param cl_args: Optional argv-style list; defaults to ``sys.argv[1:]``.
    """
    usage = 'usage: %(prog)s boss CLUSTER_ID [options] "command string"'
    description = ('Run a command on the master and all worker nodes of an EMR'
                   ' cluster. Store stdout/stderr for results in OUTPUT_DIR.')
    arg_parser = ArgumentParser(usage=usage, description=description)
    arg_parser.add_argument('-o', '--output-dir', dest='output_dir',
                            default=None,
                            help="Specify an output directory (default:"
                            " CLUSTER_ID)")
    arg_parser.add_argument(dest='cluster_id',
                            help='ID of cluster to run command on')
    arg_parser.add_argument(dest='cmd_string',
                            help='command to run, as a single string')
    _add_basic_args(arg_parser)
    _add_runner_args(
        arg_parser,
        {'ec2_key_pair_file', 'ssh_bin'} | _filter_by_role(
            EMRJobRunner.OPT_NAMES, 'connect')
    )
    _alphabetize_actions(arg_parser)
    options = arg_parser.parse_args(cl_args)
    MRJob.set_up_logging(quiet=options.quiet, verbose=options.verbose)
    # Everything parsed except the positionals/logging flags is forwarded
    # as EMRJobRunner keyword options.
    runner_kwargs = options.__dict__.copy()
    for unused_arg in ('cluster_id', 'cmd_string', 'output_dir',
                       'quiet', 'verbose'):
        del runner_kwargs[unused_arg]
    # The command string is split shell-style into an argv list.
    cmd_args = shlex_split(options.cmd_string)
    output_dir = os.path.abspath(options.output_dir or options.cluster_id)
    with EMRJobRunner(
            cluster_id=options.cluster_id, **runner_kwargs) as runner:
        _run_on_all_nodes(runner, output_dir, cmd_args)
def _run_on_all_nodes(runner, output_dir, cmd_args, print_stderr=True):
    """Given an :py:class:`EMRJobRunner`, run the command specified by
    *cmd_args* on all nodes in the cluster and save the stdout and stderr of
    each run to subdirectories of *output_dir*.
    """
    master = runner._address_of_master()
    # The master is addressed directly; workers are reached through it
    # using the 'master!worker' hop notation.
    hosts = [master]
    workers = runner._ssh_worker_hosts()
    if workers:
        hosts.extend('%s!%s' % (master, worker) for worker in workers)
    for host in hosts:
        stdout, stderr = runner.fs.ssh._ssh_run(host, cmd_args)
        if print_stderr:
            print('---')
            print('Command completed on %s.' % host)
            print(to_unicode(stderr), end=' ')
        # Pick the per-node output directory: 'worker <addr>' or 'master'.
        if '!' in host:
            dest_dir = os.path.join(output_dir, 'worker ' + host.split('!')[1])
        else:
            dest_dir = os.path.join(output_dir, 'master')
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        with open(os.path.join(dest_dir, 'stdout'), 'wb') as f:
            f.write(stdout)
        with open(os.path.join(dest_dir, 'stderr'), 'wb') as f:
            f.write(stderr)
# Allow invoking this tool directly as a script.
if __name__ == '__main__':
    main()
| 37.253333 | 79 | 0.639764 |
71ef729f6489a7fa22a82d8485a2a1de21aaab55 | 2,793 | py | Python | tests/test_modelhub_cli.py | univerone/ML-Model-CI | f77635e469477b640a5c2d9b7ad3fe13374ce59e | [
"Apache-2.0"
] | 170 | 2020-06-08T18:30:52.000Z | 2022-03-28T12:08:11.000Z | tests/test_modelhub_cli.py | crazyCoderLi/ML-Model-CI | f77635e469477b640a5c2d9b7ad3fe13374ce59e | [
"Apache-2.0"
] | 146 | 2020-06-14T18:56:27.000Z | 2022-02-27T21:15:59.000Z | tests/test_modelhub_cli.py | univerone/ML-Model-CI | f77635e469477b640a5c2d9b7ad3fe13374ce59e | [
"Apache-2.0"
] | 36 | 2020-06-08T18:30:56.000Z | 2022-03-07T18:10:19.000Z | # test ModelCI CLI with unitest
from pathlib import Path
import requests
import torch
from typer.testing import CliRunner
import torchvision
from modelci.config import app_settings
from modelci.cli.modelhub import app
# Shared CLI runner for all tests below.
runner = CliRunner()
# Local cache location where the ResNet50 weights are downloaded/stored;
# test_get populates file_path and test_convert reads it back.
file_dir = f"{str(Path.home())}/.modelci/ResNet50/PyTorch-PYTORCH/Image_Classification"
Path(file_dir).mkdir(parents=True, exist_ok=True)
file_path = file_dir + "/1.pth"
def test_get():
    """Download pretrained ResNet50 weights via the CLI `get` command.

    NOTE: performs a real network download and writes to the module-level
    `file_path` location, which later tests (test_convert) depend on.
    """
    result = runner.invoke(app, [
        'get',
        'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        f'{str(Path.home())}/.modelci/ResNet50/PyTorch-PYTORCH/Image_Classification/1.pth'
    ])
    assert result.exit_code == 0
    assert "model downloaded successfully" in result.stdout
def test_publish():
    """Publish the example ResNet50 model definition and expect success.

    The CLI prints the service response, so success is asserted on the
    rendered "'status': True" text rather than structured output.
    """
    result = runner.invoke(app, [
        'publish', '-f', 'example/resnet50.yml'
    ])
    assert result.exit_code == 0
    assert "\'status\': True" in result.stdout
def test_ls():
    """List published models through the CLI and expect a clean exit."""
    outcome = runner.invoke(app, ['ls'])
    assert outcome.exit_code == 0
def test_detail():
    """Look up the first published model's id and show its detail view."""
    with requests.get(f'{app_settings.api_v1_prefix}/model/') as resp:
        first_id = resp.json()[0]["id"]
    outcome = runner.invoke(app, ['detail', first_id])
    assert outcome.exit_code == 0
def test_update():
    """Bump the version of the first published model via the CLI."""
    with requests.get(f'{app_settings.api_v1_prefix}/model/') as resp:
        first_id = resp.json()[0]["id"]
    outcome = runner.invoke(app, ['update', first_id, '--version', '2'])
    assert outcome.exit_code == 0
def test_delete():
    """Delete the first published model and check the confirmation message."""
    with requests.get(f'{app_settings.api_v1_prefix}/model/') as resp:
        target_id = resp.json()[0]["id"]
    outcome = runner.invoke(app, ['delete', target_id])
    assert outcome.exit_code == 0
    assert outcome.output == f"Model {target_id} deleted\n"
def test_convert():
    """Re-save the downloaded weights as a full model, then run `convert`.

    NOTE(review): this test depends on test_get having downloaded the state
    dict to `file_path` earlier in the session — confirm test ordering is
    guaranteed by the runner before relying on it in isolation.
    """
    torch_model = torchvision.models.resnet50(pretrained=False)
    torch_model.load_state_dict(torch.load(file_path))
    # Overwrite the state-dict file with a whole pickled model object,
    # which is what the convert command expects.
    torch.save(torch_model, file_path)
    result = runner.invoke(app, [
        'convert', '-f', 'example/resnet50.yml'
    ])
    assert result.exit_code == 0
def test_profile():
    """Publish a TorchScript model, profile it on CPU, then clean up.

    NOTE(review): if no model with engine "TORCHSCRIPT" is returned,
    `torchscript_id` stays None and is passed to the profile command —
    presumably the publish above guarantees one exists; verify.
    """
    runner.invoke(app, [
        'publish', '-f', 'example/resnet50_torchscript.yml'
    ])
    with requests.get(f'{app_settings.api_v1_prefix}/model/') as r:
        model_list = r.json()
    # Find the id of the first TorchScript-engine model.
    torchscript_id = None
    for model in model_list:
        if model['engine'] == "TORCHSCRIPT":
            torchscript_id = model['id']
            break
    result = runner.invoke(app, [
        'profile', torchscript_id, '-d', 'cpu'
    ])
    # Clean up: delete the first listed model regardless of profile outcome.
    with requests.get(f'{app_settings.api_v1_prefix}/model/') as r:
        model_list = r.json()
    model_id = model_list[0]["id"]
    runner.invoke(app, ['delete', model_id])
    assert result.exit_code == 0
| 28.793814 | 90 | 0.659506 |
3e8e4e86696ba69cbacafd79eec1297e141a8c8b | 4,963 | py | Python | Documentation/Cookbook/Scripts/otbGenerateExamplesRstDoc.py | heralex/OTB | c52b504b64dc89c8fe9cac8af39b8067ca2c3a57 | [
"Apache-2.0"
] | 317 | 2015-01-19T08:40:58.000Z | 2022-03-17T11:55:48.000Z | Documentation/Cookbook/Scripts/otbGenerateExamplesRstDoc.py | guandd/OTB | 707ce4c6bb4c7186e3b102b2b00493a5050872cb | [
"Apache-2.0"
] | 18 | 2015-07-29T14:13:45.000Z | 2021-03-29T12:36:24.000Z | Documentation/Cookbook/Scripts/otbGenerateExamplesRstDoc.py | guandd/OTB | 707ce4c6bb4c7186e3b102b2b00493a5050872cb | [
"Apache-2.0"
] | 132 | 2015-02-21T23:57:25.000Z | 2022-03-25T16:03:16.000Z | #!/usr/bin/env python3
#
# Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES)
#
# This file is part of Orfeo Toolbox
#
# https://www.orfeo-toolbox.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import os.path
from os.path import join
from collections import defaultdict
import re
import glob
from rst_utils import rst_section, RstPageHeading, examples_usage_regex
def generate_examples_index(rst_dir, list_of_examples):
    """Write ``Examples.rst`` plus one per-tag index page under *rst_dir*.

    :param rst_dir: Destination directory for the generated rst files.
    :param list_of_examples: Example paths of the form
        ``Examples/<tag>/<name>.cxx`` (relative to the OTB root).
    """
    # Compute dictionary of tag -> (list of examples)
    tag_files = defaultdict(list)
    for filename in list_of_examples:
        tag = filename.split("/")[1]
        name, _ = os.path.splitext(filename.split("/")[2])
        tag_files[tag].append(join(tag, name + ".rst"))
    # Render index file and tag index files.
    # Use context managers so handles are flushed/closed even on error
    # (the top-level index file was previously opened without ever being
    # closed).
    os.makedirs(join(rst_dir, "Examples"), exist_ok=True)
    with open(join(rst_dir, "Examples.rst"), "w") as index_f:
        index_f.write(RstPageHeading("C++ Examples", 3, ref="cpp-examples"))
        for tag, examples_filenames in sorted(tag_files.items()):
            tag_filename = join("Examples", tag + ".rst")
            index_f.write("\t" + tag_filename + "\n")
            with open(join(rst_dir, tag_filename), "w") as tag_f:
                tag_f.write(RstPageHeading(tag, 3))
                for examples_filename in examples_filenames:
                    tag_f.write("\t" + examples_filename + "\n")
def indent(str):
    """Prefix every line of *str* (empty lines included) with four spaces."""
    return "\n".join("    " + line for line in str.split("\n"))
def cpp_uncomment(code):
    # Strip '// '
    # Drops the first four characters of every line (the comment lead-in
    # plus one character of padding), matching how the examples are written.
    return "\n".join(line[4:] for line in code.split("\n"))
def render_example(filename, otb_root):
    """Render a cxx example to rst.

    :param filename: Example path relative to *otb_root* (``Examples/<tag>/<name>.cxx``).
    :param otb_root: Root of the OTB source tree.
    :return: The rendered rst document as a string.
    """
    def _read(path):
        # Helper so every file handle is closed deterministically (the
        # previous bare open(...).read() calls leaked handles).
        with open(path) as f:
            return f.read()
    # Read the source code of the cxx example
    code = _read(join(otb_root, filename))
    # Don't show the license header to make it nicer,
    # and the cookbook is already under a CC license
    examples_license_header = _read("templates/examples_license_header.txt")
    code = code.replace(examples_license_header, "")
    # Extract usages
    example_usage = ""
    usage_matches = list(re.finditer(examples_usage_regex, code, flags = re.MULTILINE | re.DOTALL))
    examples_usage_template = _read("templates/example_usage.rst")
    for match in usage_matches:
        example_usage += examples_usage_template.format(indent(match.group(1).strip()))
    # Don't show usage in example source
    code = re.sub(examples_usage_regex, "", code, flags = re.MULTILINE | re.DOTALL)
    # Make the link to the source code
    link_name = os.path.basename(filename)
    link_href = "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/raw/develop/" + filename + "?inline=false"
    # Read the description from the example .rst file if it exists
    example_rst_file = join(otb_root, filename.replace(".cxx", ".rst"))
    if os.path.isfile(example_rst_file):
        rst_description = _read(example_rst_file)
    else:
        rst_description = ""
    # Render the template
    name = os.path.basename(filename)
    template_example = _read("templates/example.rst")
    output_rst = template_example.format(
        label=name,
        heading=rst_section(name, "="),
        description=rst_description,
        usage=example_usage,
        code=indent(code.strip()),
        link_name=link_name,
        link_href=link_href
    )
    return output_rst
def main():
    """Parse CLI args, then generate rst indexes and one rst page per example."""
    parser = argparse.ArgumentParser(usage="Export examples to rst")
    parser.add_argument("rst_dir", help="Directory where rst files are generated")
    parser.add_argument("otb_root", help="OTB repository root")
    args = parser.parse_args()
    # Get list of cxx examples as relative paths from otb_root
    list_of_examples = [os.path.relpath(p, start=args.otb_root) for p in sorted(glob.glob(join(args.otb_root, "Examples/*/*.cxx")))]
    print("Generating rst for {} examples".format(len(list_of_examples)))
    # Generate example index and tag indexes
    generate_examples_index(join(args.rst_dir, "C++"), list_of_examples)
    # Generate examples rst
    for filename in list_of_examples:
        name = os.path.basename(filename)
        tag = filename.split("/")[1]
        root, ext = os.path.splitext(name)
        os.makedirs(join(args.rst_dir, "C++", "Examples", tag), exist_ok=True)
        with open(join(args.rst_dir, "C++", "Examples", tag, root + ".rst"), "w") as output_file:
            output_file.write(render_example(filename, args.otb_root))
| 36.226277 | 132 | 0.68507 |
517afc8e40d402895ea89b6dfbb64d289e9c215c | 1,315 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_managed_disk_parameters_py3.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_managed_disk_parameters_py3.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/virtual_machine_scale_set_managed_disk_parameters_py3.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetManagedDiskParameters(Model):
    """Describes the parameters of a ScaleSet managed disk.
    :param storage_account_type: Specifies the storage account type for the
     managed disk. Possible values are: Standard_LRS, Premium_LRS, and
     StandardSSD_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS',
     'StandardSSD_LRS'
    :type storage_account_type: str or
     ~azure.mgmt.compute.v2018_06_01.models.StorageAccountTypes
    """
    # Attribute-name -> {JSON wire name, msrest type} map consumed by the
    # msrest serialization layer of the base `Model` class.
    _attribute_map = {
        'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
    }
    def __init__(self, *, storage_account_type=None, **kwargs) -> None:
        # Any extra keyword arguments are handled by the msrest base Model.
        super(VirtualMachineScaleSetManagedDiskParameters, self).__init__(**kwargs)
        self.storage_account_type = storage_account_type
| 39.848485 | 83 | 0.660076 |
af18d0966046c99ec5b7964c1af4653e709f815e | 11,484 | py | Python | lang/python/github/com/metaprov/modelaapi/services/review/v1/review_pb2_grpc.py | metaprov/modeldapi | ee05693832051dcd990ee4f061715d7ae0787340 | [
"Apache-2.0"
] | 5 | 2022-02-18T03:40:10.000Z | 2022-03-01T16:11:24.000Z | lang/python/github/com/metaprov/modelaapi/services/review/v1/review_pb2_grpc.py | metaprov/modeldapi | ee05693832051dcd990ee4f061715d7ae0787340 | [
"Apache-2.0"
] | 1 | 2022-01-07T19:59:25.000Z | 2022-02-04T01:21:14.000Z | lang/python/github/com/metaprov/modelaapi/services/review/v1/review_pb2_grpc.py | metaprov/modeldapi | ee05693832051dcd990ee4f061715d7ae0787340 | [
"Apache-2.0"
] | 1 | 2022-03-25T10:21:43.000Z | 2022-03-25T10:21:43.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from github.com.metaprov.modelaapi.services.review.v1 import review_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2
class ReviewServiceStub(object):
    """Client-side stub for the ReviewService gRPC service.

    Generated code (do not edit): exposes one unary-unary callable per RPC,
    each wired to the request/response messages generated from review.proto.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute below is a callable created by `channel.unary_unary`
        # and bound to the matching protobuf serializer/deserializer pair.
        self.ListReviews = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/ListReviews',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewResponse.FromString,
                )
        self.CreateReview = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/CreateReview',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewResponse.FromString,
                )
        self.GetReview = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/GetReview',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewResponse.FromString,
                )
        self.UpdateReview = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/UpdateReview',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewResponse.FromString,
                )
        self.DeleteReview = channel.unary_unary(
                '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/DeleteReview',
                request_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewRequest.SerializeToString,
                response_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewResponse.FromString,
                )
class ReviewServiceServicer(object):
    """Server-side interface for the ReviewService gRPC service.

    Generated code (do not edit): subclass and override the handlers below.
    Each default implementation answers with the UNIMPLEMENTED status code.
    """

    def ListReviews(self, request, context):
        """Handler for the ListReviews RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateReview(self, request, context):
        """Handler for the CreateReview RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetReview(self, request, context):
        """Handler for the GetReview RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateReview(self, request, context):
        """Handler for the UpdateReview RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteReview(self, request, context):
        """Handler for the DeleteReview RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ReviewServiceServicer_to_server(servicer, server):
    """Register ``servicer``'s RPC handlers with a grpc ``server``.

    Generated code (do not edit): builds one unary-unary method handler per
    RPC and attaches them under the fully-qualified service name.
    """
    rpc_method_handlers = {
            'ListReviews': grpc.unary_unary_rpc_method_handler(
                    servicer.ListReviews,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewResponse.SerializeToString,
            ),
            'CreateReview': grpc.unary_unary_rpc_method_handler(
                    servicer.CreateReview,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewResponse.SerializeToString,
            ),
            'GetReview': grpc.unary_unary_rpc_method_handler(
                    servicer.GetReview,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewResponse.SerializeToString,
            ),
            'UpdateReview': grpc.unary_unary_rpc_method_handler(
                    servicer.UpdateReview,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewResponse.SerializeToString,
            ),
            'DeleteReview': grpc.unary_unary_rpc_method_handler(
                    servicer.DeleteReview,
                    request_deserializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewRequest.FromString,
                    response_serializer=github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'github.com.metaprov.modelaapi.services.review.v1.ReviewService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ReviewService(object):
    """Experimental one-shot client API for the ReviewService gRPC service.

    Generated code (do not edit), part of gRPC's EXPERIMENTAL API: each
    static method invokes the RPC of the same name on ``target`` through
    ``grpc.experimental.unary_unary`` without requiring an explicit stub.
    """

    @staticmethod
    def ListReviews(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/ListReviews',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.ListReviewResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def CreateReview(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/CreateReview',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.CreateReviewResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetReview(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/GetReview',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.GetReviewResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateReview(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/UpdateReview',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.UpdateReviewResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteReview(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/github.com.metaprov.modelaapi.services.review.v1.ReviewService/DeleteReview',
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewRequest.SerializeToString,
            github_dot_com_dot_metaprov_dot_modelaapi_dot_services_dot_review_dot_v1_dot_review__pb2.DeleteReviewResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 57.708543 | 168 | 0.737287 |
614d572ae61f8ab850131de032cc7ef605cf5a1d | 435 | py | Python | al5d/scripts/local_config.py | pouyaAB/ros_teleoperate | 4fe993b558b1b96a8483fe9161989ecf348dcdfc | [
"MIT"
] | null | null | null | al5d/scripts/local_config.py | pouyaAB/ros_teleoperate | 4fe993b558b1b96a8483fe9161989ecf348dcdfc | [
"MIT"
] | null | null | null | al5d/scripts/local_config.py | pouyaAB/ros_teleoperate | 4fe993b558b1b96a8483fe9161989ecf348dcdfc | [
"MIT"
] | null | null | null | config = {}
# Teleoperation/recording settings, merged into the shared ``config`` dict
# created just above this block. (Key meanings follow the names; confirm
# against the consumers of local_config before relying on them.)
config.update({
    'record_path': '/home/d3gan/development/datasets/record/real_time',
    'robot_command_file':
        '/home/d3gan/development/datasets/record/real_time/commands.csv',
    'image_size': 64,
    'task': '5002',
    # ROS image topics for the three cameras.
    'camera_topics': ['/camera1/usb_cam1/image_raw',
                      '/camera2/usb_cam2/image_raw',
                      '/camera3/usb_cam3/image_raw'],
    # Per-camera enable flags (only camera 2 enabled here).
    'cameras_switch': [False, True, False],
    'record_human': True,
})
| 36.25 | 119 | 0.742529 |
f16237dae9d1799ab8cc26c1b9c14daf1832acb9 | 56,171 | py | Python | spyder/api/plugins.py | feiser2016/spyder | b6e7a45f8bb12b9be6b279218c44e19f603685e8 | [
"MIT"
] | null | null | null | spyder/api/plugins.py | feiser2016/spyder | b6e7a45f8bb12b9be6b279218c44e19f603685e8 | [
"MIT"
] | null | null | null | spyder/api/plugins.py | feiser2016/spyder | b6e7a45f8bb12b9be6b279218c44e19f603685e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
spyder.api.plugins
==================
Here, 'plugins' are Qt objects that can make changes to Spyder's
main window and call other plugins directly.
There are two types of plugins available:
1. SpyderPluginV2 is a plugin that does not create a new dock/pane on Spyder's
main window. Note: SpyderPluginV2 will be renamed to SpyderPlugin once the
migration to the new API is finished
2. SpyderDockablePlugin is a plugin that does create a new dock/pane on
Spyder's main window.
"""
# Standard library imports
from collections import OrderedDict
import inspect
import os
# Third party imports
from qtpy.QtCore import QObject, Qt, Signal, Slot, QSize
from qtpy.QtGui import QCursor
from qtpy.QtWidgets import QApplication, QWidget
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.menus import ApplicationMenus
from spyder.api.toolbars import ApplicationToolBars
from spyder.api.translations import get_translation
from spyder.api.widgets import PluginMainContainer, PluginMainWidget
from spyder.api.widgets.menus import ApplicationMenu
from spyder.api.widgets.mixins import (SpyderActionMixin, SpyderOptionMixin,
SpyderWidgetMixin)
from spyder.api.widgets.toolbars import ApplicationToolBar
from spyder.config.gui import get_color_scheme, get_font
from spyder.config.manager import CONF # TODO: Remove after migration
from spyder.config.user import NoDefault
from spyder.plugins.base import BasePluginMixin, BasePluginWidgetMixin
from spyder.py3compat import configparser as cp
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import create_action
# Localization
_ = get_translation('spyder')
# =============================================================================
# SpyderPlugin
# =============================================================================
class BasePlugin(BasePluginMixin):
    """
    Basic functionality for Spyder plugins.

    WARNING: Don't override any methods or attributes present here!
    """
    # Use this signal to display a message in the status bar.
    # str: The message you want to display
    # int: Amount of time to display the message
    sig_show_status_message = Signal(str, int)

    # Use this signal to inform another plugin that a configuration
    # value has changed.
    sig_option_changed = Signal(str, object)

    def __init__(self, parent=None):
        # Bare super() (instead of the legacy two-argument form) for
        # consistency with SpyderPluginV2 in this same module.
        super().__init__(parent)

        # This is the plugin parent, which corresponds to the main
        # window.
        self.main = parent

        # Filesystem path to the root directory that contains the
        # plugin.
        self.PLUGIN_PATH = self._get_plugin_path()

        # Connect signals to slots.
        self.sig_show_status_message.connect(self.show_status_message)
        self.sig_option_changed.connect(self.set_option)

    @Slot(str)
    @Slot(str, int)
    def show_status_message(self, message, timeout=0):
        """
        Show message in main window's status bar.

        Parameters
        ----------
        message: str
            Message to display in the status bar.
        timeout: int
            Amount of time to display the message.
        """
        super()._show_status_message(message, timeout)

    @Slot(str, object)
    def set_option(self, option, value, section=None):
        """
        Set an option in Spyder configuration file.

        Parameters
        ----------
        option: str
            Name of the option (e.g. 'case_sensitive')
        value: bool, int, str, tuple, list, dict
            Value to save in configuration file, passed as a Python
            object.

        Notes
        -----
        * Use sig_option_changed to call this method from widgets of the
          same or another plugin.
        * CONF_SECTION needs to be defined for this to work.
        """
        super()._set_option(option, value, section=section)

    def get_option(self, option, default=NoDefault, section=None):
        """
        Get an option from Spyder configuration file.

        Parameters
        ----------
        option: str
            Name of the option to get its value from.

        Returns
        -------
        bool, int, str, tuple, list, dict
            Value associated with `option`.
        """
        return super()._get_option(option, default, section=section)

    def starting_long_process(self, message):
        """
        Show a message in main window's status bar and change the mouse
        cursor to Qt.WaitCursor when starting a long process.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long
            process starts.
        """
        super()._starting_long_process(message)

    def ending_long_process(self, message=""):
        """
        Clear main window's status bar after a long process and restore
        the mouse cursor to the OS default.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long process
            finishes.
        """
        super()._ending_long_process(message)
class SpyderPlugin(BasePlugin):
    """
    Spyder plugin class.

    All plugins *must* inherit this class and reimplement its interface.
    """
    # ---------------------------- ATTRIBUTES ---------------------------------
    # Configuration section used to record the plugin's permanent data in
    # Spyder's config system (i.e. in spyder.ini).
    # Status: Optional
    CONF_SECTION = None

    # One-line localized description of the features the plugin implements.
    # Status: Optional
    DESCRIPTION = None

    # Widget shown as this plugin's entry in Spyder's Preferences dialog.
    # Status: Optional
    CONFIGWIDGET_CLASS = None

    # Whether the plugin keeps its options in a separate configuration file.
    # Status: Optional
    CONF_FILE = True

    # Configuration defaults when a separate file is used: list of
    # (section-name, defaults-dict) tuples.
    # Status: Optional
    #
    # CONF_DEFAULTS_EXAMPLE = [
    #     ('section-name', {'option-1': 'some-value',
    #                       'option-2': True,}),
    #     ('another-section-name', {'option-3': 'some-other-value',
    #                               'option-4': [1, 2, 3],}),
    # ]
    CONF_DEFAULTS = None

    # Configuration version when a separate file is used.
    # Status: Optional
    #
    # IMPORTANT NOTES:
    # 1. *Changing* the default value of a current option requires a MINOR
    #    version bump, e.g. from 3.0.0 to 3.1.0
    # 2. *Removing* options that are no longer needed, or *renaming*
    #    options, requires a MAJOR bump, e.g. from 3.0.0 to 4.0.0
    # 3. Simply adding a new option requires no version change
    CONF_VERSION = None

    # ------------------------------ METHODS ----------------------------------
    def check_compatibility(self):
        """
        Reimplement to verify the plugin is compatible with a given
        condition (e.g. the installed Qt bindings).

        Returns
        -------
        (bool, str)
            The bool tells Spyder whether the plugin passed the check; the
            str is the message shown to users at startup in a QMessageBox
            when the plugin is found to be incompatible (e.g. 'This plugin
            does not work with PyQt4').
        """
        # Compatible by default; subclasses override with real checks.
        return True, ''
# =============================================================================
# SpyderPluginWidget
# =============================================================================
class BasePluginWidget(QWidget, BasePluginWidgetMixin):
    """
    Basic functionality for Spyder plugin widgets.

    WARNING: Don't override any methods or attributes present here!
    """
    # Signal used to update the plugin title when it's undocked
    sig_update_plugin_title = Signal()

    def __init__(self, main=None):
        # Bare super() (instead of the legacy two-argument form) for
        # consistency with SpyderPluginV2 in this same module; the MRO
        # and call sequence are unchanged.
        super().__init__(main)

        # Dockwidget for the plugin, i.e. the pane that's going to be
        # displayed in Spyder for this plugin.
        # Note: This is created when you call the `add_dockwidget`
        # method, which must be done in the `register_plugin` one.
        self.dockwidget = None

    def add_dockwidget(self):
        """Add the plugin's QDockWidget to the main window."""
        super()._add_dockwidget()

    def tabify(self, core_plugin):
        """
        Tabify plugin next to one of the core plugins.

        Parameters
        ----------
        core_plugin: SpyderPluginWidget
            Core Spyder plugin this one will be tabified next to.

        Examples
        --------
        >>> self.tabify(self.main.variableexplorer)
        >>> self.tabify(self.main.ipyconsole)

        Notes
        -----
        The names of variables associated with each of the core plugins
        can be found in the `setup` method of `MainWindow`, present in
        `spyder/app/mainwindow.py`.
        """
        super()._tabify(core_plugin)

    def get_font(self, rich_text=False):
        """
        Return plain or rich text font used in Spyder.

        Parameters
        ----------
        rich_text: bool
            Return rich text font (i.e. the one used in the Help pane)
            or plain text one (i.e. the one used in the Editor).

        Returns
        -------
        QFont:
            QFont object to be passed to other Qt widgets.

        Notes
        -----
        All plugins in Spyder use the same, global font. This is a
        convenience method in case some plugins want to use a delta
        size based on the default one. That can be controlled by using
        FONT_SIZE_DELTA or RICH_FONT_SIZE_DELTA (declared in
        `SpyderPluginWidget`).
        """
        return super()._get_font(rich_text)

    def register_shortcut(self, qaction_or_qshortcut, context, name,
                          add_shortcut_to_tip=False):
        """
        Register a shortcut associated to a QAction or a QShortcut to
        Spyder main application.

        Parameters
        ----------
        qaction_or_qshortcut: QAction or QShortcut
            QAction to register the shortcut for or QShortcut.
        context: str
            Name of the plugin this shortcut applies to. For instance,
            if you pass 'Editor' as context, the shortcut will only
            work when the editor is focused.
            Note: You can use '_' if you want the shortcut to be work
            for the entire application.
        name: str
            Name of the action the shortcut refers to (e.g. 'Debug
            exit').
        add_shortcut_to_tip: bool
            If True, the shortcut is added to the action's tooltip.
            This is useful if the action is added to a toolbar and
            users hover it to see what it does.
        """
        super()._register_shortcut(
            qaction_or_qshortcut,
            context,
            name,
            add_shortcut_to_tip)

    def register_widget_shortcuts(self, widget):
        """
        Register shortcuts defined by a plugin's widget so they take
        effect when the plugin is focused.

        Parameters
        ----------
        widget: QWidget
            Widget to register shortcuts for.

        Notes
        -----
        The widget interface must have a method called
        `get_shortcut_data` for this to work. Please see
        `spyder/widgets/findreplace.py` for an example.
        """
        for qshortcut, context, name in widget.get_shortcut_data():
            self.register_shortcut(qshortcut, context, name)

    def get_color_scheme(self):
        """
        Get the current color scheme.

        Returns
        -------
        dict
            Dictionary with properties and colors of the color scheme
            used in the Editor.

        Notes
        -----
        This is useful to set the color scheme of all instances of
        CodeEditor used by the plugin.
        """
        return super()._get_color_scheme()

    def switch_to_plugin(self):
        """
        Switch to this plugin.

        Notes
        -----
        This operation unmaximizes the current plugin (if any), raises
        this plugin to view (if it's hidden) and gives it focus (if
        possible).
        """
        super()._switch_to_plugin()
class SpyderPluginWidget(SpyderPlugin, BasePluginWidget):
    """
    Spyder plugin widget class.

    All plugin widgets *must* inherit this class and reimplement its
    interface.
    """
    # ---------------------------- ATTRIBUTES ---------------------------------
    # Location of the plugin's images, relative to the plugin path.
    # Status: Optional
    IMG_PATH = 'images'

    # Size deltas applied to this plugin's fonts relative to the global
    # fonts defined by Spyder.
    # Status: Optional
    FONT_SIZE_DELTA = 0
    RICH_FONT_SIZE_DELTA = 0

    # Whether actions this plugin adds to Spyder's main menus are disabled
    # while the plugin is not visible.
    # Status: Optional
    DISABLE_ACTIONS_WHEN_HIDDEN = True

    # Keyboard shortcut that gives focus to the plugin. Spyder tries to
    # reserve Ctrl+Shift+... combinations for these actions.
    # Status: Optional
    shortcut = None

    # ------------------------------ METHODS ----------------------------------
    def get_plugin_title(self):
        """
        Return the plugin's title (str, the plugin name).

        Must be reimplemented by every plugin widget.
        """
        raise NotImplementedError

    def get_plugin_icon(self):
        """Return the plugin's associated icon (a QIcon instance)."""
        return ima.icon('outline_explorer')

    def get_focus_widget(self):
        """
        Return the QWidget to give focus to when the plugin's dockwidget
        is raised on top-level, or None.
        """
        pass

    def closing_plugin(self, cancelable=False):
        """
        Perform actions before the main window is closed.

        Return whether the plugin may be closed immediately (bool); the
        returned value is ignored when *cancelable* is False.
        """
        return True

    def refresh_plugin(self):
        """
        Refresh the plugin after it receives focus (e.g. to keep the
        Variable Explorer in sync with the focused IPython console).
        """
        pass

    def get_plugin_actions(self):
        """
        Return the list of QActions shown in the plugin's Options menu
        (the hamburger menu on the right of each plugin).
        """
        return []

    def register_plugin(self):
        """
        Register the plugin in Spyder's main window and connect it to
        other plugins.

        If this method is overridden, the `add_dockwidget` call made here
        still has to be performed.
        """
        self.add_dockwidget()

    def on_first_registration(self):
        """
        Action performed the first time the plugin is registered; mostly
        used to tabify it next to a core plugin, e.g.:

            self.tabify(self.main.variableexplorer)
        """
        raise NotImplementedError

    def apply_plugin_settings(self, options):
        """Apply configuration settings to the plugin."""
        pass

    def update_font(self):
        """Reimplemented by plugins that need to adjust their fonts."""
        pass

    def toggle_view(self, checked):
        """
        Show/hide the dockwidget when the plugin's entry in the menu
        `View > Panes` is toggled.

        Redefining this can be useful to run actions whenever the plugin
        becomes visible; see `spyder/plugins/ipythonconsole/plugin.py`.
        """
        dock = self.dockwidget
        if not dock:
            return
        if not checked:
            dock.hide()
            return
        dock.show()
        dock.raise_()

    def set_ancestor(self, ancestor):
        """
        Update the ancestor/parent of child widgets when undocking.
        """
        pass
##############################################################################
#
# New API: Migrate plugins one by one and test changes on the way.
#
##############################################################################
class Plugins:
    """
    Convenience class for accessing Spyder internal plugins.

    Each attribute holds the NAME identifier the corresponding plugin is
    registered under in the main window.
    """
    Breakpoints = 'breakpoints'
    CodeAnalysis = 'code_analysis'
    # Completion framework and its providers.
    CodeCompletion = 'code_completion'
    KiteCompletion = 'kite'
    FallBackCompletion = 'fallback'
    LanguageServerCompletion = 'lsp'
    Console = 'internal_console'
    Editor = 'editor'
    Explorer = 'explorer'
    Find = 'find_in_files'
    Help = 'help'
    History = 'historylog'
    IPythonConsole = 'ipython_console'
    OnlineHelp = 'online_help'
    OutlineExplorer = 'outline_explorer'
    Plots = 'plots'
    Profiler = 'profiler'
    Projects = 'project_explorer'
    Pylint = 'pylint'
    VariableExplorer = 'variable_explorer'
    WorkingDirectory = 'workingdir'
# --- Base API plugins
# ----------------------------------------------------------------------------
class SpyderPluginV2(QObject, SpyderActionMixin, SpyderOptionMixin):
"""
A Spyder plugin to extend functionality without a dockable widget.
If you want to create a plugin that adds a new pane, please use
SpyderDockableWidget.
"""
# --- API: Mandatory attributes ------------------------------------------
# ------------------------------------------------------------------------
# Name of the plugin that will be used to refer to it.
# This name must be unique and will only be loaded once.
NAME = None
# --- API: Optional attributes ------------------------------------------
# ------------------------------------------------------------------------
# List of required plugin dependencies.
# Example: [Plugins.Plots, Plugins.IPythonConsole, ...].
# These values are defined in the `Plugins` class present in this file.
# If a plugin is using a widget from another plugin, that other
# must be declared as a required dependency.
REQUIRES = None
# List of optional plugin dependencies.
# Example: [Plugins.Plots, Plugins.IPythonConsole, ...].
# These values are defined in the `Plugins` class present in this file.
# A plugin might be performing actions when connectiong to other plugins,
# but the main functionality of the plugin does not depend on other
# plugins. For example, the Help plugin might render information from
# the Editor or from the Console or from another source, but it does not
# depend on either of those plugins.
# Methods in the plugin that make use of optional plugins must check
# existence before using those methods or applying signal connections.
OPTIONAL = None
# This must subclass a `PluginMainContainer` for non dockable plugins that
# create a widget, like a status bar widget, a toolbar, a menu, etc.
# For non dockable plugins that do not define widgets of any kind this can
# be `None`, for example a plugin that only exposes a configuration page.
CONTAINER_CLASS = None
# Name of the configuration section that's going to be
# used to record the plugin's permanent data in Spyder
# config system (i.e. in spyder.ini)
CONF_SECTION = None
# Use a separate configuration file for the plugin.
CONF_FILE = True
# Define configuration defaults if using a separate file.
# List of tuples, with the first item in the tuple being the section
# name and the second item being the default options dictionary.
#
# CONF_DEFAULTS_EXAMPLE = [
# ('section-name', {'option-1': 'some-value',
# 'option-2': True,}),
# ('another-section-name', {'option-3': 'some-other-value',
# 'option-4': [1, 2, 3],}),
# ]
CONF_DEFAULTS = None
# Define configuration version if using a separate file
#
# IMPORTANT NOTES:
# 1. If you want to *change* the default value of a current option, you
# need to do a MINOR update in config version, e.g. from 3.0.0 to 3.1.0
# 2. If you want to *remove* options that are no longer needed or if you
# want to *rename* options, then you need to do a MAJOR update in
# version, e.g. from 3.0.0 to 4.0.0
# 3. You don't need to touch this value if you're just adding a new option
CONF_VERSION = None
# Widget to be used as entry in Spyder Preferences dialog.
CONF_WIDGET_CLASS = None
# Some widgets may use configuration options from other plugins.
# This variable helps translate CONF to options when the option comes
# from another plugin.
# Example:
# CONF_FROM_OPTIONS = {'widget_option': ('section', 'option'), ...}
# See: spyder/plugins/console/plugin.py
CONF_FROM_OPTIONS = None
# Path for images relative to the plugin path
# A Python package can include one or several Spyder plugins. In this case
# the package may be using images from a global folder outside the plugin
# folder
IMG_PATH = 'images'
# Control the font size relative to the global fonts defined in Spyder
FONT_SIZE_DELTA = 0
RICH_FONT_SIZE_DELTA = 0
# --- API: Signals -------------------------------------------------------
# ------------------------------------------------------------------------
# Signals here are automatically connected by the Spyder main window and
# connected to the the respective global actions defined on it.
# Request garbage collection of deleted objects
sig_free_memory_requested = Signal()
# Request the main application to quit.
sig_quit_requested = Signal()
# Request the main application to restart.
sig_restart_requested = Signal()
# Request the main application to display a message in the status bar.
sig_status_message_requested = Signal(str, int)
# Request the main application to redirect standard output/error when
# using Open/Save/Browse dialogs within widgets.
sig_redirect_stdio_requested = Signal(bool)
# Signals below are not automatically connected by the Spyder main window
# Emit this signal when the plugin focus has changed.
sig_focus_changed = Signal()
# This signal is fired when any option in the child widgets is modified.
sig_option_changed = Signal(str, object)
# --- Private attributes -------------------------------------------------
# ------------------------------------------------------------------------
# Define configuration name map for plugin to split configuration
# among several files. See spyder/config/main.py
_CONF_NAME_MAP = None
    def __init__(self, parent, configuration=None):
        """
        Create the plugin and, when CONTAINER_CLASS is set, its container.

        Parameters
        ----------
        parent: QMainWindow
            Spyder main window, kept in both `self._main` and `self.main`.
        configuration: object, optional
            Spyder configuration manager used to read/write options.
        """
        super().__init__(parent)
        self._main = parent
        self._widget = None
        self._conf = configuration
        # Directory of the module that defines the concrete plugin class.
        self._plugin_path = os.path.dirname(inspect.getfile(self.__class__))
        self._container = None
        self._added_toolbars = OrderedDict()
        self._actions = {}
        # Tri-state flags: None = not yet determined.
        self.is_compatible = None
        self.is_registered = None
        # Public alias of `_main`; some call sites use `self.main`.
        self.main = parent
        if self.CONTAINER_CLASS is not None:
            # Seed the container options from the configuration system.
            options = self.options_from_conf(
                self.CONTAINER_CLASS.DEFAULT_OPTIONS)
            self._container = container = self.CONTAINER_CLASS(
                name=self.NAME,
                plugin=self,
                parent=parent,
                options=options,
            )
            # Widget setup
            # ----------------------------------------------------------------
            # `_setup` is optional on containers; missing attribute is fine.
            try:
                container._setup(options=options)
            except AttributeError:
                pass
            if isinstance(container, SpyderWidgetMixin):
                container.setup(options=options)
                container.update_actions()
            # Set options without emitting a signal
            container.change_options(options=options)
            # Bubble child option changes up through the plugin signal.
            container.sig_option_changed.connect(self.sig_option_changed)
            if isinstance(container, PluginMainContainer):
                container.sig_redirect_stdio_requested.connect(
                    self.sig_redirect_stdio_requested)
    # --- Private methods ----------------------------------------------------
    # ------------------------------------------------------------------------
    def _register(self):
        """
        Setup and register plugin in Spyder's main window and connect it to
        other plugins.

        Raises
        ------
        SpyderAPIError
            If NAME is undefined or a plugin with the same NAME exists.
        """
        # Checks
        # --------------------------------------------------------------------
        if self.NAME is None:
            raise SpyderAPIError('A Spyder Plugin must define a `NAME`!')
        if self.NAME in self._main._PLUGINS:
            raise SpyderAPIError(
                'A Spyder Plugin with NAME="{}" already exists!'.format(
                    self.NAME))
        # Setup configuration
        # --------------------------------------------------------------------
        if self._conf is not None:
            self._conf.register_plugin(self)
        # Signals
        # --------------------------------------------------------------------
        # Persist option changes coming from child widgets into the config.
        self.sig_option_changed.connect(self.set_conf_option)
        self.is_registered = True
        self.update_font()
    def _unregister(self):
        """
        Disconnect signals and clean up the plugin to be able to stop it while
        Spyder is running.
        """
        try:
            self.sig_option_changed.disconnect()
        except TypeError:
            # disconnect() raises TypeError when nothing is connected;
            # ignore it so teardown is idempotent.
            pass
        if self._conf is not None:
            # NOTE(review): `register_plugin(self)` passes the plugin but
            # `unregister_plugin()` does not -- confirm this matches the
            # configuration manager's API.
            self._conf.unregister_plugin()
        self._container = None
        self.is_compatible = None
        self.is_registered = False
    # --- API: available methods ---------------------------------------------
    # ------------------------------------------------------------------------
    def get_path(self):
        """
        Return the plugin's system path.

        Returns
        -------
        str
            Absolute directory of the module defining this plugin class
            (computed once in `__init__`).
        """
        return self._plugin_path
    def get_container(self):
        """
        Return the plugin main container.

        Returns None when CONTAINER_CLASS was not defined.
        """
        return self._container
    def get_configuration(self):
        """
        Return the Spyder configuration object.

        Returns None when the plugin was created without a configuration.
        """
        return self._conf
    def get_main(self):
        """
        Return the Spyder main window.
        """
        return self._main
def get_plugin(self, plugin_name):
"""
Return a plugin instance by providing the plugin's NAME.
"""
# Ensure that this plugin has the plugin corresponding to
# `plugin_name` listed as required or optional.
requires = self.REQUIRES or []
optional = self.OPTIONAL or []
deps = []
for dependency in requires + optional:
deps.append(dependency)
PLUGINS = self._main._PLUGINS
if plugin_name in PLUGINS:
for name, plugin_instance in PLUGINS.items():
if name == plugin_name and name in deps:
return plugin_instance
else:
raise SpyderAPIError(
'Plugin "{}" not found!'.format(plugin_name))
else:
raise SpyderAPIError(
'Plugin "{}" not part of REQUIRES or '
'OPTIONAL requirements!'.format(plugin_name)
)
def options_from_conf(self, options):
"""
Get `options` values from the configuration system.
Returns
-------
Dictionary of {str: object}
"""
conf_from_options = self.CONF_FROM_OPTIONS or {}
config_options = {}
if self._conf is not None:
# options could be a list, or a dictionary
for option in options:
if option in conf_from_options:
section, new_option = conf_from_options[option]
else:
section, new_option = (self.CONF_SECTION, option)
try:
config_options[option] = self.get_conf_option(
new_option,
section=section,
)
except (cp.NoSectionError, cp.NoOptionError):
# TODO: Remove when migration is done, move to logger.
# Needed to check how the options API needs to cover
# options from all plugins
print('\nspyder.api.plugins.options_from_conf\n'
'Warning: option "{}" not found in section "{}" '
'of configuration!'.format(option, self.NAME))
# Currently when the preferences dialog is used, a set of
# changed options is passed.
# This method can get the values from the DEFAULT_OPTIONS
# of the PluginMainWidget or the PluginMainContainer
# subclass if `options`is a dictionary instead of a set
# of options.
if isinstance(options, (dict, OrderedDict)):
try:
config_options[option] = options[option]
except Exception:
pass
return config_options
def get_conf_option(self, option, default=NoDefault, section=None):
"""
Get an option from Spyder configuration system.
Parameters
----------
option: str
Name of the option to get its value from.
default: bool, int, str, tuple, list, dict, NoDefault
Value to get from the configuration system, passed as a
Python object.
section: str
Section in the configuration system, e.g. `shortcuts`.
Returns
-------
bool, int, str, tuple, list, dict
Value associated with `option`.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
return self._conf.get(section, option, default)
@Slot(str, object)
@Slot(str, object, str)
def set_conf_option(self, option, value, section=None):
"""
Set an option in Spyder configuration system.
Parameters
----------
option: str
Name of the option (e.g. 'case_sensitive')
value: bool, int, str, tuple, list, dict
Value to save in the configuration system, passed as a
Python object.
section: str
Section in the configuration system, e.g. `shortcuts`.
"""
if self._conf is not None:
section = self.CONF_SECTION if section is None else section
if section is None:
raise SpyderAPIError(
'A spyder plugin must define a `CONF_SECTION` class '
'attribute!'
)
self._conf.set(section, str(option), value)
    def apply_conf(self, options_set):
        """
        Apply `options_set` to this plugin's widget.

        Parameters
        ----------
        options_set: iterable of str
            Names of options to read from the configuration system and
            push into the container.
        """
        if self._conf is not None:
            container = self.get_container()
            # The container might not implement the SpyderWidgetMixin API
            # for example a completion client that only implements the
            # completion client interface without any options.
            if isinstance(container, SpyderWidgetMixin):
                options = self.options_from_conf(options_set)
                # NOTE(review): `options_from_keys` is not defined in this
                # class -- presumably provided by a base class; confirm.
                new_options = self.options_from_keys(
                    options,
                    container.DEFAULT_OPTIONS,
                )
                # By using change_options we will not emit sig_option_changed
                # when setting the options
                # This will also cascade on all children
                container.change_options(new_options)
    @Slot(str)
    @Slot(str, int)
    def show_status_message(self, message, timeout=0):
        """
        Show message in status bar.

        Parameters
        ----------
        message: str
            Message to display in the status bar.
        timeout: int
            Amount of time to display the message.
        """
        # Forward to the main window, which owns the status bar; the two
        # Slot overloads allow connections with or without a timeout.
        self.sig_status_message_requested.emit(message, timeout)
    def before_long_process(self, message):
        """
        Show a message in main window's status bar and change the mouse
        pointer to Qt.WaitCursor when starting a long process.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long process starts.
            Falsy values (e.g. '') show nothing.
        """
        if message:
            self.show_status_message(message)
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        # Process pending events so the cursor change is visible right away.
        QApplication.processEvents()
    def after_long_process(self, message=""):
        """
        Clear main window's status bar after a long process and restore
        mouse pointer to the OS default.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long process finishes.
        """
        QApplication.restoreOverrideCursor()
        # timeout=2000 is forwarded to the status bar handler (presumably
        # milliseconds, per Qt's QStatusBar API -- handled by the main
        # window).
        self.show_status_message(message, timeout=2000)
        QApplication.processEvents()
    def get_color_scheme(self):
        """
        Get the current color scheme.

        Returns
        -------
        dict
            Dictionary with properties and colors of the color scheme
            used in the Editor, or None when no configuration manager is
            available.

        Notes
        -----
        This is useful to set the color scheme of all instances of
        CodeEditor used by the plugin.
        """
        if self._conf is not None:
            # Delegate to the global helper using the user-selected scheme.
            return get_color_scheme(self._conf.get('appearance', 'selected'))
    @staticmethod
    def create_icon(name, path=None):
        """
        Provide icons from the theme and icon manager.

        Parameters
        ----------
        name: str
            Icon identifier understood by the icon manager (`ima`).
        path: str, optional
            Alternative icon location, forwarded as `icon_path`.
        """
        return ima.icon(name, icon_path=path)
@classmethod
def get_font(cls, rich_text=False):
"""
Return plain or rich text font used in Spyder.
Parameters
----------
rich_text: bool
Return rich text font (i.e. the one used in the Help pane)
or plain text one (i.e. the one used in the Editor).
Returns
-------
QFont
QFont object to be passed to other Qt widgets.
Notes
-----
All plugins in Spyder use the same, global font. This is a convenience
method in case some plugins want to use a delta size based on the
default one. That can be controlled by using FONT_SIZE_DELTA or
RICH_FONT_SIZE_DELTA (declared in `SpyderPlugin`).
"""
if rich_text:
option = 'rich_font'
font_size_delta = cls.RICH_FONT_SIZE_DELTA
else:
option = 'font'
font_size_delta = cls.FONT_SIZE_DELTA
return get_font(option=option, font_size_delta=font_size_delta)
    def get_actions(self):
        """
        Return a dictionary of actions exposed by the plugin and child widgets.

        It returns all actions defined by the Spyder plugin widget, whether it
        is a PluginMainWidget or PluginMainContainer subclass.

        Notes
        -----
        1. Actions should be created once. Creating new actions on menu popup
           is *highly* discouraged.
        2. Actions can be created directly on a PluginMainWidget or
           PluginMainContainer subclass. Child widgets can also create
           actions, but they need to subclass SpyderWidgetMixin.
        3. The PluginMainWidget or PluginMainContainer will collect any
           actions defined in subwidgets (if defined) and expose them in
           the get_actions method at the plugin level.
        4. Any action created this way is now exposed as a possible shortcut
           automatically without manual shortcut registration.
           If an option is found in the config system then it is assigned,
           otherwise it's left with an empty shortcut.
        5. There is no need to override this method.
        """
        container = self.get_container()
        actions = container.get_actions() if container is not None else {}
        # Base-class actions are merged on top of the container's.
        actions.update(super().get_actions())
        return actions
def get_action(self, name):
"""
Return action defined in any of the child widgets by name.
"""
container = self.get_container()
if container is not None:
actions = container.get_actions()
if name in actions:
return actions[name]
else:
raise SpyderAPIError('Action "{0}" not found! Available '
'actions are: {1}'.format(name, actions))
    # --- API: Mandatory methods to define -----------------------------------
    # ------------------------------------------------------------------------
    def get_name(self):
        """
        Return the plugin localized name.

        Returns
        -------
        str
            Localized name of the plugin.

        Raises
        ------
        NotImplementedError
            Always; concrete plugins must override this method.

        Notes
        -----
        This is a method to be able to update localization without a restart.
        """
        raise NotImplementedError('A plugin name must be defined!')
    def get_description(self):
        """
        Return the plugin localized description.

        Returns
        -------
        str
            Localized description of the plugin.

        Raises
        ------
        NotImplementedError
            Always; concrete plugins must override this method.

        Notes
        -----
        This is a method to be able to update localization without a restart.
        """
        raise NotImplementedError('A plugin description must be defined!')
    def get_icon(self):
        """
        Return the plugin associated icon.

        Returns
        -------
        QIcon
            QIcon instance

        Raises
        ------
        NotImplementedError
            Always; concrete plugins must override this method.
        """
        raise NotImplementedError('A plugin icon must be defined!')
    def register(self):
        """
        Setup and register plugin in Spyder's main window and connect it to
        other plugins.

        Raises
        ------
        NotImplementedError
            Always; concrete plugins must override this method.
        """
        raise NotImplementedError('Must define a register method!')
    # --- API: Optional methods to override ----------------------------------
    # ------------------------------------------------------------------------
    def unregister(self):
        """
        Disconnect signals and clean up the plugin to be able to stop it while
        Spyder is running.

        Optional hook; the default implementation does nothing.
        """
        pass
@staticmethod
def check_compatibility():
"""
This method can be reimplemented to check compatibility of a plugin
with the user's current environment.
Returns
-------
(bool, str)
The first value tells Spyder if the plugin has passed the
compatibility test defined in this method. The second value
is a message that must explain users why the plugin was
found to be incompatible (e.g. 'This plugin does not work
with PyQt4'). It will be shown at startup in a QMessageBox.
"""
valid = True
message = '' # Note: Remeber to use _('') to localize the string
return valid, message
    def on_first_registration(self):
        """
        Actions to be performed the first time the plugin is started.

        It can also be used to perform actions that are needed only the
        first time this is loaded after installation.

        This method is called after the main window is visible.

        Optional hook; the default implementation does nothing.
        """
        pass
    def on_mainwindow_visible(self):
        """
        Actions to be performed after the main window's has been shown.

        Optional hook; the default implementation does nothing.
        """
        pass
    def on_close(self, cancelable=False):
        """
        Perform actions before the main window is closed.

        Returns
        -------
        bool
            Whether the plugin may be closed immediately or not.

        Notes
        -----
        The returned value is ignored if *cancelable* is False.
        """
        # Default: the plugin never blocks closing.
        return True
    def update_font(self):
        """
        This must be reimplemented by plugins that need to adjust their fonts.

        Called automatically at the end of `_register`.

        The following plugins illustrate the usage of this method:
            * spyder/plugins/help/plugin.py
            * spyder/plugins/onlinehelp/plugin.py
        """
        pass
    def update_style(self):
        """
        This must be reimplemented by plugins that need to adjust their style.

        Changing from the dark to the light interface theme might
        require specific styles or stylesheets to be applied. When
        the theme is changed by the user through our Preferences,
        this method will be called for all plugins.

        Optional hook; the default implementation does nothing.
        """
        pass
# --- API Application Menus
# ------------------------------------------------------------------------
def add_application_menu(self, name, menu):
"""
Add menu to the application.
"""
if name in self._main._APPLICATION_MENUS:
raise SpyderAPIError(
'Menu with name "{}" already added!'.format(name))
self._main._APPLICATION_MENUS[name] = menu
self._main.menuBar().addMenu(menu)
    def add_item_to_application_menu(self, item, menu, section=None,
                                     before=None):
        """
        Add action or widget `item` to given application menu `section`.

        `section` and `before` are currently ignored (see TODO below).
        """
        # FIXME: Enable when new API is activated
        # Check that menu is an ApplicationMenu
        # if not isinstance(menu, ApplicationMenu):
        #     raise SpyderAPIError('Not an ApplicationMenu!')
        # TODO: For now just add the item to the bottom.
        # Temporal solution while API for managing app menus is created
        app_menu_actions = {
            ApplicationMenus.File: self._main.file_menu_actions,
            ApplicationMenus.Edit: self._main.edit_menu_actions,
            ApplicationMenus.Search: self._main.search_menu_actions,
            ApplicationMenus.Source: self._main.source_menu_actions,
            ApplicationMenus.Run: self._main.run_menu_actions,
            ApplicationMenus.Debug: self._main.debug_menu_actions,
            ApplicationMenus.Consoles: self._main.consoles_menu_actions,
            ApplicationMenus.Projects: self._main.projects_menu_actions,
            ApplicationMenus.Tools: self._main.tools_menu_actions,
            # ApplicationMenus.View: self._main.view_menu_actions,
            ApplicationMenus.Help: self._main.help_menu_actions,
        }
        actions = app_menu_actions[menu.name]
        # A None entry is appended first -- presumably rendered as a
        # separator by Spyder's menu helpers (TODO confirm).
        actions.append(None)
        actions.append(item)
    def get_application_menu(self, name):
        """
        Return an application menu by name.

        Side effect: tags every main-window menu with a `name` attribute so
        `add_item_to_application_menu` can map menus back to action lists.
        """
        # TODO: Temporal solution while API for managing app menus is created
        self._main.file_menu.name = ApplicationMenus.File
        self._main.edit_menu.name = ApplicationMenus.Edit
        self._main.search_menu.name = ApplicationMenus.Search
        self._main.source_menu.name = ApplicationMenus.Source
        self._main.run_menu.name = ApplicationMenus.Run
        self._main.debug_menu.name = ApplicationMenus.Debug
        self._main.consoles_menu.name = ApplicationMenus.Consoles
        self._main.projects_menu.name = ApplicationMenus.Projects
        self._main.tools_menu.name = ApplicationMenus.Tools
        self._main.view_menu.name = ApplicationMenus.View
        self._main.help_menu.name = ApplicationMenus.Help
        app_menus = {
            ApplicationMenus.File: self._main.file_menu,
            ApplicationMenus.Edit: self._main.edit_menu,
            ApplicationMenus.Search: self._main.search_menu,
            ApplicationMenus.Source: self._main.source_menu,
            ApplicationMenus.Run: self._main.run_menu,
            ApplicationMenus.Debug: self._main.debug_menu,
            ApplicationMenus.Consoles: self._main.consoles_menu,
            ApplicationMenus.Projects: self._main.projects_menu,
            ApplicationMenus.Tools: self._main.tools_menu,
            ApplicationMenus.View: self._main.view_menu,
            ApplicationMenus.Help: self._main.help_menu,
        }
        if name in app_menus:
            return app_menus[name]
        else:
            raise SpyderAPIError(
                'Application menu "{0}" not found! Available '
                'menus are: {1}'.format(name, list(app_menus.keys()))
            )
    # --- API Application Toolbars
    # ------------------------------------------------------------------------
    def add_application_toolbar(self, name, toolbar):
        """
        Add toolbar to application toolbars.

        Parameters
        ----------
        name: str
            Unique identifier for the toolbar.
        toolbar: QToolBar
            Toolbar to register and add to the main window.
        """
        if name in self._main._APPLICATION_TOOLBARS:
            raise SpyderAPIError(
                'Toolbar with name "{}" already added!'.format(name))
        # TODO: Make the icon size adjustable in Preferences later on.
        iconsize = 24
        toolbar.setIconSize(QSize(iconsize, iconsize))
        self._main._APPLICATION_TOOLBARS[name] = toolbar
        # Also tracked per-plugin for get_registered_application_toolbars.
        self._added_toolbars[name] = toolbar
        # `self.main` is the same object as `self._main` (both set in
        # __init__); kept as-is for compatibility.
        self.main.addToolBar(toolbar)
def add_item_to_application_toolbar(self, item, toolbar, section=None,
before=None):
"""
Add action or widget `item` to given application toolbar section.
"""
if not isinstance(toolbar, ApplicationToolBar):
raise SpyderAPIError('Not an ApplicationMenu!')
toolbar.addAction(item)
def get_application_toolbar(self, name):
"""
Return an application toolbar by name.
"""
# TODO: Temporal solution while API for managing app menus is created
app_toolbars = {
ApplicationToolBars.File: self._main.file_toolbar,
ApplicationToolBars.Run: self._main.run_toolbar,
ApplicationToolBars.Debug: self._main.debug_toolbar,
ApplicationToolBars.Main: self._main.main_toolbar,
ApplicationToolBars.Search: self._main.search_toolbar,
ApplicationToolBars.Edit: self._main.edit_toolbar,
ApplicationToolBars.Source: self._main.source_toolbar,
}
if name in app_toolbars:
return app_toolbars[name]
else:
raise SpyderAPIError(
'Application toolbar "{0}" not found! '
'Available toolbars are: {1}'.format(
name,
list(app_toolbars.keys())
)
)
    def get_application_toolbars(self):
        """
        Return all created application toolbars.

        Returns
        -------
        dict
            Mapping of name to toolbar for every toolbar registered in the
            main window (by any plugin).
        """
        return self._main._APPLICATION_TOOLBARS
    def get_registered_application_toolbars(self):
        """
        Return the application toolbars added by this plugin instance.

        (The previous docstring was a copy of `get_application_toolbars`;
        this method only returns toolbars added via this plugin's
        `add_application_toolbar`.)
        """
        return self._added_toolbars
    # --- API Application Status Widgets
    # ------------------------------------------------------------------------
    def add_application_status_widget(self, name, widget):
        """
        Add status widget to main application status bar.

        Parameters
        ----------
        name: str
            Unique identifier for the widget.
        widget: QWidget
            Widget inserted at index 0 of the status bar's permanent area.
        """
        # TODO: Check widget class
        # TODO: Check existence
        status_bar = self._main.statusBar()
        status_bar.insertPermanentWidget(0, widget)
        self._main._STATUS_WIDGETS[name] = widget
def get_application_status_widget(self, name):
"""
Return an application status widget by name.
"""
if name in self._main._STATUS_WIDGETS:
return self._main._STATUS_WIDGETS[name]
else:
raise SpyderAPIError('Status widget "{}" not found!'.format(name))
    def get_application_status_widgets(self):
        """
        Return all application status widgets created.

        Returns
        -------
        dict
            Mapping of name to widget for every registered status widget.
        """
        return self._main._STATUS_WIDGETS
class SpyderDockablePlugin(SpyderPluginV2):
    """
    A Spyder plugin to enhance functionality with a dockable widget.
    """
    # --- API: Mandatory attributes ------------------------------------------
    # ------------------------------------------------------------------------
    # This is the main widget of the dockable plugin.
    # It needs to be a subclass of PluginMainWidget.
    WIDGET_CLASS = None
    # --- API: Optional attributes -------------------------------------------
    # ------------------------------------------------------------------------
    # Define a list of plugins next to which we want to tabify this plugin.
    # Example: ['Plugins.Editor']
    TABIFY = [Plugins.Console]
    # Disable actions in Spyder main menus when the plugin is not visible
    DISABLE_ACTIONS_WHEN_HIDDEN = True
    # Raise and focus on switch to plugin calls.
    # If False, the widget will be raised but focus will not be given until
    # the action to switch is called a second time.
    RAISE_AND_FOCUS = False
    # --- API: Available signals ---------------------------------------------
    # ------------------------------------------------------------------------
    # The action that toggles the visibility of a dockable plugin fires
    # this signal. This is triggered by checking/unchecking the option for
    # a pane in the View menu.
    sig_toggle_view_changed = Signal(bool)
    # Emit this signal to inform the main window that this plugin requested
    # to be displayed. This is automatically connected on plugin
    # registration. Payload: (plugin instance, give focus flag).
    sig_switch_to_plugin_requested = Signal(object, bool)
    # Inform the main window that a child widget needs its ancestor to be
    # updated.
    sig_update_ancestor_requested = Signal()
    # --- Private methods ----------------------------------------------------
    # ------------------------------------------------------------------------
    def __init__(self, parent, configuration):
        """
        Validate WIDGET_CLASS, create the widget (as the plugin container)
        and wire up its default signals.
        """
        if not issubclass(self.WIDGET_CLASS, PluginMainWidget):
            raise SpyderAPIError(
                'A SpyderDockablePlugin must define a valid WIDGET_CLASS '
                'attribute!')
        # The widget doubles as the container used by the base class.
        self.CONTAINER_CLASS = self.WIDGET_CLASS
        super().__init__(parent, configuration=configuration)
        # Defined on mainwindow.py
        self._shortcut = None
        # Widget setup
        # --------------------------------------------------------------------
        self._widget = self._container
        widget = self._widget
        if widget is None:
            raise SpyderAPIError(
                'A dockable plugin must define a WIDGET_CLASS!')
        if not isinstance(widget, PluginMainWidget):
            raise SpyderAPIError(
                'The WIDGET_CLASS of a dockable plugin must be a subclass of '
                'PluginMainWidget!')
        # Propagate plugin-level options and identity to the widget.
        widget.DISABLE_ACTIONS_WHEN_HIDDEN = self.DISABLE_ACTIONS_WHEN_HIDDEN
        widget.RAISE_AND_FOCUS = self.RAISE_AND_FOCUS
        widget.set_icon(self.get_icon())
        widget.set_name(self.NAME)
        # TODO: Streamline this by moving to postvisible setup
        # Render all toolbars as a final separate step on the main window
        # in case some plugins want to extend a toolbar. Since the rendering
        # can only be done once!
        widget.get_main_toolbar()._render()
        for __, toolbars in widget._aux_toolbars.items():
            for toolbar in toolbars:
                toolbar._render()
        # Default Signals
        # --------------------------------------------------------------------
        widget.sig_toggle_view_changed.connect(self.sig_toggle_view_changed)
        widget.sig_update_ancestor_requested.connect(
            self.sig_update_ancestor_requested)
    # --- API: available methods ---------------------------------------------
    # ------------------------------------------------------------------------
    def before_long_process(self, message):
        """
        Show a message in main window's status bar, change the mouse pointer
        to Qt.WaitCursor and start spinner when starting a long process.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long process starts.
        """
        self.get_widget().start_spinner()
        super().before_long_process(message)
    def after_long_process(self, message=""):
        """
        Clear main window's status bar after a long process, restore mouse
        pointer to the OS default and stop spinner.

        Parameters
        ----------
        message: str
            Message to show in the status bar when the long process finishes.
        """
        super().after_long_process(message)
        self.get_widget().stop_spinner()
    def get_widget(self):
        """
        Return the plugin main widget.

        Raises
        ------
        SpyderAPIError
            If the widget was never created (no WIDGET_CLASS).
        """
        if self._widget is None:
            raise SpyderAPIError('Dockable Plugin must have a WIDGET_CLASS!')
        return self._widget
    def update_title(self):
        """
        Update plugin title, i.e. dockwidget or window title.
        """
        self.get_widget().update_title()
    def update_margins(self, margin):
        """
        Update margins of main widget inside dockable plugin.
        """
        self.get_widget().update_margins(margin)
    def switch_to_plugin(self, force_focus=False):
        """
        Switch to plugin and define if focus should be given or not.

        The actual raising/focusing is performed by the main window, which
        listens to `sig_switch_to_plugin_requested`.
        """
        self.sig_switch_to_plugin_requested.emit(self, force_focus)
    def set_ancestor(self, ancestor_widget):
        """
        Update the ancestor/parent of child widgets when undocking.
        """
        self.get_widget().set_ancestor(ancestor_widget)
    # --- Convenience methods from the widget exposed on the plugin
    # ------------------------------------------------------------------------
    @property
    def dockwidget(self):
        # Dock widget hosting the plugin's main widget.
        return self.get_widget().dockwidget
    @property
    def options_menu(self):
        # Hamburger/options menu of the plugin's main widget.
        return self.get_widget().get_options_menu()
    @property
    def toggle_view_action(self):
        # Action bound to showing/hiding the pane (View menu checkbox).
        return self.get_widget().toggle_view_action
    def create_dockwidget(self, mainwindow):
        # Delegate dock widget creation to the main widget.
        return self.get_widget().create_dockwidget(mainwindow)
    def close_window(self):
        # Close the undocked window of the plugin, if any.
        self.get_widget().close_window()
    def change_visibility(self, state, force_focus=False):
        # Show/hide the plugin pane, optionally forcing focus.
        self.get_widget().change_visibility(state, force_focus)
    def toggle_view(self, value):
        # Toggle the pane visibility from the View menu action.
        self.get_widget().toggle_view(value)
| 34.823931 | 79 | 0.580584 |
6e532740eef8da586e4f483a2640f896e2f64f84 | 4,381 | py | Python | run_model.py | tira-io/LyS-FASTPARSE | 5c148bd146b48c991a9b9c1028c815ef45f09ebb | [
"Apache-2.0"
] | null | null | null | run_model.py | tira-io/LyS-FASTPARSE | 5c148bd146b48c991a9b9c1028c815ef45f09ebb | [
"Apache-2.0"
] | null | null | null | run_model.py | tira-io/LyS-FASTPARSE | 5c148bd146b48c991a9b9c1028c815ef45f09ebb | [
"Apache-2.0"
] | null | null | null |
from argparse import ArgumentParser, Namespace
import codecs
import sys
import pickle
import os
import time
import lysfastparse.utils
import lysfastparse.bcovington.utils_bcovington
import tempfile
import yaml
import subprocess
import lysfastparse.bcovington.covington
# Command-line interface: paths to the pickled vocabularies/options, the
# trained model, the output file, the embeddings and the input to parse.
parser = ArgumentParser()
parser.add_argument("-p", dest="p",metavar="FILE")
parser.add_argument("-m", dest="m",metavar="FILE")
parser.add_argument("-o", dest="o",metavar="FILE")
parser.add_argument("-epe", dest="epe",metavar="FILE")
parser.add_argument("-efe",dest="efe",metavar="FILE")
parser.add_argument("-ewe",dest="ewe", metavar="FILE")
parser.add_argument("-r", dest="r",help="Input run [raw|conllu]", type=str)
parser.add_argument("-i", dest="i",metavar="FILE")
parser.add_argument("--dynet-mem", dest="dynet_mem", help="It is needed to specify this parameter")
parser.add_argument("-udpipe_bin", dest="udpipe_bin",metavar="FILE")
parser.add_argument("-udpipe_model", dest="udpipe_model",metavar="FILE")
args = parser.parse_args()
# NOTE(review): Python 2 print statements -- this script is not Python 3
# compatible.
print "args (run_model.py)",args
path_params = args.p
path_model = args.m
path_outfile = args.o
path_embeddings = args.ewe
path_pos_embeddings = args.epe
path_feats_embeddings = args.efe
type_text = args.r
path_input = args.i
valid_content = False
# Stage the input in a temporary file. Raw text is first tokenized and
# tagged with UDPipe so the parser always receives CoNLL-U.
if type_text == "conllu" and os.path.exists(path_model):
    with codecs.open(path_input) as f:
        f_temp = tempfile.NamedTemporaryFile("w", delete=False)
        f_temp.write(f.read())
        f_temp.close()
    valid_content = True
elif type_text == "raw" and os.path.exists(path_model):
    pipe = lysfastparse.utils.UDPipe(args.udpipe_model, args.udpipe_bin) #config[YAML_UDPIPE])
    raw_content = lysfastparse.utils.read_raw_file(path_input)
    conllu = pipe.run(raw_content, options=" --tokenize --tag")
    f_temp = tempfile.NamedTemporaryFile("w", delete=False)
    f_temp.write(conllu)
    f_temp.close()
    valid_content = True
else:
    raise NotImplementedError
if valid_content == True:
    #TEST PHASE
    # Restore vocabularies and the training options stored with the model.
    with codecs.open(path_params, 'r') as paramsfp:
        aux = pickle.load(paramsfp)
    words, w2i, lemmas, l2i, cpos , pos, feats, rels, stored_opt = aux
    d = vars(stored_opt)
    print "d before",d
    print
    # Override the stored embedding paths with the ones given on the CLI;
    # the literal string "None" marks embeddings that were disabled.
    d["external_embedding"] = None if d["external_embedding"] =="None" else path_embeddings #os.sep.join([args.e,"FB_embeddings","wiki."+metadata[LTCODE]+".vec"])
    d["pos_external_embedding"] = None if d["pos_external_embedding"] =="None" else path_pos_embeddings #os.sep.join([args.e,"UD_POS_embeddings",metadata[NAME_TREEBANK]])
    d["feats_external_embedding"] = None if d["feats_external_embedding"] =="None" else path_feats_embeddings #os.sep.join([args.e,"UD_FEATS_embeddings",metadata[NAME_TREEBANK]])
    d["lemmas_external_embedding"] = None
    print "pos_external_embeddings", d["pos_external_embedding"]
    print "feats_external_embeddings", d["feats_external_embedding"]
    print "external_embedding", d["external_embedding"]
    stored_opt =Namespace(**d)
    print "Running model with this configuration", stored_opt
    parser = lysfastparse.bcovington.covington.CovingtonBILSTM(words, lemmas, cpos, pos, feats, rels, w2i, l2i, stored_opt,
                                                               None)
    parser.Load(path_model)
    # NOTE(review): this `with` rebinds f_temp to the (now closed) file
    # object; only f_temp.name is used afterwards.
    with codecs.open(f_temp.name) as f_temp:
        lookup_conll_data = lysfastparse.utils.lookup_conll_extra_data(f_temp)
    testpath = f_temp.name
    ts = time.time()
    pred = list(parser.Predict(testpath))
    te = time.time()
    print "Took "+str(te - ts)+" seconds"
    # Write predictions back into the temp file, restore extra CoNLL
    # columns, enforce a single root, then copy to the output path.
    lysfastparse.bcovington.utils_bcovington.write_conll(testpath, pred)
    lysfastparse.utils.dump_lookup_extra_into_conll(testpath, lookup_conll_data)
    lysfastparse.utils.transform_to_single_root(testpath)
    with codecs.open(path_outfile,"w") as f_out:
        with codecs.open(f_temp.name) as f_out_aux:
            f_out.write(f_out_aux.read())
    os.unlink(f_temp.name)
19c9d9c6dfa0f9005eec328dfbc0a3eb0f4d50f0 | 229 | py | Python | api_crud/urls.py | TeunSpithoven/NinjaGame-backend | 9b3cf3d87593e0b3fe9956f85ca5633fb08d63af | [
"MIT"
] | null | null | null | api_crud/urls.py | TeunSpithoven/NinjaGame-backend | 9b3cf3d87593e0b3fe9956f85ca5633fb08d63af | [
"MIT"
] | null | null | null | api_crud/urls.py | TeunSpithoven/NinjaGame-backend | 9b3cf3d87593e0b3fe9956f85ca5633fb08d63af | [
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path
# URL routing table: delegate app routes to each app's own urls module.
urlpatterns = [
    path('games/', include('games.urls')),
    path('auth/', include('authentication.urls')),
    path('admin/', admin.site.urls),
]
| 20.818182 | 50 | 0.672489 |
776ffe7d9b1aabfb20cef0c02100b0cd6f457a57 | 20 | py | Python | cobiv/modules/core/imageset/__init__.py | gokudomatic/cobiv | c095eda704fab319fccc04d43d8099f1e8327734 | [
"MIT"
] | 4 | 2017-12-26T07:19:46.000Z | 2019-09-20T08:27:58.000Z | cobiv/modules/core/imageset/__init__.py | gokudomatic/cobiv | c095eda704fab319fccc04d43d8099f1e8327734 | [
"MIT"
] | 4 | 2017-10-01T12:18:43.000Z | 2019-06-09T10:29:03.000Z | cobiv/modules/core/imageset/__init__.py | gokudomatic/cobiv | c095eda704fab319fccc04d43d8099f1e8327734 | [
"MIT"
] | 1 | 2019-01-07T19:58:00.000Z | 2019-01-07T19:58:00.000Z | __all__=["ImageSet"] | 20 | 20 | 0.75 |
15abf5a87ac8cf0a32c9615268c67a292de267bc | 379 | py | Python | home/migrations/0002_auto_20210320_2138.py | MatheusAfinovicz/MathsBurguer | 82b8cd1499989ac46bb341296bfdd7f432ae5481 | [
"MIT"
] | null | null | null | home/migrations/0002_auto_20210320_2138.py | MatheusAfinovicz/MathsBurguer | 82b8cd1499989ac46bb341296bfdd7f432ae5481 | [
"MIT"
] | null | null | null | home/migrations/0002_auto_20210320_2138.py | MatheusAfinovicz/MathsBurguer | 82b8cd1499989ac46bb341296bfdd7f432ae5481 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-21 00:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefine `MessageBox.message` as a
    # TextField with max_length=1500. Do not edit by hand beyond comments.
    dependencies = [
        ('home', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='messagebox',
            name='message',
            field=models.TextField(max_length=1500),
        ),
    ]
| 19.947368 | 52 | 0.591029 |
3fca61f7458a20f0cc657e059a14392b21971439 | 830 | py | Python | wb/main/environment/manifest/__init__.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/environment/manifest/__init__.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/environment/manifest/__init__.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Modules for manipulating with manifest
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from wb.main.environment.manifest.dumper import ManifestDumper
from wb.main.environment.manifest.manifest import Manifest
from wb.main.environment.manifest.factory import ManifestFactory
| 41.5 | 73 | 0.79759 |
3393383f642b04de44a4e21fc9b07efd53ba87ee | 2,080 | py | Python | iRONS/Software/day2week2month.py | csiro-hydrology/iRONS | 022182d85ed2e7799e5d65bb5b9ed04f91da7526 | [
"MIT"
] | 12 | 2020-12-15T23:39:26.000Z | 2021-10-30T18:48:35.000Z | iRONS/Software/day2week2month.py | csiro-hydrology/iRONS | 022182d85ed2e7799e5d65bb5b9ed04f91da7526 | [
"MIT"
] | 1 | 2021-05-12T08:34:06.000Z | 2021-06-11T10:11:15.000Z | iRONS/Software/day2week2month.py | csiro-hydrology/iRONS | 022182d85ed2e7799e5d65bb5b9ed04f91da7526 | [
"MIT"
] | 5 | 2020-09-12T14:38:46.000Z | 2021-07-17T15:56:36.000Z | # -*- coding: utf-8 -*-
"""
This is a function to transform daily data into weekly
This module is part of the iRONS toolbox by A. Peñuela and F. Pianosi and at
Bristol University (2020).
Licence: MIT
"""
import numpy as np
import pandas as pd
from datetime import timedelta
def day2week(dates,data,date_ini=None,date_end=None):
    """Aggregate daily data into whole-week sums.

    Parameters:
        dates: array of daily dates; must contain date_ini when given.
        data: daily values; a 1-D array is reshaped into a single column.
        date_ini: first day of the aggregation (defaults to dates[0]).
        date_end: last day considered (defaults to dates[-1]); trimmed down
            to a whole number of weeks from date_ini.

    Returns:
        (dates_week, data_week, data_cum_week): week-boundary dates, weekly
        sums, and cumulative sums since date_ini (both with a leading zero row).
    """
    if data.ndim == 1:
        # Promote to a 2-D column so the axis-0 sums below work uniformly.
        data = data.reshape([data.shape[0],1])
    delta = 7 # days of a week
    # Initial day
    if date_ini==None:
        date_ini = dates[0]
    else:
        if (dates[0]-date_ini).days > 0:
            # NOTE(review): the message cites dates[-1], but the comparison is
            # against dates[0]; it should probably cite dates[0] - confirm.
            raise Exception('Error: The defined initial date is not within the data period, please try with a date equal or later than '+str(dates[-1]))
    # # Day of the week of the initial day (Monday = 0,..., Sunday = 6)
    # wday0 = date_ini.weekday()
    # # We define the inital date according to the day of the week we would like to start with, in this case Monday
    # if wday0 != 0:
    # date_ini = date_ini + timedelta(days = 7-wday0)
    # # Now we get the final date
    if date_end==None:
        date_end = dates[-1]
    else:
        if (date_end-dates[-1]).days > 0:
            raise Exception('Error: The defined end date is not within the data period, please try with a date equal or earlier than '+str(dates[-1]))
    N = (date_end - date_ini).days//7 # number of entire weeks
    date_end = date_ini + timedelta(days = N*7) # day_ini + horizon weeks * 7 days/week
    index_ini = np.where(dates==date_ini)[0][0]
    dates_week = dates[index_ini]
    data_week = [np.zeros(np.shape(data)[1])]
    data_cum_week = [np.zeros(np.shape(data)[1])]
    for i in np.arange(N)+1:
        # Week i: weekly sum covers days [(i-1)*7, i*7); cumulative sum covers [0, i*7).
        dates_week = np.append(dates_week,[dates[index_ini+i*delta]])
        data_week = np.append(data_week,[np.sum(data[index_ini+np.max([i-1,0])*delta:index_ini+i*delta,:],axis =0)],axis = 0)
        data_cum_week = np.append(data_cum_week,[np.sum(data[index_ini:index_ini+i*delta,:],axis =0)],axis = 0)
    dates_week = pd.to_datetime(dates_week)
    return dates_week,data_week,data_cum_week | 38.518519 | 152 | 0.647596 |
18d72031e09d563514ea1fa41582dcb1f723387d | 13,877 | py | Python | nursereg/tasks.py | praekeltfoundation/ndoh-control | 56385edfcea58385efe4f7a0e203c0076e7bac9c | [
"BSD-3-Clause"
] | null | null | null | nursereg/tasks.py | praekeltfoundation/ndoh-control | 56385edfcea58385efe4f7a0e203c0076e7bac9c | [
"BSD-3-Clause"
] | 101 | 2015-01-15T14:01:29.000Z | 2016-10-03T15:21:53.000Z | nursereg/tasks.py | praekeltfoundation/ndoh-control | 56385edfcea58385efe4f7a0e203c0076e7bac9c | [
"BSD-3-Clause"
] | null | null | null | import requests
import json
from requests.exceptions import HTTPError
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from celery import task
from celery.utils.log import get_task_logger
from celery.exceptions import SoftTimeLimitExceeded
from djcelery.models import PeriodicTask
from go_http import HttpApiSender
from go_http.contacts import ContactsApiClient
from .models import NurseReg
from subscription.models import Subscription, MessageSet
logger = get_task_logger(__name__)
def get_registrant_id(id_type, id_no=None, passport_origin=None, msisdn=None):
    """Build the Jembi registrant identifier string for the given ID type.

    sa_id -> "<id_no>^^^ZAF^NI", passport -> "<id_no>^^^<ORIGIN>^PPN",
    anything else -> "<msisdn without '+'>^^^ZAF^TEL".
    """
    if id_type == 'sa_id':
        # South African national ID number.
        return '%s^^^ZAF^NI' % id_no
    if id_type == 'passport':
        # Passport number qualified by its upper-cased country of origin.
        return '%s^^^%s^PPN' % (id_no, passport_origin.upper())
    # Fall back to the cell number, stripped of its leading '+'.
    return '%s^^^ZAF^TEL' % msisdn.replace('+', '')
def get_subscription_type(authority):
    """Map a registration authority name to its numeric subscription type.

    Raises KeyError for unknown authorities.
    """
    return {'personal': 1, 'chw': 2, 'clinic': 3}[authority]
def get_today():
    """Return the current local datetime (via datetime.today())."""
    today = datetime.today()
    return today
def get_timestamp():
    """Return the current moment formatted as a YYYYMMDDhhmmss string."""
    now = get_today()
    return now.strftime("%Y%m%d%H%M%S")
def get_dob(mom_dob):
    """Format a date of birth as YYYYMMDD, or return None when unset."""
    return None if mom_dob is None else mom_dob.strftime("%Y%m%d")
def get_persal(persal):
    """Return the PERSAL number as a string, or None when absent."""
    return None if persal is None else str(persal)
def get_sanc(sanc):
    """Return the SANC registration number as a string, or None when absent."""
    return None if sanc is None else str(sanc)
def get_sender():
    """Build a Vumi Go HTTP API sender from the configured account settings."""
    return HttpApiSender(
        account_key=settings.VUMI_GO_ACCOUNT_KEY,
        conversation_key=settings.VUMI_GO_CONVERSATION_KEY,
        conversation_token=settings.VUMI_GO_ACCOUNT_TOKEN
    )
def build_jembi_json(nursereg):
    """Compile the nurse-registration document posted to Jembi.

    Returns a dict with fixed header fields (mha/swt/type) plus the
    registrant's MSISDNs, facility code, identifier, DOB, PERSAL/SANC
    numbers and an encounter timestamp.
    """
    doc = {
        "mha": 1,
        "swt": 3,
        "type": 7,
        "dmsisdn": nursereg.dmsisdn,
        "cmsisdn": nursereg.cmsisdn,
        "rmsisdn": nursereg.rmsisdn,
        "faccode": nursereg.faccode,
        "id": get_registrant_id(
            nursereg.id_type, nursereg.id_no, nursereg.passport_origin,
            nursereg.cmsisdn),
        "dob": get_dob(nursereg.dob),
        "persal": get_persal(nursereg.persal_no),
        "sanc": get_sanc(nursereg.sanc_reg_no),
        "encdate": get_timestamp(),
    }
    return doc
@task(time_limit=10, ignore_result=True)
def jembi_post_json(nursereg_id, sender=None):
    """Celery task: POST a nurse registration's JSON document to Jembi.

    Fires a json_to_jembi_success metric on a 2xx response and a
    json_to_jembi_fail metric otherwise; 5xx responses are retried via
    Celery (fail metric is only fired on the final retry).
    """
    logger.info("Compiling Jembi Json data")
    try:
        nursereg = NurseReg.objects.get(pk=nursereg_id)
        json_doc = build_jembi_json(nursereg)
        try:
            result = requests.post(
                "%s/nc/subscription" % settings.JEMBI_BASE_URL, # url
                headers={'Content-Type': 'application/json'},
                data=json.dumps(json_doc),
                auth=(settings.JEMBI_USERNAME, settings.JEMBI_PASSWORD),
                verify=False  # TLS certificate verification disabled
            )
            result.raise_for_status()
            vumi_fire_metric.apply_async(
                kwargs={
                    "metric": u"%s.%s.sum.json_to_jembi_success" % (
                        settings.METRIC_ENV, 'nursereg'),
                    "value": 1,
                    "agg": "sum",
                    "sender": sender}
            )
        except HTTPError as e:
            # Retry sending if in the 500 range (3 default retries).
            # NOTE(review): the strict comparisons exclude exactly 500 and 599.
            if 500 < e.response.status_code < 599:
                if jembi_post_json.max_retries == \
                        jembi_post_json.request.retries:
                    # Final retry: record the failure before re-raising.
                    vumi_fire_metric.apply_async(
                        kwargs={
                            "metric": u"%s.%s.sum.json_to_jembi_fail" % (
                                settings.METRIC_ENV, 'nursereg'),
                            "value": 1,
                            "agg": "sum",
                            "sender": None}
                    )
                raise jembi_post_json.retry(exc=e)
            else:
                # Non-5xx HTTP error: record failure and propagate.
                vumi_fire_metric.apply_async(
                    kwargs={
                        "metric": u"%s.%s.sum.json_to_jembi_fail" % (
                            settings.METRIC_ENV, 'nursereg'),
                        "value": 1,
                        "agg": "sum",
                        "sender": None}
                )
                raise e
        except:
            logger.error('Problem posting JSON to Jembi', exc_info=True)
        # NOTE(review): if requests.post itself raised (swallowed by the bare
        # except above), `result` is unbound here and this raises NameError -
        # confirm intended behaviour.
        return result.text
    except ObjectDoesNotExist:
        logger.error('Missing NurseReg object', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceeded processing Jembi send via Celery.',
            exc_info=True)
def get_client():
    """Return a contacts API client authenticated with the configured token."""
    client = ContactsApiClient(auth_token=settings.VUMI_GO_API_TOKEN)
    return client
def define_extras_subscription(_extras, subscription):
    """Record subscription details (as strings) on a contact's extras dict.

    Mutates and returns ``_extras``.
    """
    _extras.update({
        u"nc_subscription_type": str(subscription.message_set.id),
        u"nc_subscription_rate": str(subscription.schedule.id),
        u"nc_subscription_seq_start": str(subscription.next_sequence_number),
    })
    return _extras
def define_extras_registration(_extras, nursereg):
    """Copy nurse-registration details onto a contact's extras dict.

    Mutates and returns ``_extras``. ID fields depend on the registration's
    id_type; optional fields (registered_by, persal, sanc) are only set
    when present.
    """
    _extras[u"nc_source_name"] = nursereg.nurse_source.name
    _extras[u"nc_last_reg_id"] = str(nursereg.id)
    # Duplication of JS extras required for external nurseregs.
    _extras[u"nc_faccode"] = nursereg.faccode
    _extras[u"nc_is_registered"] = "true"
    id_type = nursereg.id_type
    if id_type in ("sa_id", "passport"):
        _extras[u"nc_id_type"] = id_type
        _extras[u"nc_dob"] = nursereg.dob.strftime("%Y-%m-%d")
        if id_type == "sa_id":
            _extras[u"nc_sa_id_no"] = nursereg.id_no
        else:
            _extras[u"nc_passport_num"] = nursereg.id_no
            _extras[u"nc_passport_country"] = nursereg.passport_origin
    if nursereg.cmsisdn != nursereg.dmsisdn:
        # Registration was performed from a different handset.
        _extras[u"nc_registered_by"] = nursereg.dmsisdn
    if nursereg.persal_no:
        _extras[u"nc_persal"] = str(nursereg.persal_no)
    if nursereg.sanc_reg_no:
        _extras[u"nc_sanc"] = str(nursereg.sanc_reg_no)
    return _extras
def update_contact_registration(contact, nursereg, client):
    """Refresh an existing contact's extras from a nurse registration."""
    # Only the extras need updating.
    extras = define_extras_registration(contact["extra"], nursereg)
    return client.update_contact(contact["key"], {u"extra": extras})
def update_contact_subscription(contact, subscription, client):
    """Record a contact's subscription details on its extras."""
    # Only the extras need updating.
    extras = define_extras_subscription(contact["extra"], subscription)
    return client.update_contact(contact["key"], {u"extra": extras})
def get_subscription_details():
    """Return (message set short name, send rate, starting sequence number)
    for the NurseConnect subscription."""
    return ("nurseconnect", "three_per_week", 1)
def create_subscription(contact, sender=None):
    """Create and save a new NurseConnect Subscription for the contact.

    Fires nc_subscriptions and protocol-success metrics on success, a
    protocol-fail metric on any error. Returns the saved Subscription, or
    None when creation failed (the bare except swallows the error after
    logging it).
    """
    logger.info("Creating new Control messaging subscription")
    try:
        sub_details = get_subscription_details()
        subscription = Subscription(
            contact_key=contact["key"],
            to_addr=contact["msisdn"],
            user_account=contact["user_account"],
            lang="en",
            message_set=MessageSet.objects.get(short_name=sub_details[0]),
            schedule=PeriodicTask.objects.get(
                id=settings.SUBSCRIPTION_RATES[sub_details[1]]),
            next_sequence_number=sub_details[2],
        )
        subscription.save()
        logger.info("Created subscription for %s" % subscription.to_addr)
        vumi_fire_metric.apply_async(
            kwargs={
                "metric": u"%s.sum.nc_subscriptions" % (
                    settings.METRIC_ENV),
                "value": 1,
                "agg": "sum",
                "sender": sender}
        )
        vumi_fire_metric.apply_async(
            kwargs={
                "metric": u"%s.%s.sum.nc_subscription_to_protocol_success" % (
                    settings.METRIC_ENV, "nurseconnect"),
                "value": 1,
                "agg": "sum",
                "sender": sender}
        )
        return subscription
    except:
        # NOTE(review): bare except - any failure (DB lookup, save, metric)
        # fires the fail metric and returns None implicitly.
        vumi_fire_metric.apply_async(
            kwargs={
                "metric": u"%s.%s.sum.nc_subscription_to_protocol_fail" % (
                    settings.METRIC_ENV, "nurseconnect"),
                "value": 1,
                "agg": "sum",
                "sender": sender}
        )
        logger.error(
            'Error creating Subscription instance',
            exc_info=True)
def transfer_subscription(contact, subscription):
    """Move an active subscription from its old MSISDN onto `contact`.

    Saves a copy of the subscription (same message set, schedule and next
    sequence number) addressed to the new contact, then deactivates the
    original. Returns the new Subscription.
    """
    # activate the same subscription on the new msisdn
    new_sub = Subscription(
        contact_key=contact["key"],
        to_addr=contact["msisdn"],
        user_account=contact["user_account"],
        lang="en",
        message_set=subscription.message_set,
        schedule=subscription.schedule,
        next_sequence_number=subscription.next_sequence_number)
    new_sub.save()
    # deactivate active subscriptions for rmsisdn
    subscription.active = False
    subscription.save()
    # TODO #123: Clear extras for old contact for external change requests
    return new_sub
def create_contact(nursereg, client):
    """Create a new Vumi contact pre-populated with registration extras."""
    payload = {
        u"msisdn": nursereg.cmsisdn,
        u"extra": define_extras_registration({}, nursereg),
    }
    return client.create_contact(payload)
@task(time_limit=10, ignore_result=True)
def update_create_vumi_contact(nursereg_id, client=None, sender=None):
    """Celery task: update (or create) the Vumi contact for a nurse
    registration, then ensure it has a NurseConnect subscription.

    A 400 from the contacts API is treated as "contact not found" and
    triggers creation; 5xx triggers a Celery retry. If the registration's
    old MSISDN (rmsisdn) has an active subscription it is transferred,
    otherwise a fresh subscription is created.
    """
    logger.info("Creating / Updating Contact")
    try:
        if client is None:
            client = get_client()
        # Load the nursereg
        try:
            nursereg = NurseReg.objects.get(pk=nursereg_id)
            try:
                # Get and update the contact if it exists
                contact = client.get_contact(
                    msisdn=nursereg.cmsisdn)
                logger.info("Contact exists - updating contact")
                contact = update_contact_registration(
                    contact, nursereg, client)
            # This exception should rather look for a 404 if the contact is
            # not found, but currently a 400 Bad Request is returned.
            except HTTPError as e:
                if e.response.status_code == 400:
                    # Create the contact as it doesn't exist
                    logger.info("Contact doesn't exist - creating new contact")
                    contact = create_contact(nursereg, client)
                elif 500 < e.response.status_code < 599:
                    # Retry task if 500 error
                    raise update_create_vumi_contact.retry(exc=e)
                else:
                    raise e
            except:
                # NOTE(review): if this bare except fires, `contact` is left
                # unbound and the code below raises NameError - confirm.
                logger.error('Problem contacting http_api', exc_info=True)
            # Warning: This only caters for singular messageset 'nurseconnect'
            cmsisdn_active_subs = Subscription.objects.filter(
                to_addr=nursereg.cmsisdn, active=True,
                message_set__short_name="nurseconnect")
            if cmsisdn_active_subs.count() > 0:
                # Do nothing if the cmsisdn has an active subscription
                return contact
            else:
                try:
                    # Get the old contact active subscription
                    rmsisdn_active_sub = Subscription.objects.get(
                        to_addr=nursereg.rmsisdn, active=True,
                        message_set__short_name="nurseconnect")
                    subscription = transfer_subscription(
                        contact, rmsisdn_active_sub)
                except ObjectDoesNotExist:
                    # Create new subscription for the contact
                    subscription = create_subscription(contact, sender)
                # Update the contact with subscription details
                updated_contact = update_contact_subscription(
                    contact, subscription, client)
                return updated_contact
        except ObjectDoesNotExist:
            logger.error('Missing NurseReg object', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceeded processing Jembi send via Celery.',
            exc_info=True)
@task(time_limit=10, ignore_result=True)
def fire_new_clinic_metric(client=None, sender=None):
    """Celery task: increment the unique-clinic nurse registrations metric."""
    logger.info("Firing metric")
    try:
        if client is None:
            # NOTE(review): the client is constructed here but never used in
            # this task - confirm whether the lookup is still needed.
            client = get_client()
        vumi_fire_metric.apply_async(kwargs={
            "metric": u"%s.nurseconnect.unique.clinics" % (
                settings.METRIC_ENV),
            "value": 1, "agg": "sum", "sender": sender}
        )
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceeded processing Jembi send via Celery.',
            exc_info=True)
    return
@task(ignore_result=True)
def vumi_fire_metric(metric, value, agg, sender=None):
    """Fire a single metric event via the Vumi Go HTTP API.

    Builds a sender when none is supplied; returns the sender used.
    """
    try:
        active_sender = get_sender() if sender is None else sender
        active_sender.fire_metric(metric, value, agg=agg)
        return active_sender
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceed processing metric fire to Vumi HTTP API '
            'via Celery',
            exc_info=True)
| 33.76399 | 79 | 0.60467 |
bc9f42616f0a016757d35f475b9f9dc2de759866 | 2,056 | py | Python | reversible2/classifier.py | robintibor/reversible2 | e6fea33ba41c7f76ee50295329b4ef27b879a7fa | [
"MIT"
] | null | null | null | reversible2/classifier.py | robintibor/reversible2 | e6fea33ba41c7f76ee50295329b4ef27b879a7fa | [
"MIT"
] | null | null | null | reversible2/classifier.py | robintibor/reversible2 | e6fea33ba41c7f76ee50295329b4ef27b879a7fa | [
"MIT"
] | null | null | null | import torch as th
from torch import nn
import numpy as np
from reversible2.sliced import norm_and_var_directions, sample_directions
from reversible2.gaussian import transform_gaussians_by_dirs
from reversible2.gaussian import get_gaussian_log_probs
class SubspaceClassifier(nn.Module):
    """Classifier that scores samples with per-class diagonal Gaussians on a
    learned linear subspace.

    Outputs are projected onto normalized direction vectors
    (``classifier_dirs``); each class has trainable means/log-stds in that
    projected space.
    """
    def __init__(self, n_classes, n_directions, n_dims):
        super(SubspaceClassifier, self).__init__()
        # Per-class Gaussian parameters in the projected space, initialized
        # to zero.
        self.means = th.nn.Parameter(th.zeros(n_classes, n_directions))
        self.log_stds = th.nn.Parameter(th.zeros(n_classes, n_directions))
        # Randomly sampled, orthogonalized projection directions; kept
        # trainable as a Parameter.
        classifier_dirs = sample_directions(
            np.prod(n_dims),
            orthogonalize=True, cuda=True)[:n_directions]
        self.classifier_dirs = th.nn.Parameter(classifier_dirs)
    def get_log_probs(self, outs):
        """Per-class Gaussian log-probabilities of ``outs`` (projection
        directions stay in the autograd graph)."""
        outs_for_clf = self.project_outs(outs, detach_dirs=False)
        return self.get_log_probs_projected(outs_for_clf)
    def log_softmax_from_projected(self, outs_for_clf):
        """Class log-softmax for inputs that are already projected."""
        log_probs = self.get_log_probs_projected(outs_for_clf)
        return th.nn.functional.log_softmax(log_probs, dim=-1)
    def get_log_probs_projected(self, outs_for_clf):
        """Stack each class's Gaussian log-probabilities along the last dim."""
        log_probs_per_class = []
        for i_class in range(len(self.means)):
            log_probs = get_gaussian_log_probs(self.means[i_class],
                                               self.log_stds[i_class],
                                               outs_for_clf)
            log_probs_per_class.append(log_probs)
        return th.stack(log_probs_per_class, dim=-1)
    def forward(self, outs):
        """Project, score against each class, and return the log-softmax."""
        log_probs = self.get_log_probs(outs)
        return th.nn.functional.log_softmax(log_probs, dim=-1)
    def project_outs(self, outs, detach_dirs):
        """Project ``outs`` onto the normalized directions; optionally detach
        the directions from the autograd graph."""
        normed_dirs = norm_and_var_directions(self.classifier_dirs)
        if detach_dirs:
            normed_dirs = normed_dirs.detach()
        projected_outs = th.mm(outs, normed_dirs.t(), )
        return projected_outs
    def get_dirs(self):
        """Return the normalized projection directions."""
        normed_dirs = norm_and_var_directions(self.classifier_dirs)
        return normed_dirs
| 37.381818 | 74 | 0.686284 |
4eb114453870f9471d4131fcff1d81163c7e7e95 | 4,424 | py | Python | HelmholtzAI/benchmarks/implementations/utils/summarize_data.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | [
"Apache-2.0"
] | 3 | 2021-11-18T20:01:35.000Z | 2021-12-17T17:47:23.000Z | HelmholtzAI/benchmarks/implementations/utils/summarize_data.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | [
"Apache-2.0"
] | 1 | 2022-03-16T07:29:30.000Z | 2022-03-31T10:19:07.000Z | HelmholtzAI/benchmarks/implementations/utils/summarize_data.py | mlcommons/hpc_results_v1.0 | a3f7469937aa44a48e186160a2e97464970cf72f | [
"Apache-2.0"
] | 1 | 2021-11-18T01:53:25.000Z | 2021-11-18T01:53:25.000Z | # The MIT License (MIT)
#
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import numpy as np
import h5py as h5
from mpi4py import MPI
#merge function helpers
def merge_all_token(token, comm):
    """Reduce a per-rank statistics token across an MPI communicator.

    ``token`` is (n, mean, sqmean, min, max); every rank receives the merged
    global token covering all ranks' samples.
    """
    # Total sample count; each rank weights its (sq)means by its share of
    # the total before the SUM-allreduce, yielding the global means.
    n = token[0]
    nres = comm.allreduce(token[0])
    weight = float(n)/float(nres)
    dmeanres = comm.allreduce(weight*token[1], op = MPI.SUM)
    dsqmeanres = comm.allreduce(weight*token[2], op = MPI.SUM)
    # Min/max are combined by allgathering every rank's vector and reducing
    # elementwise with numpy (a custom reduction, per the original author).
    #min
    token_all = comm.allgather(token[3])
    dminres = token_all[0]
    for tk in token_all[1:]:
        dminres = np.minimum(dminres, tk)
    #max
    token_all = comm.allgather(token[4])
    dmaxres = token_all[0]
    for tk in token_all[1:]:
        dmaxres = np.maximum(dmaxres, tk)
    return (nres, dmeanres, dsqmeanres, dminres, dmaxres)
def merge_token(token1, token2):
    """Merge two partial-statistics tokens into one covering both sample sets.

    Each token is (n, mean, sqmean, min, max); means are combined with
    count-proportional weights, min/max elementwise.
    """
    n1, mean1, sqmean1, min1, max1 = token1
    n2, mean2, sqmean2, min2, max2 = token2
    n = n1 + n2
    w1 = float(n1) / float(n)
    w2 = float(n2) / float(n)
    return (n,
            w1 * mean1 + w2 * mean2,
            w1 * sqmean1 + w2 * sqmean2,
            np.minimum(min1, min2),
            np.maximum(max1, max2))
#create data token
def create_token(filename, data_format="nchw", rank = 0):
    """Compute per-channel statistics for one HDF5 sample file.

    Args:
        filename: path of an HDF5 file containing a "climate/data" array.
        data_format: "nchw" (channels first) or anything else for
            channels-last; selects which two axes are reduced over.
        rank: MPI rank, used only for error reporting.

    Returns:
        Tuple (n, mean, meansq, minimum, maximum); n is 1 (one sample per
        file) and the other entries are per-channel arrays.

    Raises:
        IOError: when the file cannot be opened/read, chained to the
            underlying error.
    """
    try:
        with h5.File(filename, "r") as f:
            arr = f["climate/data"][...]
    except Exception as e:
        # Chain the original error instead of swallowing it: the previous
        # bare `except:` hid the root cause and also caught KeyboardInterrupt.
        raise IOError("Cannot open file {} on rank {}".format(filename, rank)) from e
    #prep axis for ops: reduce the two spatial axes, keep the channel axis
    axis = (1,2) if data_format == "nchw" else (0,1)
    #how many samples do we have: just 1 here
    n = 1
    #compute stats
    mean = np.mean(arr, axis=axis)
    meansq = np.mean(np.square(arr), axis=axis)
    minimum = np.amin(arr, axis=axis)
    maximum = np.amax(arr, axis=axis)
    #result
    result = (n, mean, meansq, minimum, maximum)
    return result
#global parameters
overwrite = False
data_format = "nhwc"
data_path_prefix = "/data"
#MPI: duplicate the world communicator so this script owns its context
comm = MPI.COMM_WORLD.Dup()
comm_rank = comm.rank
comm_size = comm.size
#root path of the training split
root = os.path.join( data_path_prefix, "train" )
#collect all HDF5 sample files named data-*.h5
allfiles = [ os.path.join(root, x) for x in os.listdir(root) \
             if x.endswith('.h5') and x.startswith('data-') ]
#split the file list into contiguous per-rank chunks
numfiles = len(allfiles)
chunksize = int(np.ceil(numfiles / comm_size))
start = chunksize * comm_rank
end = min([start + chunksize, numfiles])
files = allfiles[start:end]
#local pass: fold this rank's files into one statistics token
token = create_token(files[0], data_format)
for filename in files[1:]:
    token = merge_token(create_token(filename, data_format, comm_rank), token)
#global pass: combine tokens across all ranks
token = merge_all_token(token, comm)
#write the merged statistics on rank 0 only
if comm_rank == 0:
    #save count/mean/sqmean/min/max under the climate group
    with h5.File(os.path.join(data_path_prefix, "../stats.h5"), "w") as f:
        f["climate/count"]=token[0]
        f["climate/mean"]=token[1]
        f["climate/sqmean"]=token[2]
        f["climate/minval"]=token[3]
        f["climate/maxval"]=token[4]
| 30.095238 | 82 | 0.680606 |
f1610878338446cf982cc551c8639f569b0414b4 | 472 | py | Python | panifex/__init__.py | lainproliant/panifex | 97782309ab669ecc5528e69e0bc76a8e65bc19f3 | [
"MIT"
] | null | null | null | panifex/__init__.py | lainproliant/panifex | 97782309ab669ecc5528e69e0bc76a8e65bc19f3 | [
"MIT"
] | null | null | null | panifex/__init__.py | lainproliant/panifex | 97782309ab669ecc5528e69e0bc76a8e65bc19f3 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------
# Panifex: The Python dependency-injection based build system.
#
# Author: Lain Musgrove (lainproliant)
# Date: Thursday, January 2 2020
#
# Released under a 3-clause BSD license, see LICENSE for more info.
# -------------------------------------------------------------------
from .build import build, default, provide, target, seq, keep, noclean
from .shell import sh, ShellReport
temp = build.temp  # package-level alias: re-exposes build.temp at the top level
| 36.307692 | 70 | 0.533898 |
19f09d97a43d009b58272323808cf4c102a7f0a8 | 2,563 | py | Python | dcmetro.py | mkaiser101/DCfoodtrucks | 955d8c746c8eee53801a073a1922f7c2a0fc7848 | [
"Apache-2.0"
] | null | null | null | dcmetro.py | mkaiser101/DCfoodtrucks | 955d8c746c8eee53801a073a1922f7c2a0fc7848 | [
"Apache-2.0"
] | null | null | null | dcmetro.py | mkaiser101/DCfoodtrucks | 955d8c746c8eee53801a073a1922f7c2a0fc7848 | [
"Apache-2.0"
] | null | null | null | ########### Python 3.2 #############
import base64
import http.client
import json
import logging
import pickle
import urllib.error
import urllib.parse
import urllib.request

import requests
class metro_client(object):
    """Client for the WMATA rail API: fetches station names and coordinates
    for every rail line."""

    def __init__(self):
        # Kept for interface compatibility; the API calls below use
        # http.client directly rather than this session.
        self.session = requests.Session()

    def _handle_creds(self):
        '''Load and decode the stored WMATA API key.

        Arguments:
            self - instance reference
        Returns the primary key decoded from metro_creds.bin.
        '''
        with open("metro_creds.bin", 'rb') as readfile:
            auth_list = pickle.load(readfile)
        primary_key = base64.decodebytes(auth_list[0])
        return primary_key

    def build_params(self):
        """Query the station list for each rail line.

        Returns:
            A list of [station_name, lat, lon] entries covering all lines,
            or False when a request fails.
        """
        headers = {
            # Request headers
            'api_key': self._handle_creds(),
        }
        # One query-string per rail line code.
        line_codes = ('RD', 'YL', 'GR', 'BL', 'OR', 'SV')
        params = [urllib.parse.urlencode({'LineCode': code})
                  for code in line_codes]
        payloads = []
        for param in params:
            conn = http.client.HTTPSConnection('api.wmata.com')
            try:
                conn.request("GET", "/Rail.svc/json/jStations?%s" % param,
                             "{body}", headers)
                response = conn.getresponse()
                data = response.read()
                jsonResponse = json.loads(data.decode('utf-8'))
                for station in jsonResponse.get('Stations'):
                    payloads.append([station.get('Name'),
                                     station.get('Lat'),
                                     station.get('Lon')])
            except (http.client.HTTPException, OSError) as e:
                # http.client raises HTTPException/OSError; the original
                # caught requests.exceptions.HTTPError, which these calls
                # never raise, and referenced an un-imported `logging`.
                logging.error("HTTP Error: %s", e)
                return False
            finally:
                # Always release the connection, even on failure (the
                # original leaked it on the error path).
                conn.close()
        return payloads
def main():
    """Entry point: print the station payloads for all rail lines."""
    payloads = metro_client().build_params()
    print(payloads)


if __name__ == "__main__":
    main()
| 30.511905 | 101 | 0.521654 |
327fdbc02590f56e58325c21f715f374f8d73343 | 8,802 | py | Python | imperfecto/demos/regret_matching_demo.py | vlongle/Imperfecto | 0d9c75f1238b55e70533355e2cdef2f166e70f89 | [
"MIT"
] | 4 | 2022-03-05T01:25:31.000Z | 2022-03-08T07:52:28.000Z | imperfecto/demos/regret_matching_demo.py | vlongle/Imperfecto | 0d9c75f1238b55e70533355e2cdef2f166e70f89 | [
"MIT"
] | null | null | null | imperfecto/demos/regret_matching_demo.py | vlongle/Imperfecto | 0d9c75f1238b55e70533355e2cdef2f166e70f89 | [
"MIT"
] | null | null | null | """A demo for the regret-matching algorithm (Hart and Mas-Colell 2000) for various
N-player normal form games.
For 2-player zero-sum game, regret matching algorithm's average strategy provably converges to Nash.
However, it seems to work for more than 2-player games as well.
Usage:
Run::
$ python3 imperfecto/demos/regret_matching_demo.py --help
to print the available options.
The regret-matching algorithm for normal-form games here is equivalent to external sampling CFR (I think).
"""
import logging
from pprint import pprint
from typing import Type
import click
import numpy as np
from imperfecto.algos.regret_matching import RegretMatchingPlayer
from imperfecto.games.bar_crowding import BarCrowdingGame
from imperfecto.games.game import ExtensiveFormGame
from imperfecto.games.prisoner_dilemma import PrisonerDilemmaGame
from imperfecto.games.rock_paper_scissor import (
AsymmetricRockPaperScissorGame,
RockPaperScissorGame,
)
from imperfecto.misc.evaluate import evaluate_strategies
from imperfecto.misc.trainer import NormalFormTrainer
from imperfecto.misc.utils import run_web
def generate_random_prob_dist(n_actions: int) -> np.ndarray:
    """Draw a random strategy (probability distribution) over actions.

    Args:
        n_actions: The number of actions in the game.

    Returns:
        A numpy array of shape (n_actions,) sampled from a symmetric
        Dirichlet with unit concentration (sums to 1).
    """
    alpha = np.ones(n_actions)
    return np.random.dirichlet(alpha, size=1)[0]
def verify_nash_strategy(Game: Type[ExtensiveFormGame], nash_strategy: np.ndarray,
                         n_iters: int = 10000, n_random_strategies: int = 5) -> None:
    """
    Verifies (roughly) that the given strategy is a Nash equilibrium for a
    2-player zero-sum normal-form game, by pitting it against random
    opponent strategies: a Nash strategy should be unexploitable (payoff
    >= 0). Results are printed, not returned.

    Args:
        Game: The game to verify the strategy for.
        nash_strategy: The strategy to verify (fixed as P0's strategy).
        n_iters: The number of iterations to run each evaluation for.
        n_random_strategies: How many random P1 strategies to test against.
    """
    print(f"In {Game.__name__}, the nash strategy {nash_strategy} is unexploitable by "
          "any other strategy.")
    print("That means that it will always have 0 payoff against all strategy[p, q, 1-p-q]. Do the math with the "
          "normal form game matrix to convince yourself that this is true.")
    print("In this example, we will fix P0 strategy to be nash strategy, and vary P1 strategy")
    print("\n")
    P0_uniform_strategy = {"P0": nash_strategy}
    # Random opponent strategies, one per row of the printed table.
    strategies = [generate_random_prob_dist(
        len(Game.actions))for _ in range(n_random_strategies)]
    print("P1 strategy \t \t payoff")
    print("-" * 40)
    with np.printoptions(suppress=True, precision=2):
        for strat in strategies:
            P1_strategy = {"P1": strat}
            avg_payoffs = evaluate_strategies(Game, [
                P0_uniform_strategy, P1_strategy], n_iters=n_iters)
            print(f"{np.array2string(strat):20} \t {avg_payoffs}")
    print()
def to_train_regret_matching(Game: Type[ExtensiveFormGame], n_iters: int = 10000) -> None:
    """Train all players simultaneously by the regret-matching algorithm,
    print the average strategies/payoffs, store the run's data to JSON
    files, and launch the web visualization.

    Args:
        Game: The game to train the players for.
        n_iters: The number of iterations to run the game for.
    """
    # One regret-matching player per seat in the game.
    players = [RegretMatchingPlayer(name=f"RM{i}", n_actions=len(Game.actions))
               for i in range(Game.n_players)]
    trainer = NormalFormTrainer(Game, players, n_iters=n_iters)
    avg_payoffs = trainer.train()
    with np.printoptions(suppress=True, precision=2):
        print(
            f'Training regret-matching players for game {Game.__name__} after {n_iters} iters:')
        print('average strategies:')
        pprint(trainer.avg_strategies)
        print(f'average eps_rewards: {avg_payoffs}')
        print()
    # Per-game output filenames consumed by the web viewer.
    filenames = {
        'strategy_file': Game.__name__ + '_strategy.json',
        'avg_strategy_file': Game.__name__ + '_avg_strategy.json',
        'histories_payoffs_file': Game.__name__ + '_histories_payoffs.json',
    }
    trainer.store_data(filenames)
    run_web(filenames)
def to_train_delay_regret_matching(Game: Type[ExtensiveFormGame], n_iters: int = 10000, freeze_duration: int = 10):
    """Train players by regret matching while alternately freezing all but
    one player (a co-evolution scheme), then print average strategies and
    payoffs, store the run's data, and launch the web visualization.

    Args:
        Game: The game to train the players for.
        n_iters: The total number of iterations to run the game for.
        freeze_duration: Iterations per freeze window (one player trains
            while the rest stay frozen).
    """
    assert 0 < freeze_duration < n_iters
    # no. of intervals where someone is frozen
    freeze_interval = n_iters // freeze_duration
    players = [RegretMatchingPlayer(name=f"RM{i}", n_actions=len(Game.actions))
               for i in range(Game.n_players)]
    trainer = NormalFormTrainer(Game, players, n_iters=freeze_duration)
    for _ in range(freeze_interval):
        for i in range(Game.n_players):
            # train player i freezing the rest
            freeze_list = [player for player_id,
                           player in enumerate(players) if player_id != i]
            trainer.train(freeze_ls=freeze_list)
    with np.printoptions(suppress=True, precision=2):
        print(
            f'Training delay regret-matching players for game {Game.__name__} after {n_iters} iters:')
        print('average strategies:')
        pprint(trainer.avg_strategies)
        print(f'eps_rewards: {trainer.avg_payoffs}')
        print()
    # Per-game output filenames consumed by the web viewer.
    filenames = {
        'strategy_file': Game.__name__ + '_strategy.json',
        'avg_strategy_file': Game.__name__ + '_avg_strategy.json',
        'histories_payoffs_file': Game.__name__ + '_histories_payoffs.json',
    }
    trainer.store_data(filenames)
    run_web(filenames)
@click.command()
@click.option("--game", type=click.Choice(["RockPaperScissorGame",
                                           "AsymmetricRockPaperScissorGame", "BarCrowdingGame", "PrisonerDilemmaGame"]),
              default="RockPaperScissorGame", help="The game to demo.")
@click.option("--n_iters", type=int, default=10000, help="The number of iterations to run the game for.")
@click.option("--train_regret_matching", is_flag=True, default=False, help="Train regret matching players.")
@click.option("--train_delay_regret_matching", is_flag=True, default=False,
              help="Train delay regret matching players.")
@click.option("--verbose_level", type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
              default="INFO", help="The verbosity level of the game.")
@click.option("--seed", type=int, default=0, help="The random seed to use for the game.")
def main(game: str, n_iters: int = 10000, train_regret_matching: bool = False,
         train_delay_regret_matching: bool = False,
         verbose_level: str = "INFO", seed: int = 0) -> None:
    """Demo for N-player normal-form games using the regret-matching algorithm.

    Available games:
    ----------------
    RockPaperScissorGame, AsymmetricRockPaperScissorGame, BarCrowdingGame, PrisonerDilemmaGame

    Available options:
    ------------------
    --game: The game to demo.

    --n_iters: The number of iterations to run the game for.

    --train_regret_matching: Whether to train regret matching players.

    --train_delay_regret_matching: Whether to train delay regret matching players.

    --verbose_level: Logging verbosity (DEBUG/INFO/WARNING/ERROR/CRITICAL).

    --seed: The numpy random seed used for the run.

    We will also show the Nash equilibrium for 2-player zero-sum games so the user can verify that
    the regret matching players' strategies indeed converge to Nash.
    """
    logging.basicConfig(level=getattr(
        logging, verbose_level), format="%(message)s")
    np.random.seed(seed)
    Game_dict = {
        "RockPaperScissorGame": RockPaperScissorGame,
        "AsymmetricRockPaperScissorGame": AsymmetricRockPaperScissorGame,
        "BarCrowdingGame": BarCrowdingGame,
        "PrisonerDilemmaGame": PrisonerDilemmaGame,
    }
    # Known Nash strategies for the 2-player zero-sum games only.
    nash_strategy_dict = {
        "RockPaperScissorGame": np.array([0.5, 0.5, 0.0]),
        "AsymmetricRockPaperScissorGame": np.array([0.5, 0.5, 0.0]),
    }
    Game = Game_dict[game]
    if game in nash_strategy_dict:
        nash_strategy = nash_strategy_dict[game]
        verify_nash_strategy(Game, nash_strategy, n_iters=n_iters)
    if train_regret_matching:
        to_train_regret_matching(Game, n_iters=n_iters)
    if train_delay_regret_matching:
        to_train_delay_regret_matching(Game, n_iters=n_iters)


if __name__ == "__main__":
    main()
| 41.914286 | 120 | 0.69416 |
3d8de0b9f6069a704c0824dda5ecfa64e647e8dd | 74 | py | Python | 0364 Line of People.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 1 | 2020-12-29T21:17:26.000Z | 2020-12-29T21:17:26.000Z | 0364 Line of People.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | null | null | null | 0364 Line of People.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 4 | 2021-09-09T17:42:43.000Z | 2022-03-18T04:54:03.000Z | class Solution:
def solve(self, n, a, b):
return min(n-a,b+1)
| 18.5 | 29 | 0.554054 |
b66cc808b08aa8af3a91cdca379ed9b52a5a68d1 | 463 | py | Python | musicapp/migrations/0003_musician_slug.py | daniel-afana/MusicDB | c2c897d540e6e514403997510e91044f518c0328 | [
"MIT"
] | null | null | null | musicapp/migrations/0003_musician_slug.py | daniel-afana/MusicDB | c2c897d540e6e514403997510e91044f518c0328 | [
"MIT"
] | null | null | null | musicapp/migrations/0003_musician_slug.py | daniel-afana/MusicDB | c2c897d540e6e514403997510e91044f518c0328 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a blank-able ``slug`` CharField (max 205 chars) to Musician."""

    # Must be applied after the previous musicapp migration.
    dependencies = [
        ('musicapp', '0002_auto_20170814_0941'),
    ]

    operations = [
        migrations.AddField(
            model_name='musician',
            name='slug',
            field=models.CharField(blank=True, max_length=205),
        ),
    ]
| 22.047619 | 63 | 0.61987 |
5df75b06602427edaa4cffeddd06b1e9969783b9 | 3,725 | py | Python | kombu/transport/__init__.py | expa/kombu | 528ac975bd3b815ebe7d1b5f92126d5abd01c6cc | [
"BSD-3-Clause"
] | null | null | null | kombu/transport/__init__.py | expa/kombu | 528ac975bd3b815ebe7d1b5f92126d5abd01c6cc | [
"BSD-3-Clause"
] | null | null | null | kombu/transport/__init__.py | expa/kombu | 528ac975bd3b815ebe7d1b5f92126d5abd01c6cc | [
"BSD-3-Clause"
] | null | null | null | """
kombu.transport
===============
Built-in transports.
"""
from __future__ import absolute_import
from collections import Callable
from kombu.five import string_t
from kombu.syn import _detect_environment
from kombu.utils import symbol_by_name
def supports_librabbitmq():
    """Return True when the librabbitmq C client is importable.

    Only attempted in the 'default' (non-eventlet/gevent) environment;
    returns None otherwise or when the import fails.
    """
    if _detect_environment() != 'default':
        return
    try:
        import librabbitmq  # noqa
    except ImportError:  # pragma: no cover
        return
    return True  # pragma: no cover
def _ghettoq(name, new, alias=None):
xxx = new # stupid enclosing
def __inner():
import warnings
_new = isinstance(xxx, Callable) and xxx() or xxx
gtransport = 'ghettoq.taproot.{0}'.format(name)
ktransport = 'kombu.transport.{0}.Transport'.format(_new)
this = alias or name
warnings.warn("""
Ghettoq does not work with Kombu, but there is now a built-in version
of the {0} transport.
You should replace {1!r} with: {2!r}
""".format(name, gtransport, this))
return ktransport
return __inner
# Map of transport alias -> fully qualified "module:Transport" path, or a
# callable returning one (used for the deprecated ghettoq.* aliases).
TRANSPORT_ALIASES = {
    'amqp': 'kombu.transport.pyamqp:Transport',
    'pyamqp': 'kombu.transport.pyamqp:Transport',
    'librabbitmq': 'kombu.transport.librabbitmq:Transport',
    'memory': 'kombu.transport.memory:Transport',
    'redis': 'kombu.transport.redis:Transport',
    'SQS': 'kombu.transport.SQS:Transport',
    'sqs': 'kombu.transport.SQS:Transport',
    'beanstalk': 'kombu.transport.beanstalk:Transport',
    'mongodb': 'kombu.transport.mongodb:Transport',
    'couchdb': 'kombu.transport.couchdb:Transport',
    'zookeeper': 'kombu.transport.zookeeper:Transport',
    'django': 'kombu.transport.django:Transport',
    'sqlalchemy': 'kombu.transport.sqlalchemy:Transport',
    'sqla': 'kombu.transport.sqlalchemy:Transport',
    'SLMQ': 'kombu.transport.SLMQ.Transport',
    'slmq': 'kombu.transport.SLMQ.Transport',
    'ghettoq.taproot.Redis': _ghettoq('Redis', 'redis', 'redis'),
    'ghettoq.taproot.Database': _ghettoq('Database', 'django', 'django'),
    'ghettoq.taproot.MongoDB': _ghettoq('MongoDB', 'mongodb'),
    'ghettoq.taproot.Beanstalk': _ghettoq('Beanstalk', 'beanstalk'),
    'ghettoq.taproot.CouchDB': _ghettoq('CouchDB', 'couchdb'),
    'filesystem': 'kombu.transport.filesystem:Transport',
    'zeromq': 'kombu.transport.zmq:Transport',
    'zmq': 'kombu.transport.zmq:Transport',
    'amqplib': 'kombu.transport.amqplib:Transport',
}

# Cache of resolved transport classes, keyed by the name passed to
# get_transport_cls() (may be None for the default transport).
_transport_cache = {}
def resolve_transport(transport=None):
    """Resolve a transport name/alias into a transport class.

    Strings are looked up in ``TRANSPORT_ALIASES`` (callables in the table
    are invoked first) and then imported via ``symbol_by_name``.  Unknown
    aliases raise ``KeyError``, with a fuzzy-match suggestion when one is
    available.  Non-string arguments are returned unchanged.
    """
    if not isinstance(transport, string_t):
        return transport
    try:
        resolved = TRANSPORT_ALIASES[transport]
    except KeyError:
        # Only suggest alternatives for bare aliases, not dotted paths.
        if '.' not in transport and ':' not in transport:
            from kombu.utils.text import fmatch_best
            suggestion = fmatch_best(transport, TRANSPORT_ALIASES)
            if suggestion:
                raise KeyError(
                    'No such transport: {0}. Did you mean {1}?'.format(
                        transport, suggestion))
        raise KeyError('No such transport: {0}'.format(transport))
    if isinstance(resolved, Callable):
        resolved = resolved()
    return symbol_by_name(resolved)
def get_transport_cls(transport=None):
    """Resolve a transport class by name, with memoization.

    ``transport`` is either the full path to a transport class, e.g.
    ``"kombu.transport.pyamqp:Transport"``, or — when it contains no
    ``"."`` — an alias consulted in the alias table.  Resolved classes
    are cached in ``_transport_cache``.
    """
    try:
        return _transport_cache[transport]
    except KeyError:
        cls = resolve_transport(transport)
        _transport_cache[transport] = cls
        return cls
| 33.258929 | 76 | 0.645906 |
e3f39da13af1f3ad3dc42c15d8176c22e5fd4609 | 34,925 | py | Python | roles/custom_module/library/ibm_ss_node.py | dheren-git/ibm-spectrum-scale-install-infra | 21f86774a09d9c36fba1ca75e489c9d4b99768d3 | [
"Apache-2.0"
] | null | null | null | roles/custom_module/library/ibm_ss_node.py | dheren-git/ibm-spectrum-scale-install-infra | 21f86774a09d9c36fba1ca75e489c9d4b99768d3 | [
"Apache-2.0"
] | null | null | null | roles/custom_module/library/ibm_ss_node.py | dheren-git/ibm-spectrum-scale-install-infra | 21f86774a09d9c36fba1ca75e489c9d4b99768d3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# author: IBM Corporation
# description: Highly-customizable Ansible role module
# for installing and configuring IBM Spectrum Scale (GPFS)
# company: IBM
# license: Apache-2.0
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'IBM',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: ibm_ss_node
short_description: IBM Spectrum Scale Node Management
version_added: "0.0"
description:
- This module can be used to add, remove or retrieve information
about an IBM Spectrum Scale Node(s) from the Cluster.
options:
op:
description:
- An operation to execute on the IBM Spectrum Scale Node.
Mutually exclusive with the state operand.
required: false
state:
description:
- The desired state of the Node in relation to the cluster.
required: false
default: "present"
choices: [ "present", "absent" ]
nodefile:
description:
- Blueprint that defines all node attributes
required: false
name:
description:
- The name of the Node to be added, removed or whose
information is to be retrieved
required: false
'''
EXAMPLES = '''
# Retrive information about an existing IBM Spectrum Scale Node(s)
- name: Retrieve IBM Spectrum Scale Node information
ibm_ss_node:
op: list
# Adds a Node to the IBM Spectrum Scale Cluster
- name: Add node to IBM Spectrum Scale Cluster
ibm_ss_node:
state: present
nodefile: "/tmp/nodefile"
name: "host-01"
# Delete an existing IBM Spectrum Node from the Cluster
- name: Delete an IBM Spectrum Scale Node from Cluster
ibm_ss_node:
state: absent
name: "host-01"
'''
RETURN = '''
changed:
description: A boolean indicating if the module has made changes
type: boolean
returned: always
msg:
description: The output from the cluster create/delete operations
type: str
returned: when supported
rc:
description: The return code from the IBM Spectrum Scale mm command
type: int
returned: always
results:
description: The JSON document containing the cluster information
type: str
returned: when supported
'''
import os
import re
import sys
import json
import time
import logging
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, SUPPRESS
from ansible.module_utils.basic import AnsibleModule
#TODO: FIX THIS. If the modules and utils are located in a non standard
# path, the PYTHONPATH will need to be exported in the .bashrc
from ansible.module_utils.ibm_ss_utils import runCmd, parse_aggregate_cmd_output, RC_SUCCESS, get_logger
#from ansible.module_utils.ibm_ss_utils import runCmd, parse_aggregate_cmd_output, RC_SUCCESS
###############################################################################
## ##
## Helper Functions ##
## ##
###############################################################################
#
# This function retrieves the Role for each node in the Spectrum Scale Cluster
#
# Returns:
# role_details = {
# "Daemon Name", "IP", "Admin Name": "Role"
# }
# Where Role is "ces", quorum", "gateway" etc
#
def get_gpfs_node_roles():
    """Retrieve the role(s) of each node in the Spectrum Scale cluster.

    Parses the machine-readable output of ``mmlscluster -Y``.

    Returns:
        dict: keyed by ``"daemonNodeName,ipAddress,adminNodeName"``; the
        value is a comma-separated string built from the designation,
        otherNodeRolesAlias and otherNodeRoles columns (in that order),
        or ``""`` when the node has no special role.
    """
    logger.debug("Function Entry: get_gpfs_node_roles(). ")
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmlscluster", "-Y"])
    except Exception as exp_msg:
        logger.error("While obtaining nodes vs. role map. Execution halted!\n"
                     "Message: %s", exp_msg)
        exit(1)
    if rc:
        logger.error("Operation (mmlscluster -Y) failed.")
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)

    output = stdout.splitlines()
    # The clusterNode header row is the second line of the -Y output.
    header = output[1].split(':')
    daemon_node_token = header.index('daemonNodeName')
    ipaddress_token = header.index('ipAddress')
    admin_node_token = header.index('adminNodeName')
    designation_token = header.index('designation')
    other_role_token = header.index('otherNodeRoles')
    alias_role_token = header.index('otherNodeRolesAlias')

    role_details = {}
    for cmd_line in output:
        if not re.match(r"mmlscluster:clusterNode:\d+", cmd_line):
            continue
        fields = cmd_line.split(':')
        key = '{},{},{}'.format(fields[daemon_node_token],
                                fields[ipaddress_token],
                                fields[admin_node_token])
        # Join whichever of the three role columns are non-empty, preserving
        # the order designation, alias, other. This is equivalent to (and
        # replaces) the original exhaustive 8-branch if/elif chain.
        role_details[key] = ','.join(
            v for v in (fields[designation_token],
                        fields[alias_role_token],
                        fields[other_role_token]) if v)
    logger.debug("Function Exit: get_gpfs_node_roles(). Return Params: "
                 "role_details={0}".format(role_details))
    return role_details
def gpfs_del_nsd(all_node_disks):
    """Delete the given NSDs via ``mmdelnsd``.

    Args:
        all_node_disks (list): NSD names to delete; joined with ';' for
            a single mmdelnsd invocation. Exits the process on failure.
    """
    joined_disks = ";".join(all_node_disks)
    logger.info("** disk_name = {0}".format(joined_disks))
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmdelnsd",
                                     joined_disks])
    except Exception as exp_msg:
        logger.error("While deleting NSD. Execution halted!\nMessage: %s",
                     exp_msg)
        exit(1)
    if rc:
        logger.error("Operation (mmdelnsd %s) failed.", joined_disks)
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)
    logger.info("Operation (mmdelnsd %s) completed successfully.",
                joined_disks)
def gpfs_del_disk(instance, fs_name, disk_names):
    """Remove disks from a filesystem via ``mmdeldisk``.

    Args:
        instance (str): Node whose disks are being removed (passed to -N).
        fs_name (str): Filesystem the disks belong to.
        disk_names (list): Disk names to delete,
            e.g. ``['gpfs1nsd', 'gpfs2nsd', 'gpfs3nsd']``.
    """
    joined_disks = ";".join(disk_names)
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmdeldisk", fs_name,
                                     joined_disks, '-N', instance])
    except Exception as exp_msg:
        logger.error("While deleting disk. Execution halted!\nMessage: %s",
                     exp_msg)
        exit(1)
    if rc:
        logger.error("Operation (mmdeldisk %s %s -N %s) failed.", fs_name,
                     joined_disks, instance)
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)
    # TODO: enhance the success message beyond the obvious.
    logger.info("Operation (mmdeldisk %s %s -N %s) completed "
                "successfully.", fs_name, joined_disks, instance)
def get_all_disks_of_node(instance, region):
    """Return NSD names served by a node, from ``mmlsnsd -X -Y``.

    Args:
        instance (str): Node (serverList value) to collect disks for.
        region (str): Unused; kept for interface compatibility.

    Returns:
        list: Disk names, e.g. ``['nsd_1a_1_0', 'nsd_1c_1_0']``.
    """
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmlsnsd", '-X', '-Y'])
    except Exception as exp_msg:
        logger.error("While obtaining disk to filesystem details. "
                     "Execution halted!\nMessage: %s", exp_msg)
        exit(1)
    if "No disks were found" in stderr:
        return []

    lines = stdout.splitlines()
    header = lines[0].split(':')
    disk_idx = header.index('diskName')
    server_idx = header.index('serverList')
    remark_idx = header.index('remarks')

    # Keep only 'server node' rows whose server matches this instance.
    return [line.split(':')[disk_idx]
            for line in lines
            if re.match(r"mmlsnsd:nsd:\d+", line)
            and line.split(':')[remark_idx] == 'server node'
            and line.split(':')[server_idx] == instance]
def get_zimon_collectors():
    """Return the zimon collector node ip's from ``mmperfmon config show``.

    Returns:
        list: Collector addresses parsed from the ``colCandidates`` line;
        empty when no perfmon configuration exists or the line is absent.
    """
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmperfmon", "config",
                                     "show"])
    except Exception as exp_msg:
        logger.error("While obtaining zimon configuration details. "
                     "Execution halted!\nMessage: %s", exp_msg)
        exit(1)
    if rc:
        if "There is no performance monitoring configuration data" in stderr:
            return []
        logger.error("Operation (mmperfmon config show) failed.")
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)
    logger.info("Operation (mmperfmon config show) completed "
                "successfully.")

    output = stdout.splitlines()
    col_regex = re.compile(r'colCandidates\s=\s(?P<collectors>.*)')
    for cmd_line in output:
        match = col_regex.match(cmd_line)
        if match:
            collectors = match.group('collectors')
            collectors = collectors.replace("\"", '').replace(" ", '')
            collectors = collectors.split(',')
            logger.info("Identified collectors: %s ", collectors)
            return collectors
    # Bug fix: previously fell off the end and implicitly returned None
    # when no colCandidates line was found; callers expect a list.
    return []
def get_allfsnames():
    """Return the names of all filesystems in the cluster (``mmlsfs all -Y``).

    Returns:
        list: Unique filesystem device names, e.g. ``['gpfs0', 'gpfs1']``.
    """
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmlsfs", "all", "-Y"])
    except Exception as exp_msg:
        logger.error("While obtaining list of filesystems. Execution halted!\n"
                     "Message: %s", exp_msg)
        # Bug fix: execution previously continued with stdout/stderr/rc
        # undefined, raising NameError below. Halt like the sibling
        # mm* wrappers in this module do.
        exit(1)
    fs_names = []
    if rc:
        if 'mmlsfs: No file systems were found.' in stdout or \
                'mmlsfs: No file systems were found.' in stderr:
            logger.debug("No filesystems were found in the cluster.")
            return fs_names
        logger.error("Operation (mmlsfs all -Y) failed:")
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)
    output = stdout.splitlines()
    device_index = output[0].split(':').index('deviceName')
    for cmd_line in output[1:]:
        fs_names.append(cmd_line.split(':')[device_index])
    # De-duplicate before returning (mmlsfs repeats the device per row).
    return list(set(fs_names))
def get_all_fs_to_disk_map(fs_list):
    """Map each filesystem to its disks via ``mmlsdisk <fs> -L -Y``.

    Args:
        fs_list (list): Filesystem names in the cluster.

    Returns:
        dict: Filesystem name -> list of disk names, e.g.
        ``{'fs1': ['gpfs1nsd', 'gpfs2nsd'], 'fs2': ['gpfs3nsd']}``.
    """
    # Bug fix: removed a stray print(each_fs) debug statement; writing to
    # stdout corrupts the JSON document this Ansible module must emit.
    fs_disk_map = {}
    for each_fs in fs_list:
        disk_name = []
        try:
            stdout, stderr, rc = runCmd(['/usr/lpp/mmfs/bin/mmlsdisk', each_fs,
                                         '-L', '-Y'])
        except Exception as exp_msg:
            logger.error("While obtaining filesystem to disk map. "
                         "Execution halted! Message: %s",
                         exp_msg)
            exit(1)
        if rc:
            logger.error("Operation (mmlsdisk %s -L -Y) failed.", each_fs)
            logger.error("stdout: %s\nstderr: %s", stdout, stderr)
            exit(1)
        output = stdout.splitlines()
        disk_token = output[0].split(':').index('nsdName')
        for cmd_line in output:
            if re.match(r"mmlsdisk::\d+", cmd_line):
                disk_name.append(cmd_line.split(':')[disk_token])
        fs_disk_map[each_fs] = disk_name
    return fs_disk_map
def gpfs_df_disk(fs_name):
    """Return per-disk capacity information for a filesystem via ``mmdf``.

    Args:
        fs_name (str): Filesystem name.

    Returns:
        dict: Disk name -> ``{'free_size': int, 'used_size': int,
        'percent': str}`` (percent of free blocks, as reported by mmdf).
    """
    try:
        # TODO
        # The original code executed "/usr/lpp/mmfs/bin/mmdf <fs_name> -d -Y"
        # but this did not work if there were multiple Pools with a separate
        # System Pool, so the "-d" flag has been removed. Check why "-d" was
        # used in the first place.
        stdout, stderr, rc = runCmd(['/usr/lpp/mmfs/bin/mmdf', fs_name,
                                     '-Y'])
    except Exception as exp_msg:
        # Bug fix: the original logged a bare "Code:, Message: " placeholder
        # and dropped the actual exception text.
        logger.error("While obtaining filesystem capacity. Execution halted!\n"
                     "Message: %s", exp_msg)
        exit(1)
    if rc:
        # Bug fix: error message updated to match the command actually run
        # (no "-d" flag).
        logger.error("Operation (mmdf %s -Y) failed.", fs_name)
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)

    output = stdout.splitlines()
    header = output[0].split(':')
    disk_token = header.index('nsdName')
    percent_token = header.index('freeBlocksPct')
    free_token = header.index('freeBlocks')
    size_token = header.index('diskSize')

    disk_size_map = {}
    for cmd_line in output:
        if re.match(r"mmdf:nsd:\d+", cmd_line):
            fields = cmd_line.split(':')
            free = int(fields[free_token])
            used = int(fields[size_token]) - free
            disk_size_map[fields[disk_token]] = \
                {'free_size': free, 'used_size': used,
                 'percent': fields[percent_token]}
    return disk_size_map
def gpfs_remove_nodes(existing_instances, skip=False):
    """Shut down GPFS on the given node(s) and delete them from the cluster.

    Args:
        existing_instances (str): Node list passed to ``-N``.
        skip (bool): When True, skip the ``mmshutdown`` step and only run
            ``mmdelnode``.
    """
    if not skip:
        try:
            # TODO: Should we first unmount to ensure proper shutdown?
            stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmshutdown", "-N",
                                         existing_instances])
        except Exception as exp_msg:
            # Bug fix: include the exception text instead of the empty
            # "Code:, Message: " placeholder the original logged.
            logger.error("While shutting down gpfs. Execution halted!\n"
                         "Message: %s", exp_msg)
            exit(1)
        if rc:
            logger.error("Operation (mmshutdown -N %s) failed.",
                         existing_instances)
            logger.error("stdout: %s\nstderr: %s", stdout, stderr)
            exit(1)
        logger.info("Operation (mmshutdown -N %s) completed successfully.",
                    existing_instances)

    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmdelnode", "-N",
                                     existing_instances])
    except Exception as exp_msg:
        # Bug fix: same placeholder-only log message as above.
        logger.error("While deleting node(s) from gpfs cluster. "
                     "Execution halted!\nMessage: %s", exp_msg)
        exit(1)
    if rc:
        logger.error("Operation (mmdelnode -N %s) failed.", existing_instances)
        logger.error("stdout: %s\nstderr: %s", stdout, stderr)
        exit(1)
    logger.info("Operation (mmdelnode -N %s) completed successfully.",
                existing_instances)
def get_node_nsd_info():
    """Build node->NSD and NSD->node maps from ``mmlsnsd -X -Y``.

    Only rows whose remarks column is ``'server node'`` are considered.

    Returns:
        tuple: ``(node_nsd_map, nsd_node_map)`` where node_nsd_map maps a
        node name to the list of NSDs it serves and nsd_node_map maps an
        NSD name to the list of nodes serving it. Both empty when no
        disks are found.
    """
    try:
        stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmlsnsd", '-X', '-Y'])
    except Exception as exp_msg:
        logger.error("While obtaining disk to filesystem details. "
                     "Execution halted!\nMessage: %s", exp_msg)
        exit(1)
    if "No disks were found" in stderr:
        return {}, {}

    output = stdout.splitlines()
    header = output[0].split(':')
    nsd_token_idx = header.index('diskName')
    server_token_idx = header.index('serverList')
    remarks_token_idx = header.index('remarks')

    node_nsd_map = {}
    nsd_node_map = {}
    for line in output:
        if not re.match(r"mmlsnsd:nsd:\d+", line):
            continue
        fields = line.split(':')
        if fields[remarks_token_idx] != 'server node':
            continue
        nsd_name = fields[nsd_token_idx]
        host_name = fields[server_token_idx]
        # setdefault replaces the original "in dict.keys()" check plus
        # manual list re-assignment.
        node_nsd_map.setdefault(host_name, []).append(nsd_name)
        nsd_node_map.setdefault(nsd_name, []).append(host_name)
    return node_nsd_map, nsd_node_map
###############################################################################
## ##
## Functions to add a node to the cluster ##
## ##
###############################################################################
def add_node(name, stanza_path):
    """Add the node(s) described by a node stanza file to the cluster.

    Args:
        name (str): Node name (currently unused by the command; kept for
            interface compatibility with the caller).
        stanza_path (str): Path to the node description file passed to
            ``mmaddnode -N``.

    Returns:
        tuple: ``(rc, msg)`` where msg is stdout on success, stderr on
        failure.
    """
    # TODO: Make This idempotent
    # Bug fix: the original passed the undefined name `nodefile_path`
    # to mmaddnode, raising NameError; the intended value is stanza_path.
    stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmaddnode",
                                 "-N", stanza_path,
                                 "--accept"],
                                sh=False)
    if rc == RC_SUCCESS:
        msg = stdout
    else:
        msg = stderr
    return rc, msg
###############################################################################
## ##
## Functions to remove node(s) from cluster ##
## ##
###############################################################################
def remove_multi_attach_nsd(nodes_to_be_deleted):
    """Detach nodes from any shared (multi-attached) NSDs they serve.

    For every NSD served by a node scheduled for deletion that is also
    served by at least one other node, the NSD server access list is
    rewritten via ``mmchnsd`` to exclude the node being deleted.
    Dedicated (single-server) NSDs are left untouched for later deletion.

    Args:
        nodes_to_be_deleted (list): Node names to remove from server lists.
    """
    logger.debug("Function Entry: remove_multi_attach_nsd(). "
                 "Args nodes_to_be_deleted={0}".format(nodes_to_be_deleted))
    # Iterate through each server to be deleted
    for node_to_delete in nodes_to_be_deleted:
        logger.debug("Processing all NSDs on node={0} for "
                     "removal".format(node_to_delete))
        node_map, nsd_map = get_node_nsd_info()
        if node_to_delete not in node_map:
            continue
        for nsd_to_delete in node_map[node_to_delete]:
            # Clone the list to avoid modifying the map's own content.
            nsd_attached_to_nodes = (nsd_map[nsd_to_delete])[:]
            nsd_attached_to_nodes.remove(node_to_delete)
            if len(nsd_attached_to_nodes) >= 1:
                # Other servers can still reach this NSD, so just shrink
                # the server access list, e.g. mmchnsd "nsd1:host-nsd-01"
                server_access_list = ','.join(map(str, nsd_attached_to_nodes))
                server_access_list = nsd_to_delete+":"+server_access_list
                try:
                    stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmchnsd",
                                                 server_access_list],
                                                sh=False)
                except Exception as exp_msg:
                    # Bug fix: the original format string contained "{2)"
                    # which raised ValueError while reporting the error.
                    logger.error("Exception encountered during execution "
                                 "of modifying NSD server access list "
                                 "for NSD={0} on Node={1}. Exception "
                                 "Message={2}".format(nsd_to_delete,
                                                      node_to_delete,
                                                      exp_msg))
                    exit(1)
                if rc != RC_SUCCESS:
                    logger.error("Failed to modify NSD server access list "
                                 "for NSD={0} on Node={1}. Output={2} "
                                 "Error={3}".format(nsd_to_delete,
                                                    node_to_delete,
                                                    stdout,
                                                    stderr))
                    exit(1)
                logger.info("Successfully modify NSD server access "
                            "list for NSD={0} on Node={1}".format(
                                nsd_to_delete, node_to_delete))

    # All "mmchnsd" calls are asynchronous, so wait for the changes to be
    # committed before proceeding.
    # TODO: poll for completion instead of a fixed sleep.
    time.sleep(60)
    logger.debug("Function Exit: remove_multi_attach_nsd(). ")
#
# This function performs removal / termination of nodes from the IBM Spectrum
# Scale cluster. If the node is a server node that has access to NSD(s), then
# we attempt to remove access to this NSD (if the NSD is a shared NSD) or
# delete access to it (if its a dedicated NSD).
#
# Args:
# nodes_to_delete: Nodes to be deleted from the cluster
#
# Return:
# rc: Return code
# msg: Output message
def remove_nodes(nodes_to_delete):
    """Remove nodes from the IBM Spectrum Scale cluster.

    For each node: refuse removal when the node holds a protected role;
    detach the node from shared (multi-attached) NSDs; migrate data off
    and delete any dedicated NSDs (only when at least 20% of the
    filesystem would remain free afterwards); finally delete the node.

    Args:
        nodes_to_delete (list): Node names/IPs to remove.

    Returns:
        tuple: ``(0, "")`` on success; failures call ``exit(1)``.
    """
    logger.debug("Function Entry: remove_nodes(). "
                 "Args: node_list={0}".format(nodes_to_delete))

    # Precheck nodes to make sure they do not have any roles that should
    # not be deleted.
    gpfs_node_roles = get_gpfs_node_roles()
    ROLES_NOT_TO_DELETE = ['quorum', 'quorumManager', 'ces', 'gateway',
                           'tct', 'snmp_collector']
    for each_ip in nodes_to_delete:
        # node_details is "daemon name,ip address,admin name"; the node may
        # be referred to by any of those identifiers.
        for node_details, node_roles in gpfs_node_roles.items():
            if each_ip not in node_details.split(','):
                continue
            for role in ROLES_NOT_TO_DELETE:
                # Bug fix: the original tested `role in gpfs_node_roles`
                # (the dict of nodes) instead of the node's own role
                # string, so protected nodes were never detected.
                if role in node_roles.split(','):
                    logger.info("Cannot remove node (%s), as it was "
                                "holding (%s) role.", each_ip, role)
                    logger.error("Please re-run the current command "
                                 "without ip(s) (%s). Execution halted!",
                                 each_ip)
                    exit(1)

    # An NSD node can have access to a multi attach NSD (shared NSD) or
    # dedicated access to the NSD (FPO model) or a combination of both.
    # First modify the Shared NSDs and remove access for all NSD nodes
    # that are to be deleted; other servers keep access to the data.
    remove_multi_attach_nsd(nodes_to_delete)

    # Then delete any dedicated NSDs (forcing data to be migrated to other
    # NSDs in the same filesystem) and finally delete the node itself.
    node_map, nsd_map = get_node_nsd_info()
    all_fs_list = get_allfsnames()
    node_disk_map = get_all_fs_to_disk_map(all_fs_list)
    # NOTE(review): collectors are looked up but currently unused here.
    zimon_col_nodes = get_zimon_collectors()
    logger.debug("Identified all filesystem to disk mapping: %s",
                 node_disk_map)

    for each_ip in nodes_to_delete:
        logger.debug("Operating on server: %s", each_ip)
        all_node_disks = get_all_disks_of_node(each_ip, "")
        logger.debug("Identified disks for server (%s): %s", each_ip,
                     all_node_disks)
        if len(all_node_disks) == 0:
            # Node serves no disks; it can be removed directly.
            gpfs_remove_nodes(each_ip)
            continue

        # Restrict the filesystem->disk map to this node's disks.
        # (items() instead of the original py2-only iteritems().)
        fs_disk_map = {}
        for fs_name, disks in node_disk_map.items():
            fs_disk_map[fs_name] = [d for d in disks if d in all_node_disks]
        logger.debug("Identified filesystem to disk map for server (%s): %s",
                     each_ip, fs_disk_map)

        for each_fs in fs_disk_map:
            disk_cap = gpfs_df_disk(each_fs)
            logger.debug("Identified disk capacity for filesystem (%s): %s",
                         each_fs, disk_cap)
            # Allow disk deletion only if, after migrating the data held on
            # the disks being deleted, at least 20% of the remaining disks'
            # capacity would still be free.
            size_to_be_del = 0
            for each_disk in fs_disk_map[each_fs]:
                size_to_be_del += disk_cap[each_disk]['used_size']
            logger.debug("Identified data size going to be deleted from "
                         "filesystem (%s): %s", each_fs, size_to_be_del)

            other_disks = [d for d in disk_cap
                           if d not in fs_disk_map[each_fs]]
            logger.debug("Identified other disks of the filesystem (%s): %s",
                         each_fs, other_disks)

            total_free = 0
            for each_disk in other_disks:
                total_free += disk_cap[each_disk]['free_size']
            logger.debug("Identified free size in other disks of the "
                         "filesystem (%s): %s", each_fs, total_free)
            # Robustness fix: avoid ZeroDivisionError when no other disks
            # remain in this filesystem.
            if total_free == 0:
                logger.error("No enough space left for restriping data for "
                             "filesystem (%s). Execution halted!", each_fs)
                exit(1)

            size_avail_after_migration = total_free - size_to_be_del
            logger.debug("Expected size after restriping of the filesystem "
                         "(%s): %s", each_fs, size_avail_after_migration)
            # Bug fix: removed a stray print() of this value; stdout must
            # stay clean for the module's JSON result.
            percent = int(size_avail_after_migration*100/total_free)
            logger.debug("Expected percentage of size left after restriping "
                         "of the filesystem (%s): %s", each_fs, percent)
            if percent < 20:
                logger.error("No enough space left for restriping data for "
                             "filesystem (%s). Execution halted!", each_fs)
                exit(1)

            if fs_disk_map[each_fs]:
                # mmdeldisk is skipped when there are no disks to delete.
                gpfs_del_disk(each_ip, each_fs, fs_disk_map[each_fs])

        if all_node_disks:
            # mmdelnsd is skipped when there are no disks to delete.
            gpfs_del_nsd(all_node_disks)
        gpfs_remove_nodes(each_ip)

    logger.debug("Function Exit: remove_nodes().")
    return 0, ""
###############################################################################
## ##
## Functions to retrieve Node information ##
## ##
###############################################################################
def get_node_info(node_names):
    """Retrieve cluster/node information via ``mmlscluster -Y``.

    Args:
        node_names (list): Node names. Currently unused - the full
            cluster summary is returned regardless.

    Returns:
        tuple: ``(rc, msg, result_json)`` where result_json is the JSON
        document built from the parsed command output (empty on failure).
    """
    # NOTE(review): MMLSCLUSTER_SUMMARY_FIELDS is neither defined nor
    # imported in this module, so the success path below raises NameError
    # at runtime - confirm where this constant should come from
    # (ibm_ss_utils?).
    msg = result_json = ""
    stdout, stderr, rc = runCmd(["/usr/lpp/mmfs/bin/mmlscluster","-Y"], sh=False)
    if rc == RC_SUCCESS:
        result_dict = parse_aggregate_cmd_output(stdout,
                                                 MMLSCLUSTER_SUMMARY_FIELDS)
        result_json = json.dumps(result_dict)
        msg = "mmlscluster successfully executed"
    else:
        msg = stderr
    return rc, msg, result_json
###############################################################################
## ##
## Main Function ##
## ##
###############################################################################
def main():
    """Ansible module entry point: parse arguments and dispatch."""
    # Setup the module argument specifications
    scale_arg_spec = dict(
        op = dict(
            type='str',
            choices=['get'],
            required=False
        ),
        state = dict(
            type='str',
            choices=['present', 'absent'],
            required=False
        ),
        nodefile = dict(
            type='str',
            required=False
        ),
        name = dict(
            type='str',
            required=False
        )
    )

    # Bug fix: the required_if rule and the add_node() call below referenced
    # a non-existent "stanza" option; the declared option is "nodefile".
    scale_req_args = [
        [ "state", "present", [ "nodefile", "name" ] ],
        [ "state", "absent", [ "name" ] ],
        [ "op", "get", [ "name" ] ]
    ]

    scale_req_one_of_args = [
        [ "op", "state" ]
    ]

    # Instantiate the Ansible module with the given argument specifications
    module = AnsibleModule(
        argument_spec=scale_arg_spec,
        required_one_of=scale_req_one_of_args,
        required_if=scale_req_args,
    )

    rc = RC_SUCCESS
    msg = result_json = ""
    state_changed = False
    if module.params['op'] and "get" in module.params['op']:
        # Retrieve the IBM Spectrum Scale node (cluster) information
        node_name_str = module.params['name']
        rc, msg, result_json = get_node_info(node_name_str.split(','))
    elif module.params['state']:
        if "present" in module.params['state']:
            # Add node(s) described by the node (stanza) file. Argument
            # order matches add_node(name, stanza_path).
            rc, msg = add_node(
                module.params['name'],
                module.params['nodefile']
            )
        else:
            listofserver = module.params['name']
            # Delete the existing IBM Spectrum Scale node(s) from the cluster
            rc, msg = remove_nodes(listofserver.split(','))
    if rc == RC_SUCCESS:
        state_changed = True

    # Module is done. Return back the result
    module.exit_json(changed=state_changed, msg=msg, rc=rc, result=result_json)
if __name__ == '__main__':
    # Set up the Logger. Print to console and file.
    # NOTE: `logger` is the module-level global used by all functions above.
    logger = get_logger()
    logger.addHandler(logging.StreamHandler())
    main()
| 40.005727 | 104 | 0.517051 |
94080e3e659d9a8ffa5950fa74300f54ea5bc0a1 | 829 | py | Python | python/tests/vrp/test_vrp_constraint_programming.py | memgraph/mage | 8c389146dfce35c436e941b04655d9f758351e46 | [
"Apache-2.0"
] | 67 | 2021-01-29T08:38:11.000Z | 2022-03-22T08:39:47.000Z | python/tests/vrp/test_vrp_constraint_programming.py | memgraph/mage | 8c389146dfce35c436e941b04655d9f758351e46 | [
"Apache-2.0"
] | 35 | 2021-04-12T12:51:17.000Z | 2022-03-18T13:24:39.000Z | python/tests/vrp/test_vrp_constraint_programming.py | memgraph/mage | 8c389146dfce35c436e941b04655d9f758351e46 | [
"Apache-2.0"
] | 4 | 2021-07-20T10:59:12.000Z | 2021-12-15T08:07:11.000Z | import pytest
import numpy as np
from mage.geography import InvalidDepotException
from mage.constraint_programming.vrp_cp_solver import VRPConstraintProgrammingSolver
@pytest.fixture
def default_distance_matrix():
    """Symmetric 3x3 distance matrix shared by the tests below."""
    return np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
def test_negative_depot_index_raise_exception(default_distance_matrix):
    """A negative depot index must raise InvalidDepotException."""
    with pytest.raises(InvalidDepotException):
        VRPConstraintProgrammingSolver(
            no_vehicles=2, distance_matrix=default_distance_matrix, depot_index=-1
        )
def test_depot_index_to_big_raise_exception(default_distance_matrix):
    """A depot index one past the last node must raise InvalidDepotException."""
    with pytest.raises(InvalidDepotException):
        VRPConstraintProgrammingSolver(
            no_vehicles=2,
            distance_matrix=default_distance_matrix,
            depot_index=len(default_distance_matrix),
        )
| 30.703704 | 84 | 0.755127 |
869b03d716a0b33dbf3b52cd44039aff00f4ca96 | 11,297 | py | Python | ndbTools.py | jmtoung/26p | 84264015e3112daa2b20d83ac9d364fc4b497cea | [
"Apache-2.0"
] | 1 | 2020-05-27T17:01:35.000Z | 2020-05-27T17:01:35.000Z | ndbTools.py | jmtoung/26p | 84264015e3112daa2b20d83ac9d364fc4b497cea | [
"Apache-2.0"
] | null | null | null | ndbTools.py | jmtoung/26p | 84264015e3112daa2b20d83ac9d364fc4b497cea | [
"Apache-2.0"
] | null | null | null | from google.appengine.ext import ndb
import re
import datetime
# function for getting entity from GAE by key
def GetData(key):
    # Fetch the datastore entity addressed by this urlsafe key string.
    # NOTE(review): a second GetData defined later in this module shadows
    # this one at import time, so this version is effectively dead code.
    return ndb.Key(urlsafe=key).get()
# function for querying GAE
def QueryData(model, queryParams):
    """Run an ndb query against `model` described by the `queryParams` dict.

    queryParams may contain:
      - 'ancestor': a urlsafe key string, or a dict carrying one under 'key'
      - 'filters': list of dicts with 'property', 'operator' and 'value',
        combined with AND
      - 'orders': dict with 'property' and 'direction' ('ASC' or 'DESC')

    Returns the list of matching entities.
    """
    newQueryParams = {}
    if 'ancestor' in queryParams:
        if isinstance(queryParams['ancestor'], basestring):
            ancestor = ndb.Key(urlsafe=queryParams['ancestor'])
        elif isinstance(queryParams['ancestor'], dict):
            ancestor = ndb.Key(urlsafe=queryParams['ancestor']['key'])
        else:
            # Bug fix: the old message read queryParams['key'], which itself
            # raised KeyError; report the offending 'ancestor' value instead.
            raise Exception('invalid ancestor parameter in queryParams: ' + str(queryParams['ancestor']))
        newQueryParams['ancestor'] = ancestor
    if 'filters' in queryParams:
        filterNode = []
        for f in queryParams['filters']:
            filterNode.append(ndb.FilterNode(f['property'], f['operator'], f['value']))
        # All individual filters must hold simultaneously.
        newQueryParams['filters'] = ndb.AND(*filterNode)
    query = model.query(**newQueryParams)
    if 'orders' in queryParams:
        if queryParams['orders']['direction'] == 'ASC':
            query = query.order(getattr(model, queryParams['orders']['property']))
        elif queryParams['orders']['direction'] == 'DESC':
            # A leading minus on the model property sorts descending in ndb.
            query = query.order(-getattr(model, queryParams['orders']['property']))
        else:
            raise Exception('invalid direction parameter for order: ' + queryParams['orders']['direction'])
    return query.fetch()
# function for adding a new GAE entity
def AddData(model, data, parent=None):
    """Create a new `model` entity from `data`, persist it, and return it."""
    new_entity = _CreateEntity(model, data, parent)
    new_entity.put()
    return new_entity
# private helper function for adding data
# private helper function for adding data
def _CreateEntity(model, data, parent=None):
    """Build (but do not persist) a `model` entity populated from the
    `data` dict, optionally parented under `parent`.

    `parent` may be given as a key object, a urlsafe string, a dict with a
    'key' entry, or a list of (kind, id) pairs. Raises Exception for a
    required property missing from `data` or an invalid `parent`.
    """
    entity = None
    if parent:
        key = None
        # NOTE(review): this tests ndb.model.KeyProperty (the model property
        # descriptor class); an actual key instance would be ndb.Key — verify.
        if isinstance(parent, ndb.model.KeyProperty):
            key = parent
        elif isinstance(parent, basestring):
            key = ndb.Key(urlsafe=parent)
        elif isinstance(parent, dict):
            key = ndb.Key(urlsafe=parent['key'])
        elif isinstance(parent, list):
            key = ndb.Key(pairs=parent)
        else:
            raise Exception('invalid parent parameter to CreateEntity method')
        # Only attach the parent if the referenced entity actually exists;
        # otherwise silently fall back to a root entity.
        if key.get():
            entity = model(parent=key)
        else:
            entity = model()
    else:
        entity = model()
    for x in model._properties:
        property = model._properties[x]
        if x in data:
            if property._repeated:
                # Repeated properties: validate each element and de-duplicate.
                values = _CheckValueIntegrityList(property, data[x])
                setattr(entity, x, values)
            else:
                value = _CheckValueIntegrity(property, data[x])
                setattr(entity, x, value)
        else:
            if property._required:
                raise Exception('required attribute ' + x + ' not defined')
    return entity
# private helper function for adding data
def _CheckValueIntegrityList(property, data):
    """Validate every element of a repeated property, dropping duplicates
    while preserving first-seen order."""
    checked = []
    for item in data:
        candidate = _CheckValueIntegrity(property, item)
        if candidate not in checked:
            checked.append(candidate)
    return checked
# private helper function for adding data
def _CheckValueIntegrity(property, data):
    """Validate `data` against the type of the given ndb property and return
    the value that should be stored.

    Date/Time/DateTime properties accept strings, which are parsed with the
    As*Object helpers; StructuredProperty recurses via _CreateEntity;
    KeyProperty accepts a urlsafe string or a dict with a 'key' entry and
    verifies the referenced entity exists.

    Raises Exception when the value does not match the property type.
    """
    if isinstance(property, ndb.model.StringProperty):
        if isinstance(data, basestring):
            return data
        else:
            raise Exception('property ' + str(property) + ' expects StringProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.TextProperty):
        if isinstance(data, basestring):
            return data
        else:
            raise Exception('property ' + str(property) + ' expects TextProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.BooleanProperty):
        if isinstance(data, bool):
            return data
        else:
            raise Exception('property ' + str(property) + ' expects BooleanProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.IntegerProperty):
        if isinstance(data, int):
            return data
        else:
            raise Exception('property ' + str(property) + ' expects IntegerProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.FloatProperty):
        if isinstance(data, float):
            return data
        elif isinstance(data, int):
            # Integers are silently widened to float.
            return float(data)
        else:
            raise Exception('property ' + str(property) + ' expects FloatProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.DateProperty):
        if isinstance(data, basestring):
            return AsDateObject(data)
        else:
            # Bug fix: this message previously claimed 'StringProperty'.
            raise Exception('property ' + str(property) + ' expects DateProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.TimeProperty):
        if isinstance(data, basestring):
            return AsTimeObject(data)
        else:
            # Bug fix: this message previously claimed 'StringProperty'.
            raise Exception('property ' + str(property) + ' expects TimeProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.DateTimeProperty):
        if isinstance(data, basestring):
            return AsDateTimeObject(data)
        else:
            # Bug fix: this message previously claimed 'StringProperty'.
            raise Exception('property ' + str(property) + ' expects DateTimeProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
    elif isinstance(property, ndb.model.StructuredProperty):
        # Nested entity: recursively build it from the nested dict.
        return _CreateEntity(property._modelclass, data)
    elif isinstance(property, ndb.model.KeyProperty):
        urlsafe = None
        if isinstance(data, basestring):
            urlsafe = data
        elif isinstance(data, dict):
            urlsafe = data['key']
        else:
            raise Exception('property ' + str(property) + ' expects KeyProperty: received ' + str(data) + ' ' + str(type(data)) + ' instead')
        key = None
        if urlsafe:
            key = ndb.Key(urlsafe=urlsafe)
        # Only accept keys that resolve to a real entity.
        if key and key.get():
            return key
        else:
            raise Exception('key points to nonexistent object: ' + str(key))
    else:
        raise Exception('property ' + str(property) + ' not yet implemented')
def DeleteData(key):
    """Delete the entity addressed by `key` (an ndb.Key or a urlsafe string)."""
    if isinstance(key, basestring):
        key = ndb.Key(urlsafe=key)
    elif isinstance(key, ndb.Key):
        # Bug fix: the old check tested ndb.model.KeyProperty (the model
        # descriptor class), so genuine ndb.Key instances were rejected.
        pass
    else:
        raise Exception('invalid key parameter passed to DeleteData')
    key.delete()
    return
# function for getting an object referenced by 'key'
def GetData(key):
    """Return the entity referenced by `key`.

    `key` may be a urlsafe key string or a dict carrying one under 'key'.
    """
    if isinstance(key, basestring):
        return ndb.Key(urlsafe=key).get()
    elif isinstance(key, dict):
        # Bug fix: the dict branch previously returned the ndb.Key itself
        # instead of fetching the entity like the string branch does.
        return ndb.Key(urlsafe=key['key']).get()
    else:
        raise Exception('unknown key type')
# function for saving a python dict object (obj) into GAE entity (referenced by 'key')
def SaveData(key, obj):
    """Copy the values from the plain dict `obj` onto the entity referenced
    by the urlsafe string `key`, persist it, and return the entity.

    Computed properties are skipped; repeated properties are validated and
    de-duplicated element by element.
    """
    entity = ndb.Key(urlsafe=key).get()
    model = entity.__class__
    for name in model._properties:
        prop = model._properties[name]
        if isinstance(prop, ndb.model.ComputedProperty):
            continue  # derived values are never written directly
        if prop._repeated:
            setattr(entity, name, _CheckValueIntegrityList(prop, obj[name]))
        else:
            setattr(entity, name, _CheckValueIntegrity(prop, obj[name]))
    entity.put()
    return entity
# function for returning a ndb object as a dictionary
def AsDict(obj, level=0):
    """Recursively convert an ndb.Key or ndb.Model into a plain dictionary.

    Keys deeper than two levels of recursion collapse to their urlsafe
    string; keys whose entity no longer exists become None. Date/time
    values are stringified.
    """
    if isinstance(obj, ndb.Key):
        if level > 2:
            # Depth cap: stop recursing and return an opaque reference.
            return obj.urlsafe()
        entity = obj.get()
        if entity is None:
            return None
        result = {'id': obj.id(), 'key': obj.urlsafe()}
    elif isinstance(obj, ndb.Model):
        entity = obj
        result = {}
    else:
        raise Exception('invalid parameter obj passed to AsDict')
    for name in entity._properties:
        value = getattr(entity, name)
        if isinstance(value, ndb.Key):
            value = AsDict(value, level=(level + 1))
        elif isinstance(value, list):
            converted = [AsDict(item, level=(level + 1)) for item in value]
            value = [item for item in converted if item is not None]
        elif isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
            value = str(value)
        result[name] = value
    return result
# function for returning a ndb object as a dictionary
# function for returning a ndb object as a dictionary
# NOTE(review): dead backup copy of AsDict kept for reference. It contains
# Python-2 print-statement debugging, the Model branch never populates or
# returns its dict, and it falls through to an error string. Candidate for
# deletion once AsDict is confirmed stable.
def AsDictBACKUP(obj, level = 0):
    if isinstance(obj, ndb.Key):
        dict = {'id': obj.id(), 'key': obj.urlsafe()}
        if obj.get() is None:
            return None
        if level > 2:
            # Depth cap: deep keys collapse to their urlsafe string.
            return obj.urlsafe()
        for p in obj.get()._properties:
            attr = getattr(obj.get(), p)
            # Leftover debugging for a specific 'shipping' property.
            if p == "shipping":
                print '~~~'
                print attr
                print '~~~'
            if isinstance(attr, ndb.Key):
                attr = AsDict(attr, level = (level + 1))
            elif isinstance(attr, list):
                print 'p: ' + str(p)
                print 'attr: ' + str(attr)
                attr = [AsDict(a, level = (level + 1)) for a in attr]
                attr = [a for a in attr if not a is None]
            elif isinstance(attr, (datetime.datetime, datetime.date, datetime.time)):
                attr = str(attr)
            dict[p] = attr
        return dict
    elif isinstance(obj, ndb.Model):
        # Model branch only prints diagnostics; it never builds a result.
        dict = {}
        for p in obj._properties:
            print '@@@'
            print p
            print getattr(obj, p)
            print '@@@'
    print obj
    print type(obj)
    print isinstance(obj, ndb.Model)
    print '/////'
    return '{error: "AsDict()" requires entity key}'
# function for returning a date and time string as a date object
def AsDateTimeObject(dt):
    """Extract a 'YYYY-MM-DD' date and an 'HH:MM' time from `dt` and return
    the corresponding datetime.datetime (seconds are dropped).

    Raises Exception when no such date/time pair is present.
    """
    match = re.match(r'.*(\d{4}-\d{2}-\d{2}).*(\d{2}:\d{2}).*', dt)
    if not match:
        raise Exception('invalid DateTime parameter: ' + str(dt))
    date_part, time_part = match.groups()
    year, month, day = [int(piece) for piece in date_part.split('-')]
    hour, minute = [int(piece) for piece in time_part.split(':')]
    return datetime.datetime(year, month, day, hour, minute)
def AsDateObject(d):
    """Extract a 'YYYY-MM-DD' date from `d` and return a datetime.date.

    Raises Exception when no date is present. Bug fix: the old code
    *returned* the Exception object instead of raising it, so callers
    received an Exception instance as if it were a date.
    """
    dateMatch = re.match(r'.*(\d{4}-\d{2}-\d{2}).*', d)
    if dateMatch:
        dateValue = dateMatch.groups()[0].split('-')
        dateValue = [int(dv) for dv in dateValue]
        return datetime.date(dateValue[0], dateValue[1], dateValue[2])
    else:
        raise Exception('invalid Date parameter: ' + str(d))
def AsTimeObject(t):
    """Extract an 'HH:MM' time from `t` and return a datetime.time.

    Raises Exception when no time is present. Bug fix: the old code
    *returned* the Exception object instead of raising it.
    """
    timeMatch = re.match(r'.*(\d{2}:\d{2}).*', t)
    if timeMatch:
        timeValue = timeMatch.groups()[0].split(':')
        timeValue = [int(tv) for tv in timeValue]
        return datetime.time(timeValue[0], timeValue[1])
    else:
        raise Exception('invalid Time parameter: ' + str(t))
11d987c38b6259f67c9f2f9e16180617e34a1f63 | 2,297 | py | Python | datadog_checks_dev/datadog_checks/dev/tooling/cli.py | volksman/integrations-core | 34405662b09bf4a8c32feaed16a4745c7e1f24c0 | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_dev/datadog_checks/dev/tooling/cli.py | volksman/integrations-core | 34405662b09bf4a8c32feaed16a4745c7e1f24c0 | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_dev/datadog_checks/dev/tooling/cli.py | volksman/integrations-core | 34405662b09bf4a8c32feaed16a4745c7e1f24c0 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import click
from .commands import ALL_COMMANDS
from .commands.utils import CONTEXT_SETTINGS, echo_success, echo_waiting, echo_warning
from .config import CONFIG_FILE, config_file_exists, load_config, restore_config
from .constants import set_root
from ..compat import PermissionError
from ..utils import dir_exists
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.option('--core', '-c', is_flag=True, help='Work on `integrations-core`.')
@click.option('--extras', '-e', is_flag=True, help='Work on `integrations-extras`.')
@click.option('--agent', '-a', is_flag=True, help='Work on `datadog-agent`.')
@click.option('--here', '-x', is_flag=True, help='Work on the current location.')
@click.option('--quiet', '-q', is_flag=True)
@click.version_option()
@click.pass_context
def ddev(ctx, core, extras, agent, here, quiet):
    """Top-level ddev group: bootstrap the config file, select the target
    repository, resolve its root directory, and show help when invoked
    without a sub-command."""
    # First run: create a default config file (unless --quiet suppresses it).
    if not quiet and not config_file_exists():
        echo_waiting('No config file found, creating one with default settings now...')
        try:
            restore_config()
            echo_success('Success! Please see `ddev config`.')
        except (IOError, OSError, PermissionError):
            echo_warning(
                'Unable to create config file located at `{}`. '
                'Please check your permissions.'.format(CONFIG_FILE)
            )

    # Load and store configuration for sub-commands.
    config = load_config()

    # CLI flags win over the configured default repo.
    if core:
        repo_choice = 'core'
    elif extras:
        repo_choice = 'extras'
    elif agent:
        repo_choice = 'agent'
    else:
        repo_choice = config.get('repo', 'core')
    config['repo_choice'] = repo_choice
    ctx.obj = config

    # Resolve the repo root; fall back to the current directory when the
    # configured path is missing or --here was passed.
    root = os.path.expanduser(config.get(repo_choice, ''))
    if here or not dir_exists(root):
        if not here and not quiet:
            repo = 'datadog-agent' if repo_choice == 'agent' else 'integrations-{}'.format(repo_choice)
            echo_warning('`{}` directory `{}` does not exist, defaulting to the current location.'.format(repo, root))
        root = os.getcwd()
    set_root(root)

    if not ctx.invoked_subcommand:
        click.echo(ctx.get_help())
# Attach every sub-command to the top-level `ddev` group.
for command in ALL_COMMANDS:
    ddev.add_command(command)
| 33.779412 | 118 | 0.66478 |
bea4af61dd1830d9f746e08a30b19bf09c9600c0 | 3,478 | py | Python | tests/integration/test_azure_integrator.py | afreiberger/charm-azure-integrator | 9e3cac110042770dac2eb5f445b9909b730a4ca7 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2018-10-23T17:38:53.000Z | 2019-02-20T18:39:13.000Z | tests/integration/test_azure_integrator.py | afreiberger/charm-azure-integrator | 9e3cac110042770dac2eb5f445b9909b730a4ca7 | [
"ECL-2.0",
"Apache-2.0"
] | 20 | 2018-08-14T20:13:45.000Z | 2021-11-10T14:09:39.000Z | tests/integration/test_azure_integrator.py | charmed-kubernetes/charm-azure-integrator | 428ccf090191d9c61220e3e59ab485de6e0d2071 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2018-10-23T17:38:56.000Z | 2021-05-26T13:27:07.000Z | import logging
import pytest
import requests
log = logging.getLogger(__name__)
@pytest.mark.abort_on_fail
async def test_build_and_deploy(ops_test, lb_charms):
    """Build and deploy the integrator and consumer charms, skipping when
    either is already present so the suite can be re-run against an
    existing deployment."""
    deployed = set(ops_test.model.applications)
    if deployed & {"azure-integrator", "lb-consumer"}:
        pytest.skip("Already deployed")
    charm = await ops_test.build_charm(".")
    await ops_test.model.deploy(charm, trust=True)
    charm = await ops_test.build_charm(lb_charms.lb_consumer)
    await ops_test.model.deploy(charm)
    await ops_test.model.wait_for_idle(timeout=20 * 60)
@pytest.mark.parametrize("visibility", ["public", "internal"])
async def test_lb(ops_test, list_lbs, list_public_ips, list_nsg_rules, visibility):
    """End-to-end load-balancer test, run once for a public and once for an
    internal LB: relate consumer to integrator, verify the Azure resources
    that should (and should not) exist, check HTTP reachability, and always
    tear the relation down and verify cleanup in the finally block."""
    lb_consumer = ops_test.model.applications["lb-consumer"]
    lb_unit = lb_consumer.units[0]
    az_unit = ops_test.model.applications["azure-integrator"].units[0]
    # Sanity check
    assert await list_lbs() == []
    assert await list_public_ips() == []
    assert await list_nsg_rules() == []
    assert lb_unit.workload_status == "waiting"
    log.info(f"Creating {visibility} LB")
    await lb_consumer.set_config(
        {"public": "true" if visibility == "public" else "false"}
    )
    # Relating the apps is what triggers the integrator to create the LB.
    await ops_test.model.add_relation("azure-integrator", "lb-consumer")
    log.info("Waiting for LB")
    await ops_test.model.wait_for_idle()
    # Track failure so cleanup problems don't mask the original assertion.
    is_failed = False
    try:
        log.info("Verifying LB components")
        assert await list_lbs() != []
        # Public IPs and NSG rules should exist only for a public LB.
        if visibility == "public":
            assert await list_public_ips() != []
            assert await list_nsg_rules() != []
        else:
            assert await list_public_ips() == []
            assert await list_nsg_rules() == []
        assert lb_unit.workload_status == "active"
        # The consumer charm reports the LB address in its status message.
        address = lb_unit.workload_status_message
        lb_url = f"http://{address}/"
        if visibility == "public":
            log.info(f"Confirming external access to {lb_url}")
            r = requests.get(lb_url)
            assert r.status_code == 200
            assert "nginx" in r.text.lower()
        units = [az_unit]
        if visibility == "public":
            # Backends can never reach their own internal LBs, so self-connectivity can
            # only be validated for a public LB. See bullet #3 on:
            # https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations
            units.append(lb_unit)
        for unit in units:
            log.info(f"Confirming access from {unit.name} to {lb_url}")
            data = await unit.run(f"curl -i '{lb_url}'")
            output = data.results.get("Stdout", data.results.get("Stderr", ""))
            assert "nginx" in output
    except Exception as e:
        is_failed = True
        log.error(f"Failed: {e}")
        raise
    finally:
        log.info("Cleaning up LB")
        await lb_consumer.remove_relation("lb-provider", "azure-integrator")
        await ops_test.model.wait_for_idle()
        # All Azure resources should be gone after the relation is removed.
        for check, dsc in [
            (await list_lbs(), "LBs"),
            (await list_public_ips(), "public IPs"),
            (await list_nsg_rules(), "NSG rules"),
        ]:
            msg = f"Failed to clean up {dsc}: {check}"
            if is_failed:
                if check != []:
                    # Only log failed cleanup, rather than assert, so as to not
                    # mask other failure.
                    log.error(msg)
            else:
                assert check == [], msg
e50df4ef797c82d8e73c8ad90e95e302be1e4fec | 1,875 | py | Python | coverage/IN_CTS/0516-COVERAGE-bit-vector-h-562/generate_cts_test.py | asuonpaa/ShaderTests | 6a3672040dcfa0d164d313224446496d1775a15e | [
"Apache-2.0"
] | null | null | null | coverage/IN_CTS/0516-COVERAGE-bit-vector-h-562/generate_cts_test.py | asuonpaa/ShaderTests | 6a3672040dcfa0d164d313224446496d1775a15e | [
"Apache-2.0"
] | 47 | 2021-03-11T07:42:51.000Z | 2022-03-14T06:30:14.000Z | coverage/IN_CTS/0516-COVERAGE-bit-vector-h-562/generate_cts_test.py | asuonpaa/ShaderTests | 6a3672040dcfa0d164d313224446496d1775a15e | [
"Apache-2.0"
] | 4 | 2021-03-09T13:37:19.000Z | 2022-02-25T07:32:11.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a CTS test.
This module/script is copied next to a specific test in your repository of bugs
to generate an Amber script test suitable for adding to the CTS.
In particular, the Amber script test is suitable for use with |add_amber_tests_to_cts.py|.
"""
import sys
from pathlib import Path
from gfauto import tool, util
def main() -> None:
    """Convert the reduced shader job next to this script into a CTS Amber
    test that expects a red image."""
    # Checklist when adapting this script for a new bug:
    # output_amber, short_description, comment_text, copyright_year,
    # extra_commands.
    bug_dir = util.norm_path(Path(__file__).absolute()).parent
    amber_path = bug_dir / "cov-switch-fallthrough-variable-from-first-case.amber"

    tool.glsl_shader_job_crash_to_amber_script_for_google_cts(
        source_dir=bug_dir / "reduced_manual",
        output_amber=amber_path,
        work_dir=bug_dir / "work",
        # One sentence, 58 characters max., no period, no line breaks.
        short_description="A fragment shader that covers specific LLVM code paths",
        comment_text="The test passes because the shader always writes red.",
        copyright_year="2021",
        extra_commands=tool.AMBER_COMMAND_EXPECT_RED,
        is_coverage_gap=True,
    )
if __name__ == "__main__":
    main()
    sys.exit(0)  # explicit success exit code
| 32.327586 | 90 | 0.720533 |
32972ae96d9aa978a6e61951baa7aeaf666bc52e | 14,868 | py | Python | pinkfish/analysis.py | tombohub/pinkfish | e5fdb4d197cd8d7c234125f46ddc462c62f3401d | [
"MIT"
] | null | null | null | pinkfish/analysis.py | tombohub/pinkfish | e5fdb4d197cd8d7c234125f46ddc462c62f3401d | [
"MIT"
] | null | null | null | pinkfish/analysis.py | tombohub/pinkfish | e5fdb4d197cd8d7c234125f46ddc462c62f3401d | [
"MIT"
] | null | null | null | """
Analysis of results.
This module contains some functions that were copied or derived
from the book "Trading Evolved" by Andreas F. Clenow.
Below is a correspondance I had with the author:
------------------------------------------------------------------------
Farrell
October 25, 2019 at 15:49
Hi Andreas,
I just finished reading the book. Awesome one of a kind! Thanks so much.
I also enjoyed your other two. Question: what is the copyright (if any)
on the source code you have in the book. I want to incorporate some of
it into my open source backtester, Pinkfish. How should I credit your
work if no copyright. I could add a comment at the beginning of each
derived function or module at a minimum.
Farrell
------------------------------------------------------------------------
Andreas Clenow
October 25, 2019 at 17:29
Hi Farrell,
I can be paid in reviews and/or beer. :)
For an open source project, use the code as you see fit. A credit in the
comments somewhere would be nice, but I won't sue you if you forget it.
ac
------------------------------------------------------------------------
"""
import empyrical as em
from IPython.core.display import display, HTML
import matplotlib.pyplot as plt
import pandas as pd
import pinkfish as pf
########################################################################
# MONTHY RETURNS MAP
def monthly_returns_map(dbal):
    """
    Display per month and per year returns in a table.

    Parameters
    ----------
    dbal : pd.Series
        The daily closing balance indexed by date.

    Returns
    -------
    None

    Examples
    --------
    >>> monthly_returns_map(dbal['close'])
    Year Jan Feb Mar Apr May Jun Jul ... Year
    1990 -8.5 0.9 2.4 -2.7 9.2 -0.9 -0.5 -8.2
    1991 4.2 6.7 2.2 0.0 3.9 -4.8 4.5 26.3
    """
    # Compound the daily returns into per-month and per-year returns.
    monthly_data = em.aggregate_returns(dbal.pct_change(),'monthly')
    yearly_data = em.aggregate_returns(dbal.pct_change(),'yearly')

    # Column header row; re-emitted every 15 years so long histories
    # stay readable.
    table_header = """
    <table class='table table-hover table-condensed table-striped'>
    <thead>
    <tr>
    <th style="text-align:right">Year</th>
    <th style="text-align:right">Jan</th>
    <th style="text-align:right">Feb</th>
    <th style="text-align:right">Mar</th>
    <th style="text-align:right">Apr</th>
    <th style="text-align:right">May</th>
    <th style="text-align:right">Jun</th>
    <th style="text-align:right">Jul</th>
    <th style="text-align:right">Aug</th>
    <th style="text-align:right">Sep</th>
    <th style="text-align:right">Oct</th>
    <th style="text-align:right">Nov</th>
    <th style="text-align:right">Dec</th>
    <th style="text-align:right">Year</th>
    </tr>
    </thead>
    <tbody>
    <tr>"""

    # State machine over the (year, month) MultiIndex: one table row per
    # year, padded with '-' for months outside the simulation period.
    first_year = True
    first_month = True
    year = 0
    month = 0
    year_count = 0
    table = ''
    for m, val in monthly_data.iteritems():
        year = m[0]
        month = m[1]
        if first_month:
            if year_count % 15 == 0:
                table += table_header
            table += "<td align='right'><b>{}</b></td>\n".format(year)
            first_month = False
        # Pad empty months for first year if sim doesn't start in Jan.
        if first_year:
            first_year = False
            if month > 1:
                for _ in range(1, month):
                    table += "<td align='right'>-</td>\n"
        table += "<td align='right'>{:.1f}</td>\n".format(val * 100)
        # Check for dec and add yearly.
        if month == 12:
            table += "<td align='right'><b>{:.1f}</b></td>\n".format(
                yearly_data[year] * 100)
            table += '</tr>\n <tr> \n'
            first_month = True
            year_count += 1
    # Add padding for empty months and last year's value.
    if month != 12:
        for i in range(month+1, 13):
            table += "<td align='right'>-</td>\n"
            if i == 12:
                table += "<td align='right'><b>{:.1f}</b></td>\n".format(
                    yearly_data[year] * 100)
                table += '</tr>\n <tr> \n'
    table += '</tr>\n </tbody> \n </table>'
    display(HTML(table))
########################################################################
# HOLDING PERIOD MAP
def holding_period_map(dbal):
    """
    Display holding period returns in a table.

    Each row is a purchase year; each column N shows the annualized
    return for holding N years from that purchase year.

    Length of returns should be 30 or less, otherwise the output
    will be jumbled.

    Parameters
    ----------
    dbal : pd.Series
        The daily closing balance indexed by date.

    Returns
    -------
    None

    Examples
    --------
    >>> holding_period_map(dbal['close'])
    Years 1 2 3 4 5 6 7 8
    2013 30 20 13 12 13 10 12 12
    2014 11 5 7 10 6 10 9
    ...
    2020 8
    """
    # Compound yearly returns derived from the daily balance.
    year = em.aggregate_returns(dbal.pct_change(), 'yearly')
    year_start = 0

    table = "<table class='table table-hover table-condensed table-striped'>"
    table += "<tr><th>Years</th>"
    # One column per possible holding-period length.
    for i in range(len(year)):
        table += "<th>{}</th>".format(i+1)
    table += "</tr>"

    for the_year, value in year.iteritems(): # Iterates years
        table += "<tr><th>{}</th>".format(the_year) # New table row
        for years_held in (range(1, len(year)+1)): # Iterates years held
            # The slice shrinks near the end of history, so later rows
            # have fewer holding periods available.
            if years_held <= len(year[year_start:year_start + years_held]):
                # Annualized return for buying in `the_year` and holding
                # for `years_held` years.
                ret = em.annual_return(year[year_start:year_start + years_held], 'yearly')
                table += "<td>{:.0f}</td>".format(ret * 100)
        table += "</tr>"
        year_start+=1
    display(HTML(table))
########################################################################
# PRETTIER GRAPHS
def _calc_corr(dbal, benchmark_dbal, window):
    """
    Rolling correlation between the daily returns of two balance series.

    Parameters
    ----------
    dbal : pd.Series
        Strategy daily closing balance indexed by date.
    benchmark_dbal : pd.Series
        Benchmark daily closing balance indexed by date.
    window : int
        Size of the moving window, i.e. the number of observations used
        for each correlation value.

    Returns
    -------
    pd.Series
        Rolling `window`-sized correlation between the percent-change
        returns of `dbal` and `benchmark_dbal` (NaN until the window and
        the initial pct_change fill in).
    """
    returns = dbal.pct_change()
    benchmark_returns = benchmark_dbal.pct_change()
    return returns.rolling(window).corr(benchmark_returns)
def prettier_graphs(dbal, benchmark_dbal, dbal_label='Strategy',
                    benchmark_label='Benchmark', points_to_plot=None):
    """
    Plot 3 subplots.

    The first subplot will show a rebased comparison of the returns to
    the benchmark returns, recalculated with the same starting value
    of 1. This will be shown on a semi logarithmic scale. The second
    subplot will show relative strength of the returns to the benchmark
    returns, and the third the correlation between the two.

    Parameters
    ----------
    dbal : pd.Series
        Strategy daily closing balance indexed by date.
    benchmark_dbal : pd.Series
        Benchmark daily closing balance indexed by date.
    dbal_label : str, optional
        Label to use in graph for strategy (default is 'Strategy').
    benchmark_label : str, optional
        Label to use in graph for benchmark (default is 'Benchmark').
    points_to_plot : int, optional
        Define how many points (trading days) we intend to plot
        (default is None, which implies plot all points or days).

    Returns
    -------
    None

    Examples
    --------
    >>> prettier_graphs(dbal['close'], benchmark_dbal['close'],
                        points_to_plot=5000)
    """
    if points_to_plot is None:
        # 0 makes the later [-points_to_plot:] slices degenerate to the
        # whole series, i.e. "plot everything".
        points_to_plot = 0;

    data = pd.DataFrame(dbal)
    data['benchmark_dbal'] = pd.DataFrame(benchmark_dbal)
    data.columns = ['dbal', 'benchmark_dbal']
    # NOTE(review): the result of data.head() is discarded — likely
    # leftover debugging.
    data.head()
    # Rebase the two series to the same point in time;
    # starting where the plot will start.
    # NOTE(review): this iterates the column index captured when the loop
    # starts, so the '_rebased' columns added inside the loop are not
    # themselves revisited — confirm this holds across pandas versions.
    for col in data:
        data[col + '_rebased'] = \
            (data[-points_to_plot:][col].pct_change() + 1).cumprod()
    # Relative strength, strategy to benchmark.
    data['relative_strength'] = data['dbal'] / data['benchmark_dbal']
    # Calculate 100 day rolling correlation.
    data['corr'] = _calc_corr(data['dbal'], data['benchmark_dbal'], 100)
    # After this, we slice the data, effectively discarding all but
    # the last points_to_plot data points, using the slicing logic from
    # before. Slice the data, cut points we don't intend to plot.
    plot_data = data[-points_to_plot:]
    # Make new figure and set the size.
    fig = plt.figure(figsize=(12, 8))
    # The first subplot, planning for 3 plots high, 1 plot wide,
    # this being the first.
    ax = fig.add_subplot(311)
    ax.set_title('Comparison')
    # Log scale so equal vertical distances mean equal percentage moves.
    ax.semilogy(plot_data['dbal_rebased'], linestyle='-',
                label=dbal_label, linewidth=3.0)
    ax.semilogy(plot_data['benchmark_dbal_rebased'], linestyle='--',
                label=benchmark_label, linewidth=3.0)
    ax.legend()
    ax.grid(False)
    # Second sub plot.
    ax = fig.add_subplot(312)
    label='Relative Strength, {} to {}'.format(dbal_label, benchmark_label)
    ax.plot(plot_data['relative_strength'], label=label, linestyle=':', linewidth=3.0)
    ax.legend()
    ax.grid(True)
    # Third subplot.
    ax = fig.add_subplot(313)
    label='Correlation between {} and {}'.format(dbal_label, benchmark_label)
    ax.plot(plot_data['corr'], label=label, linestyle='-.', linewidth=3.0)
    ax.legend()
    ax.grid(True)
########################################################################
# VOLATILITY
def volatility_graphs(dbals, labels, points_to_plot=None):
    """
    Plot volatility graphs.

    The first graph is a boxplot showing the differences between
    2 or more returns. The second graph shows the volatility plotted
    for 2 or more returns.

    Parameters
    ----------
    dbals : list of pd.DataFrame
        A list of daily closing balances (or daily instrument closing
        prices) indexed by date.
    labels : list of str
        A list of labels.
    points_to_plot : int, optional
        Define how many points (trading days) we intend to plot
        (default is None, which implies plot all points or days).

    Returns
    -------
    pf.DataFrame
        Statistics comparing the `dbals`.

    Examples
    --------
    >>> df = pf.volatility_graph([ts, dbal], ['SPY', 'Strategy'],
                                 points_to_plot=5000)
    >>> df
    """
    def _boxplot(volas, labels):
        """
        Plot a volatility boxplot.
        """
        fig = plt.figure(figsize=(12, 8))
        axes = fig.add_subplot(111, ylabel='Volatility')
        plt.ylim(0, 1)
        plt.boxplot(volas, labels=labels)

    def _volas_plot(volas, labels):
        """
        Plot volatility.
        """
        fig = plt.figure(figsize=(14,10))
        axes = fig.add_subplot(111, ylabel='Volatility')
        for i, vola in enumerate(volas):
            axes.plot(vola, label=labels[i])
        plt.legend(loc='best')

    if points_to_plot is None:
        # 0 makes the [-points_to_plot:] slice degenerate to the full series.
        points_to_plot = 0;
    # Get volatility for each dbal set.
    volas = []
    for dbal in dbals:
        volas.append(pf.VOLATILITY(dbal[-points_to_plot:]).dropna())
    # Build metrics dataframe.
    index = []
    columns = labels
    data = []
    # Add metrics.
    # NOTE(review): each data.append(...) stores a *generator expression*,
    # not a list — confirm pd.DataFrame materializes these as rows on the
    # pandas version in use; a list comprehension would be unambiguous.
    # NOTE(review): vola[-1] relies on positional [-1] indexing of a Series,
    # which newer pandas rejects for non-integer indexes — verify.
    metrics = ['avg', 'median', 'min', 'max', 'std', 'last']
    for metric in metrics:
        index.append(metric)
        if metric == 'avg': data.append(vola.mean() for vola in volas)
        elif metric == 'median': data.append(vola.median() for vola in volas)
        elif metric == 'min': data.append(vola.min() for vola in volas)
        elif metric == 'max': data.append(vola.max() for vola in volas)
        elif metric == 'std': data.append(vola.std() for vola in volas)
        elif metric == 'last': data.append(vola[-1] for vola in volas)
    df = pd.DataFrame(data, columns=columns, index=index)
    _boxplot(volas, labels)
    _volas_plot(volas, labels)
    return df
########################################################################
# KELLY CRITERIAN
def kelly_criterian(stats, benchmark_stats=None):
    """
    Use this function to help with sizing of leverage.

    This function uses ideas based on the Kelly Criterian.

    Parameters
    ----------
    stats : pd.Series
        Statistics for the strategy. Must contain 'sharpe_ratio',
        'sharpe_ratio_max', 'sharpe_ratio_min', and 'annual_std'
        (annual standard deviation in percent).
    benchmark_stats : pd.Series, optional
        Statistics for the benchmark (default is None, which implies
        that a benchmark is not being used; the strategy's own
        'annual_std' is then used as the instrument risk).

    Returns
    -------
    s : pf.Series
        Leverage statistics.

        - `sharpe_ratio` is a measure of risk adjusted return.
        - `sharpe_ratio_max` is the maximum expected sharpe ratio.
        - `sharpe_ratio_min` is the minimum expected sharpe ratio.
        - `strategy risk` is a measure of how risky a trading strategy
          is, calculated as an annual standard deviation of returns.
        - `instrument_risk` is a measure of how risky an instrument is
          before any leverage is applied, calculated as an annual
          standard deviation of returns.
        - `optimal target risk` is equal to the expected sharpe ratio,
          according to the Kelly criterian. Target risk is the amount
          of risk you expect to see when trading, calculated as an
          annual standard deviation of returns.
        - `half kelly criterian` is equal to half the expected
          sharpe ratio. It uses a conservative version of the
          Kelly criterian known as half Kelly.
        - `aggressive leverage` is the optimal target risk divided by
          the instrument risk. This is a aggrssive form of the
          leverage factor, which is the cash value of a position
          divided by your capital.
        - `moderate leverage` is the leverage factor calculated using
          half Kelly.
        - `conservative leverage` is the leverage factor calculated
          using half of the minimum sharpe ratio divided by 2.
    """
    s = pd.Series(dtype='object')
    s['sharpe_ratio'] = stats['sharpe_ratio']
    s['sharpe_ratio_max'] = stats['sharpe_ratio_max']
    s['sharpe_ratio_min'] = stats['sharpe_ratio_min']
    # 'annual_std' is expressed in percent; convert to a fraction.
    s['strategy risk'] = stats['annual_std'] / 100
    # Bug fix: the default benchmark_stats=None previously crashed with
    # TypeError; fall back to the strategy's own volatility instead.
    if benchmark_stats is not None:
        s['instrument risk'] = benchmark_stats['annual_std'] / 100
    else:
        s['instrument risk'] = stats['annual_std'] / 100
    s['optimal target risk'] = s['sharpe_ratio']
    s['half kelly criterian'] = s['sharpe_ratio'] / 2
    # Leverage factor = target risk / unlevered instrument risk.
    s['aggressive leverage'] = s['optimal target risk'] / s['instrument risk']
    s['moderate leverage'] = s['half kelly criterian'] / s['instrument risk']
    s['conservative leverage'] = (s['sharpe_ratio_min'] / 2) / s['instrument risk']
    return s
| 32.676923 | 90 | 0.589252 |
7b06e1838cbb0d4588c4a7905a6c1ea4faa47873 | 4,351 | py | Python | scripts/save_features.py | amudide/chemprop | 376bfc53bc6d449a48641539354ad51f6137e6c7 | [
"MIT"
] | null | null | null | scripts/save_features.py | amudide/chemprop | 376bfc53bc6d449a48641539354ad51f6137e6c7 | [
"MIT"
] | null | null | null | scripts/save_features.py | amudide/chemprop | 376bfc53bc6d449a48641539354ad51f6137e6c7 | [
"MIT"
] | null | null | null | """Computes and saves molecular features for a dataset."""
from multiprocessing import Pool
import os
import shutil
import sys
from typing import List, Tuple
from tqdm import tqdm
from tap import Tap # pip install typed-argument-parser (https://github.com/swansonk14/typed-argument-parser)
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from chemprop.data import get_smiles
from chemprop.features import get_available_features_generators, get_features_generator, load_features, save_features
from chemprop.utils import makedirs
class Args(Tap):
    """Typed CLI arguments for feature generation.

    Note: Tap renders the inline comments on the attributes below as
    per-argument ``--help`` text, so they are left unchanged.
    """
    data_path: str  # Path to data CSV
    smiles_column: str = None  # Name of the column containing SMILES strings. By default, uses the first column.
    features_generator: str = 'rdkit_2d_normalized'  # Type of features to generate
    save_path: str  # Path to .npz file where features will be saved as a compressed numpy archive
    save_frequency: int = 10000  # Frequency with which to save the features
    restart: bool = False  # Whether to not load partially complete featurization and instead start from scratch
    sequential: bool = False  # Whether to run sequentially rather than in parallel

    def add_arguments(self) -> None:
        # Restrict --features_generator to the generators chemprop knows about.
        self.add_argument('--features_generator', choices=get_available_features_generators())
def load_temp(temp_dir: str) -> Tuple[List[List[float]], int]:
    """
    Load all features previously checkpointed as .npz files in ``temp_dir``.

    Temporary files are expected to be named consecutively: 0.npz, 1.npz, ...
    Loading stops at the first missing index.

    :param temp_dir: Directory in which temporary .npz feature files are stored.
    :return: Tuple of (list of per-molecule feature lists, number of files read).
    """
    features: List[List[float]] = []
    file_index = 0
    while True:
        chunk_path = os.path.join(temp_dir, f'{file_index}.npz')
        if not os.path.exists(chunk_path):
            break
        features.extend(load_features(chunk_path))
        file_index += 1

    return features, file_index
def generate_and_save_features(args: Args):
    """
    Computes and saves features for a dataset of molecules as a 2D array in a .npz file.

    Features are checkpointed into ``<save_path>_temp`` every
    ``args.save_frequency`` molecules so an interrupted run can resume
    (unless ``args.restart`` is set, which discards any partial results).

    :param args: Arguments.
    """
    # Create directory for save_path
    makedirs(args.save_path, isfile=True)

    # Get data and features function
    smiles = get_smiles(path=args.data_path, smiles_column=args.smiles_column)
    features_generator = get_features_generator(args.features_generator)
    # Fix: removed leftover debug print of args.features_generator.
    temp_save_dir = args.save_path + '_temp'

    # Load partially complete data
    if args.restart:
        if os.path.exists(args.save_path):
            os.remove(args.save_path)
        if os.path.exists(temp_save_dir):
            shutil.rmtree(temp_save_dir)
    else:
        if os.path.exists(args.save_path):
            raise ValueError(f'"{args.save_path}" already exists and args.restart is False.')

        if os.path.exists(temp_save_dir):
            features, temp_num = load_temp(temp_save_dir)

    if not os.path.exists(temp_save_dir):
        makedirs(temp_save_dir)
        features, temp_num = [], 0

    # Build features map function
    smiles = smiles[len(features):]  # restrict to data for which features have not been computed yet

    if args.sequential:
        features_map = map(features_generator, smiles)
    else:
        features_map = Pool().imap(features_generator, smiles)

    # Get features
    temp_features = []
    for i, feats in tqdm(enumerate(features_map), total=len(smiles)):
        temp_features.append(feats)

        # Save temporary features every save_frequency
        if (i > 0 and (i + 1) % args.save_frequency == 0) or i == len(smiles) - 1:
            save_features(os.path.join(temp_save_dir, f'{temp_num}.npz'), temp_features)
            features.extend(temp_features)
            temp_features = []
            temp_num += 1

    try:
        # Save all features
        save_features(args.save_path, features)

        # Remove temporary features
        shutil.rmtree(temp_save_dir)
    except OverflowError:
        print('Features array is too large to save as a single file. Instead keeping features as a directory of files.')
if __name__ == '__main__':
    # CLI entry point: parse typed arguments and run feature generation.
    generate_and_save_features(Args().parse_args())
| 36.258333 | 120 | 0.701218 |
8e56b5cab09ac119f7e5929f68511b2d02c4a2ba | 7,780 | py | Python | NeuroevolutionVehicles/simulation.py | ethanrange/neuroevolution-vehicles | fb423b6988a4ce56d3fbf35d705aa34cc6f6ec3e | [
"MIT"
] | null | null | null | NeuroevolutionVehicles/simulation.py | ethanrange/neuroevolution-vehicles | fb423b6988a4ce56d3fbf35d705aa34cc6f6ec3e | [
"MIT"
] | null | null | null | NeuroevolutionVehicles/simulation.py | ethanrange/neuroevolution-vehicles | fb423b6988a4ce56d3fbf35d705aa34cc6f6ec3e | [
"MIT"
] | null | null | null | from walls import *
from parameters import *
from car import Car, CarSensor, carPanel
from GeneticAlgorithm import createPopulation
from NeuralNetwork import NeuralNetwork
import visualisation as vc
import time as t
import json
class Simulation():
    """Holds the full state of one neuroevolution run: the car population,
    the track geometry, and the on-screen neural-network visualisation.

    NOTE(review): written for Processing's Python mode (Jython 2) -- drawing
    calls such as rect()/fill()/text()/push()/pop() and globals like
    width/height/frameRate are injected by the Processing runtime, not
    defined in this module.
    """

    def __init__(self):
        self.GenerationCount = 0
        # Parallel structures: `population` holds Car objects,
        # `populationResults` holds [car, fitness] pairs for the GA.
        self.population = []
        self.populationResults = []
        self.currentCar = self.bestCar = None
        self.currentCarNum = self.bestCarNum = 0
        # Node/weight widgets for the network visualisation panel.
        self.NodeList = []
        self.WeightList = []
        # Track geometry (built by genLevel()).
        self.walls = []
        self.checkpoints = []
        self.endLine = None
        self.steeringAmount = 0
        self.acceleratingForce = 0
        # Name of a loaded network file, shown in single-car mode.
        self.loadedFile = None
        self.currentTrack = 1

    def initialisePopulation(self):
        """Create a fresh generation-1 population of POP_SIZE cars."""
        self.population = []
        self.populationResults = []
        for i in range(POP_SIZE):
            newCar = Car(initpos.x, initpos.y, False, [255,0,0])
            self.population.append(newCar)
            # IDs are "<generation>:<index>" strings.
            self.population[i].id = '1:{}'.format(i+1)
            self.populationResults.append([newCar, 0])

    def initialiseSingle(self, network):
        """Create a one-car population driven by a pre-trained network."""
        self.population = []
        self.populationResults = []
        newCar = Car(initpos.x, initpos.y, True, [255,0,0], network)
        newCar.id = '1:1'
        self.population.append(newCar)
        self.populationResults.append([newCar, 0])

    def initialiseValues(self):
        """Reset per-run bookkeeping to point at the first car."""
        self.currentCar = self.bestCar = self.population[0]
        self.currentCarNum = self.bestCarNum = 0
        self.visualiseNetwork()
        self.steeringAmount = 0
        self.acceleratingForce = 0

    def genLevel(self):
        """Build walls, checkpoints and finish line for the selected track."""
        self.walls = []
        self.checkpoints = []
        self.endLine = None
        coordinates = CoordinatesData()
        if self.currentTrack == 1:
            for i in coordinates.walls:
                self.walls.append(wall(i[0], i[1]))
            for j in coordinates.checkpoints:
                self.checkpoints.append(checkpoint(j[0], j[1], j[2]))
        else:
            # Track 2 geometry lives in the *2 coordinate lists.
            for i in coordinates.walls2:
                self.walls.append(wall(i[0], i[1]))
            for j in coordinates.checkpoints2:
                self.checkpoints.append(checkpoint(j[0], j[1], j[2]))
        self.endLine = finishLine(coordinates.finishLines[self.currentTrack-1][0], coordinates.finishLines[self.currentTrack-1][1])

    def visualiseNetwork(self):
        """Rebuild the node/weight widgets for the current car's network."""
        self.NodeList, self.WeightList = vc.visualise(self.currentCar.nn, NN_TOPOLOGY, NODE_DIAMETER, VIS_OFFSET[0], VIS_OFFSET[1])

    def drawNetwork(self):
        """Draw the right-hand panel: network diagram plus current-car stats."""
        strokeWeight(5)
        stroke(0)
        fill(230)
        rect(1250, height/2, 470, height-30)
        fill(250)
        rect(1250, VIS_OFFSET[1]+VIS_DIM[1]/2, VIS_DIM[0], VIS_DIM[1])
        push()
        fill(0)
        textSize(30)
        textAlign(CENTER)
        text('Current Car:', 1250, 65)
        pop()
        push()
        textSize(25)
        fill(0)
        carNumText = 'Current car number: ' + str(self.currentCarNum+1)
        text(carNumText, 1070, 510)
        idText = 'Current car ID: ' + str(self.currentCar.id)
        text(idText, 1070, 550)
        line(1015, 580, 1485, 580)
        posText = 'Car Position: (' + str(int(self.currentCar.pos.x)) + ', ' + str(int(self.currentCar.pos.y)) + ')'
        text(posText, 1070, 630)
        scoreText = 'Car Score: ' + str(self.currentCar.score)
        text(scoreText, 1070, 670)
        fitnessText = 'Car Fitness: ' + str(self.currentCar.fitness)
        text(fitnessText, 1070, 710)
        timeText = 'Time: ' + str(round(self.currentCar.time, 4))
        text(timeText, 1070, 750)
        line(1015, 780, 1485, 780)
        pop()
        # Weights are drawn first so the nodes render on top of them.
        for weight in self.WeightList:
            weight.show()
        for layer in self.NodeList:
            for node in layer:
                node.show()

    def drawGenerationInfo(self):
        """Draw the bottom status bar used during evolution runs."""
        push()
        rectMode(CORNER)
        strokeWeight(5)
        stroke(0)
        fill(230)
        rect(22, 935, 970, 50)
        pop()
        push()
        textSize(25)
        fill(0,0,0,255)
        GenText = 'Generation number: ' + str(self.GenerationCount+1)
        text(GenText, 35, 970)
        bestFitnessText = 'Best fitness: ' + str(self.bestCar.fitness) + ' (' + str(self.bestCar.id) + ')'
        text(bestFitnessText, 340, 970)
        pop()
        self.drawProgressBar()

    def drawSingleInfo(self):
        """Draw the bottom status bar used in single-car (loaded network) mode."""
        push()
        rectMode(CORNER)
        strokeWeight(5)
        stroke(0)
        fill(230)
        rect(22, 935, 970, 50)
        pop()
        push()
        textSize(25)
        fill(0,0,0,255)
        GenText = 'Single Car Mode: ({})'.format(self.currentCar.id)
        text(GenText, 35, 970)
        loadedFileText = 'Loaded Network: {}'.format(self.loadedFile)
        text(loadedFileText, 340, 970)
        bestFitnessText = ''
        text(bestFitnessText, 340, 970)
        pop()

    def setStartTime(self):
        # Stamp the wall-clock time the current car's run started.
        self.currentCar.startTime = t.time()

    def drawText(self):
        """Draw the frame-rate counter in the top-left corner."""
        push()
        textSize(25)
        fill(255)
        text(str(int(round(frameRate))), 5, 25)
        pop()
        textSize(12)

    def drawProgressBar(self):
        """Draw the within-generation progress bar (cars evaluated so far)."""
        push()
        textSize(25)
        fill(0,255)
        progressText = 'Progress: '
        text(progressText, 640, 970)
        fill(0,0)
        stroke(0,255)
        rectMode(CORNER)
        rect(770, 948, 200, 25)
        fill(0,255)
        # Bar fills proportionally to how many of POP_SIZE cars have run.
        barWidth = (200.0 / POP_SIZE) * (self.currentCarNum+1)
        rect(770, 948, barWidth, 25)
        pop()

    def createNewPopulation(self):
        """Finish the current generation and breed the next one via the GA.

        NOTE(review): `print('...').format(...)` only works under Python 2
        (Processing mode), where `print` is a statement and the whole
        `('...').format(...)` call is the printed expression -- it would be
        a bug under Python 3.
        """
        print('=================================================')
        print('Generation {} complete. Max Fitness: {}').format(self.GenerationCount+1, self.bestCar.fitness)
        fitnessArray = [i[1] for i in self.populationResults]
        print('Average fitness: {}').format(sum(fitnessArray)/float(POP_SIZE))
        print(fitnessArray)
        self.GenerationCount += 1
        newPop = createPopulation(self.populationResults, self.GenerationCount)
        self.population = []
        self.populationResults = []
        for i in newPop:
            self.population.append(i)
            self.populationResults.append([i,0])
        print([self.population[i].id for i in range(POP_SIZE)])
        print('=================================================\n')

    def loadNN(self, path):
        """Reconstruct a NeuralNetwork from a JSON file written by the trainer.

        The JSON is expected to contain the four layer sizes plus the raw
        weight/bias matrices for each layer.
        """
        with open(path, 'r') as file:
            extractData = json.load(file)
        dimensions = [extractData['input_nodes'], extractData['hidden_nodes_l1'],
                      extractData['hidden_nodes_l2'], extractData['output_nodes']]
        loadedNetwork = NeuralNetwork(*dimensions)
        loadedNetwork.IH1_Weights.matrix = extractData['IH1_Weights']['matrix']
        loadedNetwork.H1H2_Weights.matrix = extractData['H1H2_Weights']['matrix']
        loadedNetwork.H2O_Weights.matrix = extractData['H2O_Weights']['matrix']
        loadedNetwork.H1_Bias.matrix = extractData['H1_Bias']['matrix']
        loadedNetwork.H2_Bias.matrix = extractData['H2_Bias']['matrix']
        loadedNetwork.O_Bias.matrix = extractData['O_Bias']['matrix']
        return loadedNetwork
| 30.629921 | 131 | 0.532391 |
9c709941bf1f52dec018b7f9ee9f67535a956980 | 9,682 | py | Python | comgames/AI/TD.py | houluy/comgames | aa2bcb6ef0d16807bab0eebcfe9a4818aebc0c3b | [
"MIT"
] | 2 | 2018-02-21T16:00:20.000Z | 2018-06-18T10:22:21.000Z | comgames/AI/TD.py | houluy/comgames | aa2bcb6ef0d16807bab0eebcfe9a4818aebc0c3b | [
"MIT"
] | null | null | null | comgames/AI/TD.py | houluy/comgames | aa2bcb6ef0d16807bab0eebcfe9a4818aebc0c3b | [
"MIT"
] | null | null | null | import random
from collections import defaultdict, UserDict
import logging
import pickle
import pathlib
import json
import src.game
from .config import config
from .agent import Agent
class Value(UserDict):
    """Mapping from game states to values that only keeps the maximum value
    seen for each state.

    Keys may be passed as any iterable state (e.g. a tuple of cell marks);
    they are normalized to a string via ``_state2str`` before storage, so
    lookups must use the same string form.
    """

    def __init__(self):
        super().__init__()

    def _state2str(self, state):
        # Canonical string form of a state, e.g. (1, 0, 2) -> "102".
        return ''.join([str(x) for x in state])

    def __setitem__(self, key, value):
        """Store ``value`` under the normalized key, but never lower it.

        Bug fix: the original compared against ``super().__getitem__(key)``
        using the *raw* key, which (a) raised KeyError on the first insert
        and (b) never matched the normalized key actually stored. Missing
        entries now default to -inf so any first value is accepted.
        """
        skey = self._state2str(key)
        if self.data.get(skey, float('-inf')) < value:
            super().__setitem__(skey, value)
class QLearning(TDAgent):
    """Tabular Q-learning trainer for two-player, turn-based board games.

    NOTE(review): ``TDAgent`` is referenced here but not imported in this
    module (only ``Agent`` is imported from .agent), and ``game_name`` is
    read in ``__init__`` without being defined anywhere visible -- confirm
    against the package layout before use.
    """

    def __init__(self, new=False):
        super().__init__(new)
        # Hyperparameters come from the per-game "Q_learning" config section.
        self.params = config.get(game_name).get("Q_learning")
        self.num_episodes = self.params.get('num_episodes')
        self.min_epsilon = self.params.get("min_epsilon")
        self.epsilon = self.params.get("epsilon")
        self.epsilon_decay = self.params.get("epsilon_decay")

    def update_Q(self, state, action, reward, next_state, next_actions):
        """One-step Q-learning update toward r + gamma * max_a' Q(s', a')."""
        # Find the maximum Q value over all actions available in next_state.
        for ind, a in enumerate(next_actions):
            if ind == 0:
                max_nQ = self.Q[(next_state, a)]
            else:
                temp_Q = self.Q[(next_state, a)]
                if temp_Q > max_nQ:
                    max_nQ = temp_Q
        target = reward + self.gamma * max_nQ
        self.update(state, action, target)

    def train(self):
        """Self-play training loop: an offensive (first-move) and a defensive
        agent alternate moves; each agent's Q table is updated one step behind
        its own move, once the opponent's reply is known.
        """
        # Records all Q values (per-episode sums, for convergence plots)
        offensive_Q_list = [0 for _ in range(self.num_episodes)]
        defensive_Q_list = [0 for _ in range(self.num_episodes)]
        Q_list = [0 for _ in range(self.num_episodes)]
        # Records the winning status
        # NOTE(review): these counters are reported at the end but never
        # incremented anywhere in the loop -- confirm intended behaviour.
        offensive_win = 0
        defensive_win = 0
        tie_count = 0
        # Two agents
        agent_off = TDAgent(self.game_name)
        agent_def = TDAgent(self.game_name)
        for e in range(self.num_episodes):
            state = self.env.observation()
            done = 0
            game_round = 0
            while done == 0:
                game_round += 1
                actions = self.env.actions(state)
                action_off = agent_off.epsilon_greedy(state, actions, self.epsilon)
                intermediate_state, reward_off, done, info = self.env.step(action_off)
                if done: # offensive agent wins or tie
                    reward_def = -reward_off
                    agent_off.Q.update(state, action_off, intermediate_state, reward_off)
                    agent_def.Q.update(last_inter_state, action_def, intermediate_state, reward_def)
                else: # turn of defensive agent
                    actions = self.env.actions(intermediate_state)
                    reward_def = reward_off
                    # Need to update the Q value of defensive agent after the first round
                    if game_round > 1:
                        agent_def.Q.update(last_inter_state, action_def, intermediate_state, reward_def, actions)
                    game_round += 1
                    action_def = agent_def.epsilon_greedy(intermediate_state, actions, self.epsilon)
                    next_state, reward_def, done, info = self.env.step(action_def)
                    if done: # defensive agent wins or tie
                        reward_off = -reward_def
                        agent_def.Q.update(intermediate_state, action_def, next_state, reward_def)
                        agent_off.Q.update(state, action_off, next_state, reward_off)
                    else:
                        actions = self.env.actions(next_state)
                        agent_off.Q.update(state, action_off, next_state, reward_off, actions)
                    last_inter_state = intermediate_state[:]
                self.logger.debug(f"Offensive: state:{state}, action:{action_off}, reward:{reward_off}, next_state: {intermediate_state}")
                self.logger.debug(f"Defensive: state:{intermediate_state}, action:{action_def}, reward:{reward_def}, next_state: {next_state}")
                self.logger.debug(f"Offensive Q sum: {agent_off.Q.sum()}, Defensive Q sum: {agent_def.Q.sum()}")
                state = next_state[:]
            # Decay exploration after every episode, floored at min_epsilon.
            self.epsilon = max(self.min_epsilon, self.epsilon*self.epsilon_decay)
            self.env.reset()
            # Record current Q sum
            offensive_Q_list[e] = agent_off.Q.sum()
            defensive_Q_list[e] = agent_def.Q.sum()
            Q_list[e] = offensive_Q_list[e] + defensive_Q_list[e]
        # Merge both agents' tables and persist everything to disk.
        trained_Q = agent_off.Q + agent_def.Q
        with open(self.Q_file, "wb") as f:
            pickle.dump(trained_Q, f)
        with open("Q/offensive_Q_sum.json", "w") as f:
            json.dump(offensive_Q_list, f)
        with open("Q/defensive_Q_sum.json", "w") as f:
            json.dump(defensive_Q_list, f)
        with open("Q/Q_sum.json", "w") as f:
            json.dump(Q_list, f)
        self.logger.info(f"Offensive wins for {offensive_win} times, defensive wins for {defensive_win} times, ties for {tie_count} times")
class DoubleQLearning(DoubleTDAgent):
    """Double Q-learning: maintains two value tables Q1/Q2; each update flips
    a fair coin to decide which table to update, while the *other* table is
    used to evaluate the greedy next action. This de-biases the max operator
    of plain Q-learning.

    NOTE(review): ``DoubleTDAgent`` and the ``Q`` class are not defined in
    this module -- confirm they are provided by the package.
    """

    def __init__(self, new=False):
        super().__init__(new)
        self.Q1 = self.Q
        self.Q2 = Q()

    def update_Q(self, state, action, reward, next_state):
        """Perform one double-Q update for (state, action, reward, next_state)."""
        # Bug fix: random.rand() does not exist in the stdlib `random`
        # module (it is a NumPy name) and raised AttributeError at runtime.
        rnd = random.random()
        next_actions = self.env.actions(next_state)
        if rnd >= 0.5:
            update_Q, action_Q = self.Q1, self.Q2
        else:
            update_Q, action_Q = self.Q2, self.Q1
        # Evaluate the best next action according to the *other* table.
        for ind, a in enumerate(next_actions):
            if ind == 0:
                max_nQ = action_Q[(next_state, a)]
            else:
                temp_nQ = action_Q[(next_state, a)]
                if temp_nQ > max_nQ:
                    max_nQ = temp_nQ
        target = reward + self.gamma * max_nQ
        return self.double_update(state, action, target, update_Q)
class QLearningET(QLearning):
    """ This is the Q learning with Eligibility Trace """
    # Not implemented yet: currently behaves exactly like plain QLearning.
    pass
class NStepQLearning(QLearning):
    """Placeholder for n-step Q-learning.

    The previous ``train()`` defined here was a line-for-line copy of
    ``QLearning.train`` with no n-step logic at all; the duplicate has been
    removed so the method is inherited instead -- behaviour is unchanged and
    there is a single copy of the training loop to maintain. The actual
    n-step return computation still needs to be implemented here.
    """

    def __init__(self, game_name):
        # NOTE(review): QLearning.__init__ is declared as (self, new=False),
        # so game_name is forwarded into the `new` parameter -- confirm the
        # intended signature before relying on this class.
        super().__init__(game_name)
#class EligibilityTraceSARSA:
# def __init__(self):
# pass
#
# def train(self):
# state =
#
| 43.809955 | 143 | 0.588928 |
31a4866eab2733223d13023f3a35cafb5439427a | 2,269 | py | Python | persist.py | XhinLiang/xhinliang-win | 2fbcdf08779644817e2e55ff4d202f85a6eb1d5f | [
"Apache-2.0"
] | null | null | null | persist.py | XhinLiang/xhinliang-win | 2fbcdf08779644817e2e55ff4d202f85a6eb1d5f | [
"Apache-2.0"
] | 22 | 2021-10-02T06:01:09.000Z | 2022-03-27T05:36:45.000Z | persist.py | XhinLiang/xhinliang-win | 2fbcdf08779644817e2e55ff4d202f85a6eb1d5f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import re
import urllib
def persist_md(md_file_path, output_dir, new_img_prefix):
    """Download every remote image referenced by a markdown file into
    output_dir and rewrite the references to point at the local copy.

    The file is rewritten in place only if at least one image was replaced.
    Note: uses urllib.URLopener, i.e. this script targets Python 2.

    :param md_file_path: path of the markdown file to process
    :param output_dir: directory the downloaded images are stored in
    :param new_img_prefix: URL/path prefix used in the rewritten image links
    """
    url_downloader = urllib.URLopener()
    change = False
    md_lines = []
    with open(md_file_path, 'r') as md_file:
        str_arr = md_file_path.split('/')
        relative_md_file_name = str_arr[len(str_arr) - 1].replace('.md', '')
        md_lines = md_file.readlines()
        for i in range(0, len(md_lines)):
            md_line = md_lines[i]
            match_obj = re.match(r'(!\[(\w*)\]\((http.*/(.*\.(\w+)))\))', md_line)
            if match_obj:
                origin_img = match_obj.group(1)
                img_tag = match_obj.group(2)
                origin_img_url = match_obj.group(3)
                origin_img_name = match_obj.group(4)
                new_img_file_name = "persister-" + relative_md_file_name + "-" + img_tag + "-" + origin_img_name
                print('======== replace start ===========')
                print('img_tag: ' + img_tag)
                print('origin_img: ' + origin_img)
                print('origin_img_url: ' + origin_img_url)
                print('origin_img_name: ' + origin_img_name)
                print('new_image_file_name: ' + new_img_file_name)
                print('======== replace end ===========')
                print('')
                url_downloader.retrieve(origin_img_url, output_dir + "/" + new_img_file_name)
                # Bug fix: the reference was previously replaced with an
                # empty string, deleting the image from the document and
                # ignoring new_img_prefix. Rewrite it to point at the
                # downloaded copy under new_img_prefix instead.
                new_img = "![{0}]({1}/{2})".format(img_tag, new_img_prefix.rstrip('/'), new_img_file_name)
                md_line = md_line.replace(origin_img, new_img)
                md_lines[i] = md_line
                change = True
    if change:
        with open(md_file_path, 'w') as md_file_write:
            md_file_write.writelines(md_lines)
            md_file_write.flush()
def main():
    """Walk the directory given as argv[1] and persist images for every
    markdown file found; argv[2] is the image output dir, argv[3] the new
    image URL prefix."""
    search_dir = sys.argv[1]
    img_output_dir = sys.argv[2]
    new_img_prefix = sys.argv[3]
    for root, _dirs, filenames in os.walk(search_dir):
        for filename in filenames:
            if not filename.endswith(".md"):
                continue
            file_path = os.path.join(root, filename)
            print('file ' + file_path)
            persist_md(md_file_path=file_path, output_dir=img_output_dir, new_img_prefix=new_img_prefix)
if __name__ == "__main__":
main()
| 38.457627 | 112 | 0.556633 |
ae2810a2dd0ac9c4cf721c65422fc871991e4969 | 3,089 | py | Python | Software de Trading/tests/test_wallets.py | NatanNMB15/tcc-pytradebot | 52b19251a030ab9c1a1b95157b4d57a9cf6df9dc | [
"MIT"
] | 1 | 2020-05-13T14:12:42.000Z | 2020-05-13T14:12:42.000Z | Software de Trading/tests/test_wallets.py | NatanNMB15/tcc-pytradebot | 52b19251a030ab9c1a1b95157b4d57a9cf6df9dc | [
"MIT"
] | 7 | 2020-02-12T02:58:40.000Z | 2021-06-04T23:24:08.000Z | Software de Trading/tests/test_wallets.py | NatanNMB15/tcc-pytradebot | 52b19251a030ab9c1a1b95157b4d57a9cf6df9dc | [
"MIT"
] | null | null | null | # pragma pylint: disable=missing-docstring
from tests.conftest import get_patched_freqtradebot
from unittest.mock import MagicMock
def test_sync_wallet_at_boot(mocker, default_conf):
    """Wallets must mirror the exchange balances at boot and after update()."""
    default_conf['dry_run'] = False
    boot_balances = {
        "BNT": {"free": 1.0, "used": 2.0, "total": 3.0},
        "GAS": {"free": 0.260739, "used": 0.0, "total": 0.260739},
    }
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        get_balances=MagicMock(return_value=boot_balances)
    )

    freqtrade = get_patched_freqtradebot(mocker, default_conf)

    wallets = freqtrade.wallets._wallets
    assert len(wallets) == 2
    assert wallets['BNT'].free == 1.0
    assert wallets['BNT'].used == 2.0
    assert wallets['BNT'].total == 3.0
    assert wallets['GAS'].free == 0.260739
    assert wallets['GAS'].used == 0.0
    assert wallets['GAS'].total == 0.260739
    assert freqtrade.wallets.get_free('BNT') == 1.0

    refreshed_balances = {
        "BNT": {"free": 1.2, "used": 1.9, "total": 3.5},
        "GAS": {"free": 0.270739, "used": 0.1, "total": 0.260439},
    }
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        get_balances=MagicMock(return_value=refreshed_balances)
    )

    freqtrade.wallets.update()

    # Re-fetch in case update() rebuilt the mapping object.
    wallets = freqtrade.wallets._wallets
    assert len(wallets) == 2
    assert wallets['BNT'].free == 1.2
    assert wallets['BNT'].used == 1.9
    assert wallets['BNT'].total == 3.5
    assert wallets['GAS'].free == 0.270739
    assert wallets['GAS'].used == 0.1
    assert wallets['GAS'].total == 0.260439
    assert freqtrade.wallets.get_free('GAS') == 0.270739
    assert freqtrade.wallets.get_used('GAS') == 0.1
    assert freqtrade.wallets.get_total('GAS') == 0.260439
def test_sync_wallet_missing_data(mocker, default_conf):
    """A balance entry without 'used' must yield a wallet whose used is None."""
    default_conf['dry_run'] = False
    partial_balances = {
        "BNT": {"free": 1.0, "used": 2.0, "total": 3.0},
        # "used" deliberately missing for GAS.
        "GAS": {"free": 0.260739, "total": 0.260739},
    }
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        get_balances=MagicMock(return_value=partial_balances)
    )

    freqtrade = get_patched_freqtradebot(mocker, default_conf)

    wallets = freqtrade.wallets._wallets
    assert len(wallets) == 2
    assert wallets['BNT'].free == 1.0
    assert wallets['BNT'].used == 2.0
    assert wallets['BNT'].total == 3.0
    assert wallets['GAS'].free == 0.260739
    assert wallets['GAS'].used is None
    assert wallets['GAS'].total == 0.260739
    assert freqtrade.wallets.get_free('GAS') == 0.260739
0885f4d475c69a945298e60aea52ff57dcb607df | 230 | py | Python | Tests/Inputs/exit.py | aisk/ironpython3 | d492fd811a0cee4d0a07cd46f02a29a3c90d964b | [
"Apache-2.0"
] | 1,872 | 2015-01-02T18:56:47.000Z | 2022-03-31T07:34:39.000Z | Tests/Inputs/exit.py | aisk/ironpython3 | d492fd811a0cee4d0a07cd46f02a29a3c90d964b | [
"Apache-2.0"
] | 675 | 2015-02-27T09:01:01.000Z | 2022-03-31T14:03:25.000Z | Tests/Inputs/exit.py | aisk/ironpython3 | d492fd811a0cee4d0a07cd46f02a29a3c90d964b | [
"Apache-2.0"
] | 278 | 2015-01-02T03:48:20.000Z | 2022-03-29T20:40:44.000Z | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
sys.exit()
| 32.857143 | 77 | 0.769565 |
6555a6272aea0b27abb7fa7c1a39904d02cddd2b | 876 | py | Python | problems/best_time_to_buy_and_sell_stock_iii/solution.py | jimit105/leetcode-submissions | ff20c54acf8ad71ed0851c81a0463520fca6a69f | [
"MIT"
] | null | null | null | problems/best_time_to_buy_and_sell_stock_iii/solution.py | jimit105/leetcode-submissions | ff20c54acf8ad71ed0851c81a0463520fca6a69f | [
"MIT"
] | null | null | null | problems/best_time_to_buy_and_sell_stock_iii/solution.py | jimit105/leetcode-submissions | ff20c54acf8ad71ed0851c81a0463520fca6a69f | [
"MIT"
] | null | null | null | # Approach 1 - Bidirectional Dynamic Programming
# Time: O(N)
# Space: O(N)
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) <= 1:
return 0
left_min, right_max = prices[0], prices[-1]
n = len(prices)
left_profits = [0] * n
right_profits = [0] * (n + 1)
for l in range(1, n):
left_profits[l] = max(left_profits[l - 1], prices[l] - left_min)
left_min = min(left_min, prices[l])
r = n - 1 - l
right_profits[r] = max(right_profits[r + 1], right_max - prices[r])
right_max = max(right_max, prices[r])
max_profit = 0
for i in range(0, n):
max_profit = max(max_profit, left_profits[i] + right_profits[i + 1])
return max_profit | 30.206897 | 80 | 0.505708 |
362a402755ef73e862afe8971882456f009c6d17 | 14,123 | py | Python | great_expectations/rule_based_profiler/rule_based_profiler.py | dewmeht/great_expectations | 7d359e039dea393b4c9051ab7a770fb9423024f4 | [
"Apache-2.0"
] | null | null | null | great_expectations/rule_based_profiler/rule_based_profiler.py | dewmeht/great_expectations | 7d359e039dea393b4c9051ab7a770fb9423024f4 | [
"Apache-2.0"
] | null | null | null | great_expectations/rule_based_profiler/rule_based_profiler.py | dewmeht/great_expectations | 7d359e039dea393b4c9051ab7a770fb9423024f4 | [
"Apache-2.0"
] | null | null | null | import uuid
from typing import Any, Dict, List, Optional
from ruamel.yaml.comments import CommentedMap
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
DomainBuilder,
)
from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (
ExpectationConfigurationBuilder,
)
from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
ParameterBuilder,
)
from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
ParameterContainer,
build_parameter_container_for_variables,
)
from great_expectations.rule_based_profiler.rule.rule import Rule
class RuleBasedProfiler:
"""
RuleBasedProfiler object serves to profile, or automatically evaluate a set of rules, upon a given
batch / multiple batches of data.
--ge-feature-maturity-info--
id: rule_based_profiler_overall
title: Rule-Based Profiler
icon:
short_description: Configuration Driven Profiler
description: Use YAML to configure a flexible Profiler engine, which will then generate an ExpectationSuite for a data set
how_to_guide_url:
maturity: Experimental
maturity_details:
api_stability: Low (instantiation of Profiler and the signature of the run() method will change)
implementation_completeness: Moderate (some augmentation and/or growth in capabilities is to be expected)
unit_test_coverage: High (but not complete -- additional unit tests will be added, commensurate with the upcoming new functionality)
integration_infrastructure_test_coverage: N/A -> TBD
documentation_completeness: Moderate
bug_risk: Low/Moderate
expectation_completeness: Moderate
id: domain_builders
title: Domain Builders
icon:
short_description: Configurable Domain builders for generating lists of ExpectationConfiguration objects
description: Use YAML to build domains for ExpectationConfiguration generator (table, column, semantic types, etc.)
how_to_guide_url:
maturity: Experimental
maturity_details:
api_stability: Moderate
implementation_completeness: Moderate (additional DomainBuilder classes will be developed)
unit_test_coverage: High (but not complete -- additional unit tests will be added, commensurate with the upcoming new functionality)
integration_infrastructure_test_coverage: N/A -> TBD
documentation_completeness: Moderate
bug_risk: Low/Moderate
expectation_completeness: Moderate
id: parameter_builders
title: Parameter Builders
icon:
short_description: Configurable Parameter builders for generating parameters to be used by ExpectationConfigurationBuilder classes for generating lists of ExpectationConfiguration objects (e.g., as kwargs and meta arguments), corresponding to the Domain built by a DomainBuilder class
description: Use YAML to configure single and multi batch based parameter computation modules for the use by ExpectationConfigurationBuilder classes
how_to_guide_url:
maturity: Experimental
maturity_details:
api_stability: Moderate
implementation_completeness: Moderate (additional ParameterBuilder classes will be developed)
unit_test_coverage: High (but not complete -- additional unit tests will be added, commensurate with the upcoming new functionality)
integration_infrastructure_test_coverage: N/A -> TBD
documentation_completeness: Moderate
bug_risk: Low/Moderate
expectation_completeness: Moderate
id: expectation_configuration_builders
title: ExpectationConfiguration Builders
icon:
short_description: Configurable ExpectationConfigurationBuilder classes for generating lists of ExpectationConfiguration objects (e.g., as kwargs and meta arguments), corresponding to the Domain built by a DomainBuilder class and using parameters, computed by ParameterBuilder classes
description: Use YAML to configure ExpectationConfigurationBuilder classes, which emit lists of ExpectationConfiguration objects (e.g., as kwargs and meta arguments)
how_to_guide_url:
maturity: Experimental
maturity_details:
api_stability: Moderate
implementation_completeness: Moderate (additional ExpectationConfigurationBuilder classes might be developed)
unit_test_coverage: High (but not complete -- additional unit tests will be added, commensurate with the upcoming new functionality)
integration_infrastructure_test_coverage: N/A -> TBD
documentation_completeness: Moderate
bug_risk: Low/Moderate
expectation_completeness: Moderate
--ge-feature-maturity-info--
"""
def __init__(
self,
name: str,
config_version: float,
rules: Dict[str, CommentedMap],
variables: Optional[Dict[str, Any]] = None,
data_context: Optional["DataContext"] = None, # noqa: F821
):
"""
Create a new Profiler using configured rules.
For a rule or an item in a rule configuration, instantiates the following if
available: a domain builder, a parameter builder, and a configuration builder.
These will be used to define profiler computation patterns.
Args:
name: The name of the RBP instance
config_version: The version of the RBP (currently only 1.0 is supported)
rules: A set of RuleConfigs, each of which contains its own DomainBuilder, ParameterBuilders, and ExpectationConfigurationBuilders
variables: Any variables to be substituted within the rules
data_context: DataContext object that defines a full runtime environment (data access, etc.)
"""
self._name = name
self._config_version = config_version
if variables is None:
variables = {}
self._variables = variables
self._rules = self._init_rules(
rules=rules, variables=variables, data_context=data_context
)
self._data_context = data_context
# Necessary to annotate ExpectationSuite during `run()`
self._citation = {
"name": name,
"config_version": config_version,
"rules": rules,
"variables": variables,
}
    @property
    def name(self) -> str:
        # Read-only profiler name, set at construction time.
        return self._name

    @property
    def variables(self) -> dict:
        # Read-only variables mapping substituted into the rules.
        return self._variables
@staticmethod
def _init_rules(
rules: Dict[str, CommentedMap],
variables: Dict[str, Any],
data_context: Optional["DataContext"] = None, # noqa: F821
) -> List[Rule]:
resulting_rules = []
for rule_name, rule_config in rules.items():
# Config is validated through schema but do a sanity check
for attr in (
"domain_builder",
"expectation_configuration_builders",
):
if attr not in rule_config:
raise ge_exceptions.ProfilerConfigurationError(
message=f'Invalid rule "{rule_name}": missing mandatory {attr}.'
)
# Instantiate builder attributes
domain_builder = RuleBasedProfiler._init_domain_builder(
domain_builder_config=rule_config["domain_builder"],
data_context=data_context,
)
parameter_builders = RuleBasedProfiler._init_parameter_builders(
parameter_builder_configs=rule_config.get("parameter_builders"),
data_context=data_context,
)
expectation_configuration_builders = (
RuleBasedProfiler._init_expectation_configuration_builders(
expectation_configuration_builder_configs=rule_config[
"expectation_configuration_builders"
]
)
)
# Convert variables to ParameterContainer
_variables: Optional[
ParameterContainer
] = build_parameter_container_for_variables(variables_configs=variables)
# Compile previous steps and package into a Rule object
resulting_rules.append(
Rule(
name=rule_name,
domain_builder=domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=expectation_configuration_builders,
variables=_variables,
)
)
return resulting_rules
@staticmethod
def _init_domain_builder(
domain_builder_config: CommentedMap,
data_context: Optional["DataContext"] = None, # noqa: F821
) -> DomainBuilder:
domain_builder: DomainBuilder = instantiate_class_from_config(
config=domain_builder_config,
runtime_environment={"data_context": data_context},
config_defaults={
"module_name": "great_expectations.rule_based_profiler.domain_builder"
},
)
return domain_builder
@staticmethod
def _init_parameter_builders(
parameter_builder_configs: Optional[List[CommentedMap]] = None,
data_context: Optional["DataContext"] = None, # noqa: F821
) -> Optional[List[ParameterBuilder]]:
if parameter_builder_configs is None:
return None
parameter_builders: List[ParameterBuilder] = []
parameter_builder_config: CommentedMap
for parameter_builder_config in parameter_builder_configs:
parameter_builder = instantiate_class_from_config(
config=parameter_builder_config,
runtime_environment={"data_context": data_context},
config_defaults={
"module_name": "great_expectations.rule_based_profiler.parameter_builder"
},
)
parameter_builders.append(parameter_builder)
return parameter_builders
@staticmethod
def _init_expectation_configuration_builders(
expectation_configuration_builder_configs: List[CommentedMap],
) -> List[ExpectationConfigurationBuilder]:
expectation_configuration_builders: List[ExpectationConfigurationBuilder] = []
for (
expectation_configuration_builder_config
) in expectation_configuration_builder_configs:
expectation_configuration_builder = instantiate_class_from_config(
config=expectation_configuration_builder_config,
runtime_environment={},
config_defaults={
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
},
)
expectation_configuration_builders.append(expectation_configuration_builder)
return expectation_configuration_builders
    def run(
        self,
        *,
        expectation_suite_name: Optional[str] = None,
        include_citation: bool = True,
    ) -> ExpectationSuite:
        """
        Args:
            :param expectation_suite_name: A name for returned Expectation suite.
            :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler
            :return: Set of rule evaluation results in the form of an ExpectationSuite
        """
        # Generate a unique throwaway suite name when the caller supplies none.
        if expectation_suite_name is None:
            expectation_suite_name = (
                f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}"
            )
        expectation_suite: ExpectationSuite = ExpectationSuite(
            expectation_suite_name=expectation_suite_name,
            data_context=self._data_context,
        )
        if include_citation:
            # Attach the profiler config captured at __init__ time so the
            # suite records how it was produced.
            expectation_suite.add_citation(
                comment="Suite created by Rule-Based Profiler with the configuration included.",
                profiler_config=self._citation,
            )
        rule: Rule
        for rule in self._rules:
            # Each rule emits zero or more ExpectationConfigurations.
            expectation_configurations: List[ExpectationConfiguration] = rule.generate()
            expectation_configuration: ExpectationConfiguration
            for expectation_configuration in expectation_configurations:
                # Private add: skips the usage-stats event that the public
                # add_expectation() would emit for every configuration.
                expectation_suite._add_expectation(
                    expectation_configuration=expectation_configuration,
                    send_usage_event=False,
                )
        return expectation_suite
    def self_check(self, pretty_print=True) -> dict:
        """
        Necessary to enable integration with `DataContext.test_yaml_config`
        Args:
            pretty_print: flag to turn on verbose output
        Returns:
            Dictionary that contains RuleBasedProfiler state
        """
        # Provide visibility into parameters that RuleBasedProfiler was instantiated with.
        report_object: dict = {"config": self._citation}
        if pretty_print:
            print(f"\nRuleBasedProfiler class name: {self.name}")
            # Warn on an empty "variables" section since rules commonly
            # reference values from it.
            if not self._variables:
                print(
                    'Your current RuleBasedProfiler configuration has an empty "variables" attribute. \
Please ensure you populate it if you\'d like to reference values in your "rules" attribute.'
                )
        return report_object
| 43.860248 | 292 | 0.675423 |
721a63326d377030a45d29e6ab6ec896525492b6 | 1,538 | py | Python | ckanext-hdx_crisis/ckanext/hdx_crisis/dao/location_data_access.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckanext-hdx_crisis/ckanext/hdx_crisis/dao/location_data_access.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckanext-hdx_crisis/ckanext/hdx_crisis/dao/location_data_access.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | '''
Created on Dec 8, 2014
@author: alexandru-m-g
'''
import logging
import ckan.logic as logic
import ckan.model as model
import ckanext.hdx_crisis.dao.data_access as data_access
import ckanext.hdx_theme.helpers.top_line_items_formatter as formatters
from ckan.common import c
# Module-level shorthands for frequently used ckan/ckanext entry points.
get_action = logic.get_action
DataAccess = data_access.DataAccess
log = logging.getLogger(__name__)
def get_formatted_topline_numbers(top_line_resource_id):
    '''
    Helper function that is actually a wrapper: it initializez a LocationDataAccess with the provided
    top_line_resource_id and then it applues the TopLineItemsWithDateFormatter over the results
    :param top_line_resource_id: the resource id which has a datastore with topline items
    :type top_line_resource_id: str
    :return: dictionary with formatted topline items
    :rtype: dict
    '''
    # Build a ckan action context from the current request's user
    # (c.user / c.author / c.userobj come from ckan's request-global `c`).
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author, 'for_view': True,
               'auth_user_obj': c.userobj}
    loc_data_access = LocationDataAccess(top_line_resource_id)
    loc_data_access.fetch_data(context)
    top_line_items = loc_data_access.get_top_line_items()
    # Formatter mutates the items in place; the same list is returned below.
    formatter = formatters.TopLineItemsWithDateFormatter(top_line_items)
    formatter.format_results()
    return top_line_items
class LocationDataAccess(DataAccess):
    # Thin DataAccess specialization: registers a single datastore resource
    # ("top-line-numbers") identified by the given resource id; fetching and
    # item retrieval are inherited from DataAccess.
    def __init__(self, top_line_resource_id):
        self.resources_dict = {
            'top-line-numbers': {
                'resource_id': top_line_resource_id
            }
        }
| 28.481481 | 101 | 0.735371 |
0bf660edadd12f37c5d93c674b3c544660d4a84c | 2,467 | py | Python | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 2 | 2018-01-20T13:35:54.000Z | 2021-11-24T16:11:20.000Z | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 10 | 2017-05-09T22:26:31.000Z | 2018-06-15T00:05:07.000Z | test/test_ABVD.py | SimonGreenhill/ABVDGet | 4d101577af00476c113394f014a6c74ba4351e0a | [
"BSD-3-Clause"
] | 1 | 2018-01-20T13:40:40.000Z | 2018-01-20T13:40:40.000Z | import os
import unittest
import tempfile
from abvdget import ABVDatabase, Record
# Path to the test fixture: a single ABVD language export (Nengone, id 99).
TESTDATA = os.path.join(os.path.dirname(__file__), 'nengone.json')
# Expected Record field values keyed by record ID; used by
# TestABVD.test_process to verify parsing of the fixture.
EXPECTED = {
    99: {
        "LID": 99,
        "Annotation": "arm and hand",
        "Cognacy": '1',
        "Item": "nin",
        "Loan": None,
        "Word": "hand",
        "WID": 1
    },
    93340: {
        "LID": 99,
        "Annotation": None,
        "Cognacy": '13',
        "Item": "iñtërnâtiônàlizætiøn",
        "Loan": None,
        "Word": "leg/foot",
        "WID": 4,
    },
    90697: {
        "LID": 99,
        "Annotation": None,
        "Cognacy": None,
        "Item": "kaka",
        "Loan": None,
        "Word": "to eat",
        "WID": 37
    },
    70785: {
        "LID": 99,
        "Annotation": None,
        "Cognacy": '1',
        "Item": "tini",
        "Loan": None,
        "Word": "Three",
        "WID": 199
    }
}
class TestABVD(unittest.TestCase):
    # Tests exercise ABVDatabase against the Nengone fixture loaded once for
    # the whole class (setUpClass) since the database object is read-only here.
    @classmethod
    def setUpClass(cls):
        cls.abvd = ABVDatabase(files=[TESTDATA])
    def test_load(self):
        # Fixture file is registered with the database.
        assert TESTDATA in self.abvd.files
    def test_get_details(self):
        # Language-level metadata parsed from the JSON header.
        d = self.abvd.get_details(TESTDATA)
        assert d['id'] == '99'
        assert d['language'] == 'Nengone'
        assert d['silcode'] == 'nen'
        assert d['glottocode'] == 'neng1238'
    def test_get_details_injects_filename(self):
        # get_details adds the source filename to the returned mapping.
        d = self.abvd.get_details(TESTDATA)
        assert d.get('filename') == TESTDATA
    def test_get_location(self):
        d = self.abvd.get_location(TESTDATA)
        assert d['latitude'] == "-21.53484700204878876661"
        assert d['longitude'] == "167.98095703125000000000"
    def test_get_nlexemes(self):
        assert self.abvd.get_nlexemes(TESTDATA) == 4
    def test_get_ncognates(self):
        assert self.abvd.get_ncognates(TESTDATA) == 3
    def test_process(self):
        # Every processed record must match the EXPECTED table field-by-field.
        for r in self.abvd.process():
            assert r.ID in EXPECTED
            for k in EXPECTED[r.ID]:
                self.assertEqual(EXPECTED[r.ID][k], getattr(r, k))
    def test_get_slug_for(self):
        self.assertEqual(self.abvd.get_slug_for('Nengone', '99'), 'Nengone_99')
    def test_save_details(self):
        # save_details writes a CSV summary: one header row + one row per file.
        with tempfile.NamedTemporaryFile() as out:
            self.abvd.save_details(out.name)
            out.seek(0)
            content = out.readlines()
            assert len(content) == 2  # two lines, header and Nengone
| 26.244681 | 79 | 0.544386 |
9c37efa38a138543911bc948dc1cc7931616c3b3 | 314 | py | Python | rio/utils/token.py | soasme/rio | e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2 | [
"MIT"
] | null | null | null | rio/utils/token.py | soasme/rio | e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2 | [
"MIT"
] | 14 | 2016-04-14T04:18:41.000Z | 2016-05-12T03:46:37.000Z | rio/utils/token.py | soasme/rio | e6b89634db8d3ad75ac7f7b25ddec5b19d4f66e2 | [
"MIT"
] | 1 | 2016-04-06T08:54:20.000Z | 2016-04-06T08:54:20.000Z | # -*- coding: utf-8 -*-
"""
rio.utils.token
~~~~~~~~~~~~~~~
"""
import random
import string
def password_generator(length):
    """Generate a random password.

    Uses ``random.SystemRandom`` (backed by ``os.urandom``) instead of the
    default Mersenne-Twister PRNG: passwords are security-sensitive and the
    default ``random.choice`` is predictable.

    :param length: integer, number of characters to generate.
    :return: string of lowercase letters and digits of the given length.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
| 18.470588 | 72 | 0.595541 |
4e7865369744d7dbf17e7762a32374d776f311cf | 756 | py | Python | python/schuelerpraktika/sascha_mueller/Aufgaben/Klassen.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | python/schuelerpraktika/sascha_mueller/Aufgaben/Klassen.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | python/schuelerpraktika/sascha_mueller/Aufgaben/Klassen.py | maximilianharr/code_snippets | 8b271e6fa9174e24200e88be59e417abd5f2f59a | [
"BSD-3-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
class konto:
    """Simple bank account: holder name, account id and balance (geld)."""
    def __init__(self, name, id, geld):
        self.name = name
        self.id = id
        self.geld = geld
    def einzahlen(self, geld):
        # Fix: first parameter was named `z` while the body used `self`,
        # so every deposit raised NameError.
        if geld > 0:
            self.geld = self.geld + geld
    def auszahlen(self, geld):
        # Fix: the original condition was `geld < 0`, which ignored normal
        # withdrawals and let negative amounts inflate the balance.
        # Withdraw only a positive amount that is covered by the balance.
        if 0 < geld <= self.geld:
            self.geld = self.geld - geld
    def info(self):
        # print() with a single pre-formatted string works on Python 2 and 3.
        print("Name : %s" % (self.name,))
        print("Kontonummer : %s" % (self.id,))
        print("Geldbetrag : %s" % (self.geld,))
    def zahle_an(self, x, geld):
        # Fix: `(geld>0)==(geld<self.geld)` also fired when BOTH sides were
        # False (e.g. non-positive amounts above the balance). Require a
        # positive amount that the balance covers; full-balance transfers
        # are allowed.
        if 0 < geld <= self.geld:
            self.geld -= geld
            x.geld += geld
            print("Geld wurde überwiesen")
            return True
        return False
# Demo script: create two accounts, transfer 50 from a to b, and print the
# state of both accounts before and after the transfer.
a = konto("Max",1,100)
a.info()
b = konto("Mr.X",2,0)
b.info()
a.zahle_an(b,50)
a.info()
b.info()
| 19.384615 | 41 | 0.517196 |
49c663cbc7647958225a51740e3ad49eee2619a9 | 2,573 | py | Python | WuxiaWebScraping/WuxiaWebScraping.py | Zekihan/MyScripts | 083699a886391022c1d490267e6b8b3e43a1dab4 | [
"Apache-2.0"
] | 1 | 2020-08-13T16:48:07.000Z | 2020-08-13T16:48:07.000Z | WuxiaWebScraping/WuxiaWebScraping.pyw | Zekihan/MyScripts | 083699a886391022c1d490267e6b8b3e43a1dab4 | [
"Apache-2.0"
] | null | null | null | WuxiaWebScraping/WuxiaWebScraping.pyw | Zekihan/MyScripts | 083699a886391022c1d490267e6b8b3e43a1dab4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 15:26:17 2019
@author: Zekihan
"""
from tkinter import Tk,Scrollbar,RIGHT,Listbox,END,re,LEFT,mainloop,Y,BOTH
from tkinter import *
import webbrowser
def callback(evt):
    # Listbox selection handler. Each scraped record occupies four
    # consecutive rows (title, URL, translator, age); only a click on the
    # URL row (index % 4 == 1) opens the link in the default browser.
    w = evt.widget
    index = int(w.curselection()[0])
    if index%4 == 1:
        url = w.get(index)
        webbrowser.open_new(url)
def get_recent_updates():
    """Scrape wuxiaworld.com's "Most Recently Updated" table and show it
    in a scrollable Tk listbox (4 rows per record: title, URL, translator,
    relative age). Clicking a URL row opens it via callback().

    NOTE(review): parsing relies on exact string-splitting of the homepage
    HTML and will break whenever the site's markup changes — verify against
    the current page before relying on this.
    """
    # Function-scope imports keep the scraping dependencies out of module load.
    import requests
    from bs4 import BeautifulSoup
    import time
    url = "https://www.wuxiaworld.com"
    response = requests.get(url)
    recents = []
    # Split off everything after the "Most Recently Updated" table header.
    t = response.text.split("<h3>Most Recently Updated</h3>\n</div>\n<div class=\"section-content\">\n<table class=\"table table-novels\">\n<thead class=\"hidden-xs\">\n<tr>\n<th>Title</th>\n<th>Release</th>\n<th>Translator</th>\n<th>Time</th>\n</tr>\n</thead>\n<tbody>")
    t = t[1].split("<tr>")
    for i in range(1,len(t)):
        item = []
        # item[0]: novel title (HTML-unescaped via BeautifulSoup).
        x = t[i].split("<td>\n<span class=\"title\">\n<a href=\"")[1].split("\">")[1].split("</a>")[0]
        soup = BeautifulSoup(x, "html.parser")
        item.append(soup.text)
        # item[1]: absolute chapter URL; item[2]: translator/chapter label.
        x = t[i].split("<div class=\"visible-xs-inline\">\n<a class=\"")[1].split("\" href=\"")[1].split("\">")
        item.append(url + x[0])
        x = x[1].split("</a>")
        soup = BeautifulSoup(x[0], "html.parser")
        item.append(soup.text[1:-1])
        # item[3]: release time as a unix timestamp string.
        x = t[i].split("data-timestamp=\"")[1].split("\">")[0]
        item.append(x)
        recents.append(item)
    ts = time.time()
    # Build the Tk UI: a listbox with vertical scrollbar.
    top = Tk()
    top.geometry("800x600")
    sb = Scrollbar(top)
    sb.pack(side = RIGHT, fill = Y)
    mylist = Listbox(top, yscrollcommand = sb.set )
    for recent in recents:
        mylist.insert(END, recent[0])
        mylist.insert(END, recent[1])
        char_list = recent[2]
        # Replace characters outside the BMP: Tk cannot render them.
        re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
        filtered_string = re_pattern.sub(u'\uFFFD', char_list)
        mylist.insert(END, filtered_string)
        # Humanize the age of the release.
        # NOTE(review): the hour branch is tested before the day branch, so
        # ages over 24h are still reported in hours — confirm intent.
        diff = (int(ts)-int(recent[3]))
        if diff>3600:
            hour = str(int(diff/3600))
            mylist.insert(END, "%s hour ago.\n" %hour)
        elif diff>(24*3600):
            day = str(int(diff/(24*3600)))
            mylist.insert(END, "%s day ago.\n" %day)
        else:
            minute = str(int(diff/60))
            mylist.insert(END, "%s minute ago.\n" %minute)
    mylist.bind('<<ListboxSelect>>', callback)
    mylist.pack( fill = BOTH, expand=True)
    sb.config( command = mylist.yview )
    mainloop()
get_recent_updates()
| 32.56962 | 271 | 0.559658 |
8a24b4c3f33ed54ce2504b22521cf00a67e51d07 | 4,874 | py | Python | saleor/graphql/product/bulk_mutations/product_max_min.py | hoangtuananh97/saleor | 94ad493ef61302fb458822868fc2b4a884ec2065 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/product/bulk_mutations/product_max_min.py | hoangtuananh97/saleor | 94ad493ef61302fb458822868fc2b4a884ec2065 | [
"CC-BY-4.0"
] | 4 | 2021-09-06T03:55:32.000Z | 2021-10-15T08:47:58.000Z | saleor/graphql/product/bulk_mutations/product_max_min.py | hoangtuananh97/saleor | 94ad493ef61302fb458822868fc2b4a884ec2065 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from django.utils import timezone
from saleor.graphql.core.mutations import ModelBulkDeleteMutation, ModelMutation
from saleor.graphql.product.mutations.product_max_min import (
ProductMaxMinInput,
ProductMaxMinMixin,
)
from saleor.graphql.product.types.product_max_min import ProductMaxMin
from ....core.permissions import ProductMaxMinPermissions
from ....core.tracing import traced_atomic_transaction
from ....product_max_min import models
from ...core.types.common import ProductMaxMinError
class ProductMaxMinBulkUpdateInput(ProductMaxMinInput):
    # Update input extends the create input with the mandatory object ID.
    id = graphene.ID(description="ID of the product class max min.", required=True)
class BaseProductMaxMinBulk(ModelMutation, ProductMaxMinMixin):
    """Shared base for bulk create/update of ProductMaxMin objects.

    Subclasses supply the input class (config_input_cls) and decide which
    audit field records the acting user (add_field_user).
    """
    count = graphene.Int(
        required=True,
        default_value=0,
        description="Returns how many objects were created.",
    )
    products_max_min = graphene.List(
        graphene.NonNull(ProductMaxMin),
        required=True,
        default_value=[],
        description="List of the created product max min.",
    )
    class Meta:
        abstract = True
    @classmethod
    def config_input_cls(cls):
        # Subclass hook: the graphene input type used to clean each item.
        raise NotImplementedError
    @classmethod
    def add_field_user(cls, cleaned_input, data):
        # Subclass hook: stamp the acting user (created_by / updated_by).
        raise NotImplementedError
    @classmethod
    def validate(cls, _root, info, **data):
        """Clean and validate every input item; return unsaved model instances."""
        instances = []
        data = data.get("input")
        input_cls = cls.config_input_cls()
        user = info.context.user
        for item in data:
            instance = cls.get_instance(info, **item)
            cleaned_input = cls.clean_input(info, instance, item, input_cls=input_cls)
            cls.validate_product_max_min(instance, cleaned_input)
            cleaned_input = cls.add_field_user(cleaned_input, user)
            instance = cls.construct_instance(instance, cleaned_input)
            # Runs full model validation; raises on invalid instances.
            cls.clean_instance(info, instance)
            instances.append(instance)
        return instances
class ProductMaxMinBulkCreate(BaseProductMaxMinBulk):
    """Bulk-create mutation for product max/min levels."""
    class Arguments:
        input = graphene.List(
            graphene.NonNull(ProductMaxMinInput),
            required=True,
            description="Input list of product max min to create.",
        )
    class Meta:
        model = models.ProductMaxMin
        description = "Creates product max min."
        permissions = (ProductMaxMinPermissions.MANAGE_PRODUCT_MAX_MIN,)
        error_type_class = ProductMaxMinError
        error_type_field = "product_class_max_min_errors"
    @classmethod
    def config_input_cls(cls):
        return ProductMaxMinInput
    @classmethod
    def add_field_user(cls, cleaned_input, data):
        # Record the acting user as the creator.
        cleaned_input["created_by"] = data
        return cleaned_input
    @classmethod
    @traced_atomic_transaction()
    def perform_mutation(cls, _root, info, **data):
        # Validate everything first, then insert in a single bulk_create
        # inside one traced transaction.
        instances = cls.validate(_root, info, **data)
        data = models.ProductMaxMin.objects.bulk_create(instances)
        return ProductMaxMinBulkCreate(count=len(data), products_max_min=data)
class ProductMaxMinBulkUpdate(BaseProductMaxMinBulk):
    """Bulk-update mutation for product max/min levels.

    Fixes vs. previous revision:
    - ``timezone.datetime.now()`` returned a *naive* datetime; use
      ``timezone.now()`` which is timezone-aware under Django's USE_TZ.
    - ``updated_at`` / ``updated_by`` were assigned on the instances but
      omitted from the ``bulk_update`` field list, so they were never
      persisted; they are now included.
    """
    class Arguments:
        input = graphene.List(
            graphene.NonNull(ProductMaxMinBulkUpdateInput),
            required=True,
            description="Input list of product max min to update.",
        )
    class Meta:
        model = models.ProductMaxMin
        description = "Update product max min."
        permissions = (ProductMaxMinPermissions.MANAGE_PRODUCT_MAX_MIN,)
        error_type_class = ProductMaxMinError
        error_type_field = "product_class_max_min_errors"
    @classmethod
    def config_input_cls(cls):
        # Update input additionally carries the mandatory object ID.
        return ProductMaxMinBulkUpdateInput
    @classmethod
    def add_field_user(cls, cleaned_input, data):
        # Record the acting user as the updater.
        cleaned_input["updated_by"] = data
        return cleaned_input
    @classmethod
    @traced_atomic_transaction()
    def perform_mutation(cls, _root, info, **data):
        instances = cls.validate(_root, info, **data)
        # One aware timestamp shared by the whole batch.
        now = timezone.now()
        for instance in instances:
            instance.updated_at = now
        models.ProductMaxMin.objects.bulk_update(
            instances,
            [
                "min_level",
                "max_level",
                "listing_id",
                "updated_at",
                "updated_by",
            ],
        )
        return ProductMaxMinBulkUpdate(count=len(instances), products_max_min=instances)
class ProductMaxMinBulkDelete(ModelBulkDeleteMutation):
    """Bulk-delete mutation: removes the ProductMaxMin objects named by IDs.

    All deletion logic is inherited from ModelBulkDeleteMutation.
    """
    class Arguments:
        ids = graphene.List(
            graphene.ID,
            required=True,
            description="List of product class IDs to delete.",
        )
    class Meta:
        model = models.ProductMaxMin
        description = "Delete product max min."
        permissions = (ProductMaxMinPermissions.MANAGE_PRODUCT_MAX_MIN,)
        error_type_class = ProductMaxMinError
        error_type_field = "product_class_max_min_errors"
| 32.711409 | 88 | 0.678293 |
e7ab62c0bbc41e9bb0db29d96cd3e3fa4d367ab4 | 5,412 | py | Python | doubt/datasets/superconductivity.py | nobias-project/doubt | 270abc9b1fb8e708893d8c4782472488459a0262 | [
"MIT"
] | 27 | 2020-06-27T09:18:30.000Z | 2022-03-18T17:28:39.000Z | doubt/datasets/superconductivity.py | nobias-project/doubt | 270abc9b1fb8e708893d8c4782472488459a0262 | [
"MIT"
] | 27 | 2020-03-30T10:13:47.000Z | 2022-01-07T14:39:37.000Z | doubt/datasets/superconductivity.py | nobias-project/doubt | 270abc9b1fb8e708893d8c4782472488459a0262 | [
"MIT"
] | 2 | 2021-05-13T08:55:19.000Z | 2021-05-13T09:23:04.000Z | '''Superconductivity data set.
This data set is from the UCI data set archive, with the description being
the original description verbatim. Some feature names may have been altered,
based on the description.
'''
from ._dataset import BaseDataset, BASE_DATASET_DESCRIPTION
import pandas as pd
import zipfile
import io
class Superconductivity(BaseDataset):
    __doc__ = f'''
    This dataset contains data on 21,263 superconductors and their relevant
    features. The goal here is to predict the critical temperature based on
    the features extracted.
    {BASE_DATASET_DESCRIPTION}
    Features:
        number_of_elements (int)
        mean_atomic_mass (float)
        wtd_mean_atomic_mass (float)
        gmean_atomic_mass (float)
        wtd_gmean_atomic_mass (float)
        entropy_atomic_mass (float)
        wtd_entropy_atomic_mass (float)
        range_atomic_mass (float)
        wtd_range_atomic_mass (float)
        std_atomic_mass (float)
        wtd_std_atomic_mass (float)
        mean_fie (float)
        wtd_mean_fie (float)
        gmean_fie (float)
        wtd_gmean_fie (float)
        entropy_fie (float)
        wtd_entropy_fie (float)
        range_fie (float)
        wtd_range_fie (float)
        std_fie (float)
        wtd_std_fie (float)
        mean_atomic_radius (float)
        wtd_mean_atomic_radius (float)
        gmean_atomic_radius (float)
        wtd_gmean_atomic_radius (float)
        entropy_atomic_radius (float)
        wtd_entropy_atomic_radius (float)
        range_atomic_radius (float)
        wtd_range_atomic_radius (float)
        std_atomic_radius (float)
        wtd_std_atomic_radius (float)
        mean_Density (float)
        wtd_mean_Density (float)
        gmean_Density (float)
        wtd_gmean_Density (float)
        entropy_Density (float)
        wtd_entropy_Density (float)
        range_Density (float)
        wtd_range_Density (float)
        std_Density (float)
        wtd_std_Density (float)
        mean_ElectronAffinity (float)
        wtd_mean_ElectronAffinity (float)
        gmean_ElectronAffinity (float)
        wtd_gmean_ElectronAffinity (float)
        entropy_ElectronAffinity (float)
        wtd_entropy_ElectronAffinity (float)
        range_ElectronAffinity (float)
        wtd_range_ElectronAffinity (float)
        std_ElectronAffinity (float)
        wtd_std_ElectronAffinity (float)
        mean_FusionHeat (float)
        wtd_mean_FusionHeat (float)
        gmean_FusionHeat (float)
        wtd_gmean_FusionHeat (float)
        entropy_FusionHeat (float)
        wtd_entropy_FusionHeat (float)
        range_FusionHeat (float)
        wtd_range_FusionHeat (float)
        std_FusionHeat (float)
        wtd_std_FusionHeat (float)
        mean_ThermalConductivity (float)
        wtd_mean_ThermalConductivity (float)
        gmean_ThermalConductivity (float)
        wtd_gmean_ThermalConductivity (float)
        entropy_ThermalConductivity (float)
        wtd_entropy_ThermalConductivity (float)
        range_ThermalConductivity (float)
        wtd_range_ThermalConductivity (float)
        std_ThermalConductivity (float)
        wtd_std_ThermalConductivity (float)
        mean_Valence (float)
        wtd_mean_Valence (float)
        gmean_Valence (float)
        wtd_gmean_Valence (float)
        entropy_Valence (float)
        wtd_entropy_Valence (float)
        range_Valence (float)
        wtd_range_Valence (float)
        std_Valence (float)
        wtd_std_Valence (float)
    Targets:
        critical_temp (float)
    Source:
        https://archive.ics.uci.edu/ml/datasets/Superconductivty+Data
    Examples:
        Load in the data set::
            >>> dataset = Superconductivity()
            >>> dataset.shape
            (21263, 82)
        Split the data set into features and targets, as NumPy arrays::
            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((21263, 81), (21263,))
        Perform a train/test split, also outputting NumPy arrays::
            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((17004, 81), (17004,), (4259, 81), (4259,))
        Output the underlying Pandas DataFrame::
            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''
    # Remote zip archive containing the raw CSV (downloaded by BaseDataset).
    _url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/'
            '00464/superconduct.zip')
    # Column indices: first 81 columns are features, column 81 is the target.
    _features = range(81)
    _targets = [81]
    def _prep_data(self, data: bytes) -> pd.DataFrame:
        ''' Prepare the data set.
        Args:
            data (bytes): The raw data
        Returns:
            Pandas dataframe: The prepared data
        '''
        # Convert the bytes into a file-like object
        buffer = io.BytesIO(data)
        # Unzip the file and pull out the text
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            txt = zip_file.read('train.csv')
        # Convert text to csv file
        csv_file = io.BytesIO(txt)
        # Load the csv file into a dataframe
        df = pd.read_csv(csv_file)
        return df
| 32.214286 | 79 | 0.620288 |
171b73297ca3a9a29143236ae1de7ee9351e37c9 | 1,728 | py | Python | app/user/serializers.py | ChenyoisRita/recipe-app-api | 332cf47caa8b6232b2e6c4dfe61cc2d683de4f8d | [
"MIT"
] | null | null | null | app/user/serializers.py | ChenyoisRita/recipe-app-api | 332cf47caa8b6232b2e6c4dfe61cc2d683de4f8d | [
"MIT"
] | null | null | null | app/user/serializers.py | ChenyoisRita/recipe-app-api | 332cf47caa8b6232b2e6c4dfe61cc2d683de4f8d | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the users object"""
    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Never expose the password in responses; enforce a minimum length.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
    def create(self, validated_data):
        """Create a new user with encrypted password and return it"""
        # create_user() hashes the password, unlike a plain Model.create().
        return get_user_model().objects.create_user(**validated_data)
    def update(self, instance, validated_data):
        """Update a user, setting the password correctly and return it"""
        # Pop the password so the base update does not store it unhashed.
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            user.set_password(password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication object"""
    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        # Passwords may legitimately start/end with whitespace.
        trim_whitespace=False
    )
    def validate(self, attrs):
        """Validate and authenticate the user"""
        email = attrs.get('email')
        password = attrs.get('password')
        # This project authenticates with email as the username.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password,
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authentication')
        # Views (e.g. ObtainAuthToken) read the authenticated user from attrs.
        attrs['user'] = user
        return attrs
| 32 | 74 | 0.646412 |
206f7e4ad5605371c7d9d60af2e93bec8f73ec1f | 10,802 | py | Python | fakenet/listeners/RawListener.py | 4k4xs4pH1r3/flare-fakenet-ng | 596bb139b59eb15323510ed41e33661a40c8d80c | [
"Apache-2.0"
] | 1,360 | 2016-06-27T20:54:26.000Z | 2021-09-15T17:20:39.000Z | fakenet/listeners/RawListener.py | stevemk14ebr/flare-fakenet-ng | 706c0d4f1355e30f6abb25626fdcb3e53f2957e1 | [
"Apache-2.0"
] | 90 | 2016-08-29T17:25:24.000Z | 2021-07-17T15:19:46.000Z | fakenet/listeners/RawListener.py | stevemk14ebr/flare-fakenet-ng | 706c0d4f1355e30f6abb25626fdcb3e53f2957e1 | [
"Apache-2.0"
] | 335 | 2016-07-11T23:25:54.000Z | 2021-09-08T22:27:33.000Z | import logging
from ConfigParser import ConfigParser
import os
import sys
import imp
import base64
import threading
import SocketServer
import ssl
import socket
from . import *
INDENT = ' '
def qualify_file_path(filename, fallbackdir):
    """Resolve a configured file path.

    Tries `filename` as given; if it does not exist, retries it relative to
    `fallbackdir`. Falsy inputs (None / empty string) are passed through
    unchanged so optional settings stay optional.

    Raises RuntimeError when neither location exists.
    """
    if not filename:
        return filename
    candidate = filename
    if not os.path.exists(candidate):
        candidate = os.path.join(fallbackdir, filename)
    if not os.path.exists(candidate):
        raise RuntimeError('Cannot find %s' % (filename))
    return candidate
class RawCustomResponse(object):
    # Parses one [section] of a Custom Response config and holds either a
    # static payload (self.static) or a user-supplied handler (self.handler).
    def __init__(self, proto, name, conf, configroot):
        self.name = name
        self.static = None
        self.handler = None
        # Config keys are protocol-prefixed, e.g. tcprawfile / udpdynamic.
        spec_file = '%srawfile' % (proto.lower())
        spec_str = '%sstaticstring' % (proto.lower())
        spec_b64 = '%sstaticbase64' % (proto.lower())
        spec_dyn = '%sdynamic' % (proto.lower())
        response_specs = {
            spec_file,
            spec_str,
            spec_b64,
            spec_dyn,
        }
        # Exactly one response specification must be present per section.
        nr_responses = len(response_specs.intersection(conf))
        if nr_responses != 1:
            raise ValueError('Custom %s config section %s has %d of %s' %
                             (proto.upper(), name, nr_responses,
                              '/'.join(response_specs)))
        # Try the specs in order: literal string, base64, raw file, dynamic.
        self.static = conf.get(spec_str)
        if self.static is not None:
            self.static = self.static.rstrip('\r\n')
        # `not self.static is not None` parses as `self.static is None`.
        if not self.static is not None:
            b64_text = conf.get(spec_b64)
            if b64_text:
                self.static = base64.b64decode(b64_text)
        if not self.static is not None:
            file_path = conf.get(spec_file)
            if file_path:
                raw_file = qualify_file_path(file_path, configroot)
                self.static = open(raw_file, 'rb').read()
        # Dynamic responses load a Python module exposing HandleTcp/HandleUdp.
        pymodpath = qualify_file_path(conf.get(spec_dyn), configroot)
        if pymodpath:
            pymod = imp.load_source('cr_raw_' + self.name, pymodpath)
            funcname = 'Handle%s' % (proto.capitalize())
            if not hasattr(pymod, funcname):
                raise ValueError('Loaded %s module %s has no function %s' %
                                 (spec_dyn, conf.get(spec_dyn), funcname))
            self.handler = getattr(pymod, funcname)
    def respondUdp(self, sock, data, addr):
        # Static payload takes precedence; otherwise delegate to the handler.
        if self.static:
            sock.sendto(self.static, addr)
        elif self.handler:
            self.handler(sock, data, addr)
class RawListener(object):
    # Catch-all listener: echoes (or custom-responds to) raw TCP/UDP traffic.
    def taste(self, data, dport):
        # Fallback protocol-taste score: always claims the traffic (lowest
        # priority match among FakeNet listeners).
        return 1
    def __init__(self,
            config,
            name='RawListener',
            logging_level=logging.INFO,
            ):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging_level)
        self.config = config
        self.name = name
        self.local_ip = config.get('ipaddr')
        self.server = None
        self.port = self.config.get('port', 1337)
        self.logger.debug('Starting...')
        self.logger.debug('Initialized with config:')
        for key, value in config.iteritems():
            self.logger.debug(' %10s: %s', key, value)
    def start(self):
        # Start listener
        # Choose the threaded server class from the configured protocol.
        proto = self.config.get('protocol')
        if proto is not None:
            if proto.lower() == 'tcp':
                self.logger.debug('Starting TCP ...')
                self.server = ThreadedTCPServer((self.local_ip, int(self.config['port'])), ThreadedTCPRequestHandler)
            elif proto.lower() == 'udp':
                self.logger.debug('Starting UDP ...')
                self.server = ThreadedUDPServer((self.local_ip, int(self.config['port'])), ThreadedUDPRequestHandler)
            else:
                self.logger.error('Unknown protocol %s', self.config['protocol'])
                return
        else:
            self.logger.error('Protocol is not defined.')
            return
        # Give the handlers access to our logger and config.
        self.server.logger = self.logger
        self.server.config = self.config
        if self.config.get('usessl') == 'Yes':
            self.logger.debug('Using SSL socket.')
            keyfile_path = 'listeners/ssl_utils/privkey.pem'
            keyfile_path = ListenerBase.abs_config_path(keyfile_path)
            if keyfile_path is None:
                self.logger.error('Could not locate %s', keyfile_path)
                sys.exit(1)
            certfile_path = 'listeners/ssl_utils/server.pem'
            certfile_path = ListenerBase.abs_config_path(certfile_path)
            if certfile_path is None:
                self.logger.error('Could not locate %s', certfile_path)
                sys.exit(1)
            self.server.socket = ssl.wrap_socket(self.server.socket, keyfile=keyfile_path, certfile=certfile_path, server_side=True, ciphers='RSA')
        self.server.custom_response = None
        custom = self.config.get('custom')
        def checkSetting(d, name, value):
            # Case-insensitive comparison of an optional config entry.
            if name not in d:
                return False
            return d[name].lower() == value.lower()
        if custom:
            # Load the Custom Response ini and pick the (single) section that
            # targets this listener instance or this protocol.
            configdir = self.config.get('configdir')
            custom = qualify_file_path(custom, configdir)
            customconf = ConfigParser()
            customconf.read(custom)
            for section in customconf.sections():
                entries = dict(customconf.items(section))
                if (('instancename' not in entries) and
                        ('listenertype' not in entries)):
                    msg = 'Custom Response lacks ListenerType or InstanceName'
                    raise RuntimeError(msg)
                if (checkSetting(entries, 'instancename', self.name) or
                        checkSetting(entries, 'listenertype', proto)):
                    if self.server.custom_response:
                        msg = ('Only one %s Custom Response can be configured '
                               'at a time' % (proto))
                        raise RuntimeError(msg)
                    self.server.custom_response = (
                        RawCustomResponse(proto, section, entries, configdir))
        # Serve on a daemon thread so the process can exit cleanly.
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
    def stop(self):
        self.logger.debug('Stopping...')
        if self.server:
            self.server.shutdown()
            self.server.server_close()
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Per-connection TCP handler: echoes data back (or replies with the
    # configured custom response) and hex-dumps everything received.
    def handle(self):
        # Hook to ensure that all `recv` calls transparently emit a hex dump
        # in the log output, even if they occur within a user-implemented
        # custom handler
        def do_hexdump(data):
            for line in hexdump_table(data):
                self.server.logger.info(INDENT + line)
        orig_recv = self.request.recv
        def hook_recv(self, bufsize, flags=0):
            data = orig_recv(bufsize, flags)
            if data:
                do_hexdump(data)
            return data
        # Bind the hook as a method on this socket instance only.
        bound_meth = hook_recv.__get__(self.request, self.request.__class__)
        setattr(self.request, 'recv', bound_meth)
        # Timeout connection to prevent hanging
        self.request.settimeout(int(self.server.config.get('timeout', 5)))
        cr = self.server.custom_response
        # Allow user-scripted responses to handle all control flow (e.g.
        # looping, exception handling, etc.)
        if cr and cr.handler:
            cr.handler(self.request)
        else:
            try:
                # Echo loop: static custom payload if configured, else mirror
                # the received bytes until the peer closes or times out.
                while True:
                    data = self.request.recv(1024)
                    if not data:
                        break
                    if cr and cr.static:
                        self.request.sendall(cr.static)
                    else:
                        self.request.sendall(data)
            except socket.timeout:
                self.server.logger.warning('Connection timeout')
            except socket.error as msg:
                self.server.logger.error('Error: %s', msg.strerror or msg)
            except Exception, e:
                self.server.logger.error('Error: %s', e)
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
    """Per-datagram UDP handler: hex-dumps the payload, then either hands it
    to a configured custom responder or echoes it back to the sender."""
    def handle(self):
        (data, sock) = self.request
        if data:
            for line in hexdump_table(data):
                self.server.logger.info(INDENT + line)
        cr = self.server.custom_response
        if cr:
            cr.respondUdp(sock, data, self.client_address)
        elif data:
            try:
                sock.sendto(data, self.client_address)
            except socket.error as msg:
                self.server.logger.error('Error: %s', msg.strerror or msg)
            # Bug fix: `except Exception, e:` is Python-2-only syntax and a
            # SyntaxError on Python 3; the `as` form works on 2.6+ and 3.x.
            except Exception as e:
                self.server.logger.error('Error: %s', e)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that dispatches each connection to its own thread."""
    # Avoid [Errno 98] Address already in use due to TIME_WAIT status on TCP
    # sockets, for details see:
    # https://stackoverflow.com/questions/4465959/python-errno-98-address-already-in-use
    allow_reuse_address = True
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    """UDP server that dispatches each datagram to its own thread."""
    pass
def hexdump_table(data, length=16):
    """Render *data* as classic hex-dump rows: offset, hex bytes, ASCII.

    Args:
        data: byte string (str on Python 2) to render.
        length: bytes per row; also sizes the hex column (length * 3 chars).

    Returns:
        List of formatted row strings.
    """
    hexdump_lines = []
    # Bug fix: chunking previously hard-coded 16 and ignored `length`, so any
    # non-default `length` produced rows that disagreed with the column width.
    for i in range(0, len(data), length):
        chunk = data[i:i + length]
        hex_line = ' '.join("%02X" % ord(b) for b in chunk)
        # Printable ASCII (0x20-0x7E) is shown literally, everything else as '.'
        ascii_line = ''.join(b if 31 < ord(b) < 127 else '.' for b in chunk)
        hexdump_lines.append("%04X: %-*s %s" % (i, length * 3, hex_line, ascii_line))
    return hexdump_lines
###############################################################################
# Testing code
def test(config):
    """Smoke test: connect to the local listener, send a greeting, read echo.

    Args:
        config: dict with an optional 'port' entry (defaults to 23).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bug fix: the Python-2 print statement is a SyntaxError on Python 3;
    # the single-argument print() call parses identically on both.
    print("\t[RawListener] Sending request:\n%s" % "HELO\n")
    try:
        # Connect to server and send data
        sock.connect(('localhost', int(config.get('port', 23))))
        sock.sendall("HELO\n")
        # Receive data from the server and shut down
        received = sock.recv(1024)
    finally:
        sock.close()
def main():
    """Demo entry point: run a plain-TCP RawListener on port 1337 until Ctrl-C."""
    import time
    logging.basicConfig(
        format='%(asctime)s [%(name)15s] %(message)s',
        datefmt='%m/%d/%y %I:%M:%S %p',
        level=logging.DEBUG)
    config = {'port': '1337', 'usessl': 'No', 'protocol': 'tcp'}
    listener = RawListener(config)
    listener.start()
    # Idle until the user interrupts, then shut the listener down cleanly.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    # Optional smoke test (disabled): test(config)
    listener.stop()
# Script entry point: run the demo listener when executed directly.
if __name__ == '__main__':
    main()
| 32.438438 | 147 | 0.560174 |
ef5eef0494390fbb8a48ff3ea37625c1f3a7c567 | 3,578 | py | Python | lib/googlecloudsdk/command_lib/certificate_manager/util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/command_lib/certificate_manager/util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/certificate_manager/util.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities for Certificate Manager commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.certificate_manager import api_client
from googlecloudsdk.api_lib.certificate_manager import operations
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
# Resource collection names used by the Certificate Manager parsers below.
_OPERATIONS_COLLECTION = 'certificatemanager.projects.locations.operations'
_CERTIFICATE_MAPS_COLLECTION = 'certificatemanager.projects.locations.certificateMaps'
_CERTIFICATE_MAP_ENTRIES_COLLECTION = 'certificatemanager.projects.locations.certificateMaps.certificateMapEntries'
_CERTIFICATES_COLLECTION = 'certificatemanager.projects.locations.certificates'
# Callable so the core/project property is read lazily, when a parse runs.
_PROJECT = lambda: properties.VALUES.core.project.Get(required=True)
def _GetRegistry():
  """Return a resource registry pinned to the Certificate Manager API version."""
  reg = resources.REGISTRY.Clone()
  reg.RegisterApiByName('certificatemanager', api_client.API_VERSION)
  return reg
def _ParseOperation(operation):
  """Parse an operation id/name into a global operations resource reference."""
  parse_params = {'projectsId': _PROJECT, 'locationsId': 'global'}
  return _GetRegistry().Parse(
      operation, params=parse_params, collection=_OPERATIONS_COLLECTION)
def _ParseCertificateMap(certificate_map):
  """Parse a certificate map id/name into a global resource reference."""
  parse_params = {'projectsId': _PROJECT, 'locationsId': 'global'}
  return _GetRegistry().Parse(
      certificate_map, params=parse_params,
      collection=_CERTIFICATE_MAPS_COLLECTION)
def _ParseCertificateMapEntry(certificate_map_entry):
  """Parse a certificate map entry name into a resource reference."""
  # NOTE(review): 'certificateMapId' is populated with the COLLECTION path
  # string (_CERTIFICATE_MAPS_COLLECTION) rather than a map id or a resolver
  # callable like _PROJECT above — confirm this is what Parse() expects for
  # the nested parent parameter.
  return _GetRegistry().Parse(
      certificate_map_entry,
      params={
          'projectsId': _PROJECT,
          'locationsId': 'global',
          'certificateMapId': _CERTIFICATE_MAPS_COLLECTION,
      },
      collection=_CERTIFICATE_MAP_ENTRIES_COLLECTION)
def _ParseCertificate(certificate):
  """Parse a certificate id/name into a global resource reference."""
  parse_params = {'projectsId': _PROJECT, 'locationsId': 'global'}
  return _GetRegistry().Parse(
      certificate, params=parse_params, collection=_CERTIFICATES_COLLECTION)
def WaitForOperation(response, is_async=False):
  """Handle waiting for an operation and print progress information.

  Args:
    response: Response from the API call.
    is_async: If true, do not wait for the operation.

  Returns:
    The last information about the operation.
  """
  operation_ref = _ParseOperation(response.name)
  if is_async:
    log.status.Print('Started \'{}\''.format(operation_ref.Name()))
    return response
  waiter = operations.OperationClient()
  return waiter.WaitForOperation(
      operation_ref,
      'Waiting for \'{}\' to complete'.format(operation_ref.Name()))
def CertificateMapUriFunc(resource):
  """Return the self-link URI for a certificate map list row."""
  map_ref = _ParseCertificateMap(resource.name)
  return map_ref.SelfLink()
def CertificateMapEntryUriFunc(resource):
  """Return the self-link URI for a certificate map entry list row."""
  entry_ref = _ParseCertificateMapEntry(resource.name)
  return entry_ref.SelfLink()
def CertificateUriFunc(resource):
  """Return the self-link URI for a certificate list row."""
  cert_ref = _ParseCertificate(resource.name)
  return cert_ref.SelfLink()
| 31.946429 | 115 | 0.754891 |
6049e203adf7a7017682e027c613ec41f238e376 | 17,277 | py | Python | dface/core/detect.py | tobyclh/DFace | a2d8d303171691c59da381363aa89632fafca78b | [
"Apache-2.0"
] | null | null | null | dface/core/detect.py | tobyclh/DFace | a2d8d303171691c59da381363aa89632fafca78b | [
"Apache-2.0"
] | null | null | null | dface/core/detect.py | tobyclh/DFace | a2d8d303171691c59da381363aa89632fafca78b | [
"Apache-2.0"
] | null | null | null | import cv2
import time
import numpy as np
import torch
import torch.nn.functional as F
from dface.core.models import PNet,RNet,ONet
import dface.core.utils as utils
import dface.core.image_tools as image_tools
from torch.jit import script
def create_mtcnn_net(p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):
    """Build the MTCNN P/R/O networks from checkpoint files.

    Args:
        p_model_path: path to the PNet state dict, or None to skip PNet.
        r_model_path: path to the RNet state dict, or None to skip RNet.
        o_model_path: path to the ONet state dict, or None to skip ONet.
        use_cuda: move networks to GPU and load weights onto it.

    Returns:
        (pnet, rnet, onet) tuple; entries are None where no path was given.
    """
    def _load(net_cls, model_path):
        # Shared loader: the three branches were previously triplicated.
        if model_path is None:
            return None
        net = net_cls(use_cuda=use_cuda)
        if use_cuda:
            net.load_state_dict(torch.load(model_path))
            net.cuda()
        else:
            # Force all GPU tensors onto the CPU while loading.
            net.load_state_dict(
                torch.load(model_path, map_location=lambda storage, loc: storage))
        net.eval()
        return net

    pnet = _load(PNet, p_model_path)
    rnet = _load(RNet, r_model_path)
    onet = _load(ONet, o_model_path)
    return pnet, rnet, onet
class MtcnnDetector(object):
"""
P,R,O net face detection and landmarks align
"""
    def __init__(self,
                 pnet = None,
                 rnet = None,
                 onet = None,
                 min_face_size=12,
                 stride=2,
                 threshold=[0.6, 0.7, 0.7],
                 scale_factor=0.709,
                 ):
        """Store the three stage detectors and the cascade hyperparameters.

        threshold holds the per-stage score cutoffs [pnet, rnet, onet].
        NOTE: the list default is mutable but only read here; it is assigned
        to self.thresh without copying.
        """
        self.pnet_detector = pnet
        self.rnet_detector = rnet
        self.onet_detector = onet
        # NOTE(review): use_cuda is taken from rnet, so constructing a
        # detector without an RNet raises AttributeError here — confirm RNet
        # is always supplied by callers.
        self.use_cuda = self.rnet_detector.use_cuda
        self.min_face_size = min_face_size
        self.stride=stride
        self.thresh = threshold
        self.scale_factor = scale_factor
        # Tensor factory namespace: torch.cuda for GPU tensors, torch for CPU.
        self.tensortype = torch.cuda if self.use_cuda else torch
# @script
def square_bbox(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: torch Tensor , shape n x m
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.clone()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
l = torch.max(h, w)
square_bbox[:, 0] = bbox[:, 0] + w*0.5 - l*0.5
square_bbox[:, 1] = bbox[:, 1] + h*0.5 - l*0.5
square_bbox[:, 2] = square_bbox[:, 0] + l - 1
square_bbox[:, 3] = square_bbox[:, 1] + l - 1
return square_bbox
def generate_bounding_box(self, _map, reg, scale, threshold):
"""
generate bbox from feature map
Parameters:
----------
map: torcn Tensor , 1 x n x m
detect score for each position
reg: torcn Tensor , 4 x n x m
bbox
scale: float number
scale of this detection
threshold: float number
detect threshold
Returns:
-------
bbox array
"""
stride = 2
cellsize = 12
t_index = (_map > threshold).nonzero()
t_index = t_index.t()
# print(f't_index {t_index.shape}')
# find nothing
if t_index.shape[0] == 0:
return torch.Tensor([])
dx1, dy1, dx2, dy2 = [reg[0, i, t_index[1], t_index[2]] for i in range(4)]
reg = torch.stack([dx1, dy1, dx2, dy2])
score = _map[:, t_index[1], t_index[2]]
t_index = t_index.float()
boundingbox = torch.cat([((stride * t_index[2:2+1]) / scale),
((stride * t_index[1:1+1]) / scale),
((stride * t_index[2:2+1] + cellsize) / scale),
((stride * t_index[1:1+1] + cellsize) / scale),
score,
reg,
# landmarks
])
return boundingbox.t()
# @script
def resize_image(self, img, scale):
"""
resize image and transform dimention to [batchsize, channel, height, width]
Parameters:
----------
img: torch Tensor , BxCxHxW
scale: float number
scale factor of resize operation
Returns:
-------
transformed image tensor , 1 x channel x height x width
"""
_, _, height, width = img.shape
new_height = int(height * scale) # resized new height
new_width = int(width * scale) # resized new width
new_dim = (new_height, new_width)
img_resized = F.interpolate(img, size=new_dim, mode='bilinear', align_corners=True)
return img_resized
    def pad(self, bboxes, w, h):
        """
        pad the the boxes
        Parameters:
        ----------
        bboxes: torch Tensor, N x 5
            input bboxes
        w: float number
            width of the input image
        h: float number
            height of the input image
        Returns :
        ------
        dy, dx : torch Tensor, n x 1
            start point of the bbox in target image
        edy, edx : torch Tensor, n x 1
            end point of the bbox in target image
        y, x : torch Tensor, n x 1
            start point of the bbox in original image
        ex, ex : torch Tensor, n x 1
            end point of the bbox in original image
        tmph, tmpw: torch Tensor, n x 1
            height and width of the bbox
        """
        # Full (unclipped) box dimensions.
        tmpw = (bboxes[:, 2] - bboxes[:, 0] + 1).float()
        tmph = (bboxes[:, 3] - bboxes[:, 1] + 1).float()
        numbox = bboxes.shape[0]
        # Crop-destination start offsets default to 0 (box fully inside image).
        dx = self.tensortype.FloatTensor(numbox).fill_(0)
        dy = self.tensortype.FloatTensor(numbox).fill_(0)
        # Crop-destination end offsets default to the full box extent.
        edx, edy = tmpw.clone()-1, tmph.clone()-1
        # NOTE: these are views into `bboxes`, so the clamping below also
        # mutates the caller's tensor in place.
        x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
        # Box spills past the right edge: shrink the copy region, clamp ex.
        tmp_index = (ex > w-1).nonzero()
        edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
        ex[tmp_index] = w - 1
        # Box spills past the bottom edge: shrink the copy region, clamp ey.
        tmp_index = (ey > h-1).nonzero()
        edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
        ey[tmp_index] = h - 1
        # Box starts left of the image: shift the destination start, clamp x.
        tmp_index = (x < 0).nonzero()
        dx[tmp_index] = 0 - x[tmp_index]
        x[tmp_index] = 0
        # Box starts above the image: shift the destination start, clamp y.
        tmp_index = (y < 0).nonzero()
        dy[tmp_index] = 0 - y[tmp_index]
        y[tmp_index] = 0
        return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
        # All offsets are used as slice indices downstream, hence int().
        return_list = [item.int() for item in return_list]
        return return_list
    def detect_pnet(self, im):
        """Get face candidates through pnet (stage 1 of the cascade).

        Runs PNet fully-convolutionally over an image pyramid, collecting
        above-threshold windows at every scale, then NMS-merges them.

        Parameters:
        ----------
        im: torch Tensor, 1x3xHxW
            input image tensor
        Returns:
        -------
        boxes: torch Tensor (Nx5)
            detected boxes before calibration
        boxes_align: torch Tensor (Nx5)
            boxes after applying the regression offsets
        (None, None) when nothing is detected.
        """
        net_size = 12
        # Initial scale maps min_face_size down to PNet's 12px receptive field.
        current_scale = float(net_size) / self.min_face_size
        im_resized = self.resize_image(im, current_scale)
        _, _, current_height, current_width = im_resized.shape
        all_boxes = list()
        # Image pyramid: shrink by scale_factor until smaller than net_size.
        while min(current_height, current_width) > net_size:
            feed_imgs = im_resized
            if self.pnet_detector.use_cuda:
                feed_imgs = feed_imgs.cuda()
            cls_map, reg = self.pnet_detector(feed_imgs.float())
            boxes = self.generate_bounding_box(cls_map[ 0, :, :], reg, current_scale, self.thresh[0])
            # Prepare the next pyramid level before filtering this one.
            current_scale *= self.scale_factor
            im_resized = self.resize_image(im, current_scale)
            _, _, current_height, current_width = im_resized.shape
            if boxes.nelement() == 0:
                continue
            # Per-scale NMS with a loose threshold.
            keep = utils.nms(boxes[:, :5], 0.5, 'Union')
            boxes = boxes[keep]
            all_boxes.append(boxes)
        if len(all_boxes) == 0:
            return None, None
        all_boxes = torch.cat(all_boxes)
        # Merge the detections from all scales with a tighter NMS.
        keep = utils.nms(all_boxes[:, 0:5], 0.7, 'Union')
        all_boxes = all_boxes[keep]
        bw = all_boxes[:, 2] - all_boxes[:, 0] + 1
        bh = all_boxes[:, 3] - all_boxes[:, 1] + 1
        boxes = all_boxes[:,:5]
        # Apply the regression offsets (columns 5-8), scaled by box size.
        align_topx = all_boxes[:, 0] + all_boxes[:, 5] * bw
        align_topy = all_boxes[:, 1] + all_boxes[:, 6] * bh
        align_bottomx = all_boxes[:, 2] + all_boxes[:, 7] * bw
        align_bottomy = all_boxes[:, 3] + all_boxes[:, 8] * bh
        boxes_align = torch.stack([ align_topx,
                                    align_topy,
                                    align_bottomx,
                                    align_bottomy,
                                    all_boxes[:, 4],
                                    ],dim=-1)
        return boxes, boxes_align
def detect_rnet(self, im, dets):
"""Get face candidates using rnet
Parameters:
----------
im: torch Tensor 1x3xHxW
input image array
dets: numpy array
detection results of pnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_align: numpy array
boxes after calibration
"""
_, _, h, w = im.shape
if dets is None:
return None,None
dets = self.square_bbox(dets)
dets[:, 0:4] = torch.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
if num_boxes == 0:
return None, None
'''
# helper for setting RNet batch size
batch_size = self.rnet_detector.batch_size
ratio = float(num_boxes) / batch_size
if ratio > 3 or ratio < 0.3:
print "You may need to reset RNet batch size if this info appears frequently, \
face candidates:%d, current batch_size:%d"%(num_boxes, batch_size)
'''
# cropped_ims_tensors = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32)
cropped_ims_tensors = []
for i in range(num_boxes):
tmp = self.tensortype.FloatTensor(1, 3, tmph[i], tmpw[i]).fill_(0)
tmp[..., dy[i]:edy[i]+1, dx[i]:edx[i]+1] = im[..., y[i]:ey[i]+1, x[i]:ex[i]+1]
crop_im = F.interpolate(tmp, size=(24, 24))
crop_im_tensor = crop_im
cropped_ims_tensors.append(crop_im_tensor)
feed_imgs = torch.cat(cropped_ims_tensors)
if self.rnet_detector.use_cuda:
feed_imgs = feed_imgs.cuda()
cls_map, reg = self.rnet_detector(feed_imgs)
cls_map = cls_map
reg = reg
# landmark = landmark.cpu().data.numpy()
keep_inds = (cls_map.squeeze() > self.thresh[1]).nonzero().squeeze()
if keep_inds.dim() > 0 and len(keep_inds) > 0:
boxes = dets[keep_inds]
_cls = cls_map[keep_inds]
reg = reg[keep_inds]
# landmark = landmark[keep_inds]
else:
return None, None
keep = utils.nms(boxes, 0.7)
if len(keep) == 0:
return None, None
keep_cls = _cls[keep]
keep_boxes = boxes[keep]
keep_reg = reg[keep]
# keep_landmark = landmark[keep]
bw = keep_boxes[:, 2] - keep_boxes[:, 0] + 1
bh = keep_boxes[:, 3] - keep_boxes[:, 1] + 1
boxes = torch.cat([ keep_boxes[:,0:4], keep_cls[:,0:1]], dim=-1)
align_topx = keep_boxes[:,0] + keep_reg[:,0] * bw
align_topy = keep_boxes[:,1] + keep_reg[:,1] * bh
align_bottomx = keep_boxes[:,2] + keep_reg[:,2] * bw
align_bottomy = keep_boxes[:,3] + keep_reg[:,3] * bh
boxes_align = torch.stack([align_topx,
align_topy,
align_bottomx,
align_bottomy,
keep_cls[:, 0],
], dim=-1)
return boxes, boxes_align
def detect_onet(self, im, dets):
"""Get face candidates using onet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of rnet
Returns:
-------
boxes_align: numpy array
boxes after calibration
landmarks_align: numpy array
landmarks after calibration
"""
_, _, h, w = im.shape
if dets is None:
return None, None
dets = self.square_bbox(dets)
dets[:, 0:4] = torch.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
# cropped_ims_tensors = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32)
cropped_ims_tensors = []
for i in range(num_boxes):
tmp = self.tensortype.FloatTensor(1, 3, tmph[i], tmpw[i]).fill_(0)
tmp[..., dy[i]:edy[i]+1, dx[i]:edx[i]+1] = im[..., y[i]:ey[i]+1, x[i]:ex[i]+1]
crop_im = F.interpolate(tmp, size=(48, 48))
crop_im_tensor = crop_im
# cropped_ims_tensors[i, :, :, :] = crop_im_tensor
cropped_ims_tensors.append(crop_im_tensor)
feed_imgs = torch.cat(cropped_ims_tensors)
if self.rnet_detector.use_cuda:
feed_imgs = feed_imgs.cuda()
cls_map, reg, landmark = self.onet_detector(feed_imgs)
keep_inds = (cls_map.squeeze() > self.thresh[2]).nonzero().squeeze()
if keep_inds.dim() > 0 and len(keep_inds) > 0:
boxes = dets[keep_inds]
_cls = cls_map[keep_inds]
reg = reg[keep_inds]
landmark = landmark[keep_inds]
else:
return None, None
keep = utils.nms(boxes, 0.7, mode="Minimum")
if len(keep) == 0:
return None, None
keep_cls = _cls[keep]
keep_boxes = boxes[keep]
keep_reg = reg[keep]
keep_landmark = landmark[keep]
bw = keep_boxes[:, 2] - keep_boxes[:, 0] + 1
bh = keep_boxes[:, 3] - keep_boxes[:, 1] + 1
align_topx = keep_boxes[:, 0] + keep_reg[:, 0] * bw
align_topy = keep_boxes[:, 1] + keep_reg[:, 1] * bh
align_bottomx = keep_boxes[:, 2] + keep_reg[:, 2] * bw
align_bottomy = keep_boxes[:, 3] + keep_reg[:, 3] * bh
align_landmark_topx = keep_boxes[:, 0]
align_landmark_topy = keep_boxes[:, 1]
boxes_align = torch.stack([align_topx,
align_topy,
align_bottomx,
align_bottomy,
keep_cls[:, 0],
],dim=-1)
landmark = torch.stack([align_landmark_topx + keep_landmark[:, 0] * bw,
align_landmark_topy + keep_landmark[:, 1] * bh,
align_landmark_topx + keep_landmark[:, 2] * bw,
align_landmark_topy + keep_landmark[:, 3] * bh,
align_landmark_topx + keep_landmark[:, 4] * bw,
align_landmark_topy + keep_landmark[:, 5] * bh,
align_landmark_topx + keep_landmark[:, 6] * bw,
align_landmark_topy + keep_landmark[:, 7] * bh,
align_landmark_topx + keep_landmark[:, 8] * bw,
align_landmark_topy + keep_landmark[:, 9] * bh,
], dim=-1)
return boxes_align, landmark
    def detect_face(self,img):
        """Detect faces in an image by running the full P->R->O cascade.

        Returns (boxes_align, landmark_align) tensors; both are empty
        tensors when any stage yields no detections.
        """
        boxes_align = torch.Tensor([])
        landmark_align = torch.Tensor([])
        # Convert the input image to a 1x3xHxW tensor.
        img = image_tools.convert_image_to_tensor(img).unsqueeze(0)
        if self.use_cuda:
            img = img.cuda()
        # Stage 1: PNet proposals.
        if self.pnet_detector:
            boxes, boxes_align = self.detect_pnet(img.clone())
            if boxes_align is None:
                return torch.Tensor([]), torch.Tensor([])
        # Stage 2: RNet refinement of the PNet candidates.
        if self.rnet_detector:
            boxes, boxes_align = self.detect_rnet(img.clone(), boxes_align)
            if boxes_align is None:
                return torch.Tensor([]), torch.Tensor([])
        # Stage 3: ONet refinement plus landmark prediction.
        if self.onet_detector:
            boxes_align, landmark_align = self.detect_onet(img.clone(), boxes_align)
            if boxes_align is None:
                return torch.Tensor([]), torch.Tensor([])
        return boxes_align, landmark_align
| 32.053803 | 101 | 0.515888 |
a7f323cdf696b18581d2b1afd1e51d44b4da6600 | 5,163 | py | Python | invenio_communities/communities/services/service.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | invenio_communities/communities/services/service.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | invenio_communities/communities/services/service.py | max-moser/invenio-communities | cfdd6a8b36bf6856db4fd55123c832700eb32376 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
# Copyright (C) 2021-2022 Northwestern University.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Communities Service API."""
from elasticsearch_dsl import Q
from invenio_records_resources.services.base import LinksTemplate
from invenio_records_resources.services.records import RecordService
from invenio_records_resources.services.uow import RecordCommitOp, unit_of_work
from marshmallow.exceptions import ValidationError
class CommunityService(RecordService):
    """Community service.

    Bundles the community record service with its sub-services for files,
    invitations and members.
    """

    def __init__(self, config, files_service=None, invitations_service=None,
                 members_service=None):
        """Constructor for CommunityService.

        :param config: service configuration.
        :param files_service: service handling community files (e.g. the logo).
        :param invitations_service: service handling community invitations.
        :param members_service: service handling community members.
        """
        super().__init__(config)
        self._files = files_service
        self._invitations = invitations_service
        self._members = members_service

    @property
    def files(self):
        """Community files service."""
        return self._files

    @property
    def invitations(self):
        """Community invitations service."""
        return self._invitations

    @property
    def members(self):
        """Community members service."""
        return self._members

    def search_user_communities(
            self, identity, params=None, es_preference=None, **kwargs):
        """Search for communities owned by the given identity's user."""
        self.require_permission(identity, 'search_user_communities')
        params = params or {}
        # Restrict results to communities owned by the current user.
        search_result = self._search(
            'search',
            identity,
            params,
            es_preference,
            extra_filter=Q(
                "term",
                **{"access.owned_by.user": identity.id}
            ),
            permission_action='read',
            **kwargs).execute()
        return self.result_list(
            self,
            identity,
            search_result,
            params,
            links_tpl=LinksTemplate(self.config.links_user_search, context={
                "args": params
            }),
            links_item_tpl=self.links_item_tpl,
        )

    @unit_of_work()
    def rename(self, identity, id_, data, revision_id=None, raise_errors=True,
               uow=None):
        """Rename a community.

        :raises marshmallow.ValidationError: if ``data`` lacks an ``id``.
        """
        record = self.record_cls.pid.resolve(id_)
        self.check_revision_id(record, revision_id)
        # Permissions
        self.require_permission(identity, "rename", record=record)
        if 'id' not in data:
            raise ValidationError(
                'Missing data for required field.',
                field_name='id',
            )
        data, errors = self.schema.load(
            data,
            context={"identity": identity},
            raise_errors=raise_errors,     # if False, flow continues with only
            schema_args={'partial': True}  # the valid data; errors are
        )                                  # reported as warnings
        # Run components
        self.run_components(
            'rename', identity, data=data, record=record, uow=uow)
        uow.register(RecordCommitOp(record, indexer=self.indexer))
        return self.result_item(
            self,
            identity,
            record,
            links_tpl=self.links_item_tpl,
        )

    def read_logo(self, identity, id_):
        """Read the community's logo.

        :raises FileNotFoundError: if the community has no logo.
        """
        record = self.record_cls.pid.resolve(id_)
        self.require_permission(identity, 'read', record=record)
        logo_file = record.files.get('logo')
        if logo_file is None:
            raise FileNotFoundError()
        return self.files.file_result_item(
            self.files,
            identity,
            logo_file,
            record,
            links_tpl=self.files.file_links_item_tpl(id_),
        )

    @unit_of_work()
    def update_logo(self, identity, id_, stream, content_length=None,
                    uow=None):
        """Update the community's logo."""
        record = self.record_cls.pid.resolve(id_)
        self.require_permission(identity, 'update', record=record)
        record.files['logo'] = stream
        uow.register(RecordCommitOp(record))
        return self.files.file_result_item(
            self.files,
            identity,
            record.files['logo'],
            record,
            links_tpl=self.files.file_links_item_tpl(id_),
        )

    @unit_of_work()
    def delete_logo(self, identity, id_, uow=None):
        """Delete the community's logo.

        :raises FileNotFoundError: if the community has no logo.
        """
        record = self.record_cls.pid.resolve(id_)
        # Bug fix: the delete path previously performed NO authorization check,
        # allowing any identity to remove a community's logo. Require the same
        # 'update' permission that update_logo enforces.
        self.require_permission(identity, 'update', record=record)
        deleted_file = record.files.pop('logo', None)
        if deleted_file is None:
            raise FileNotFoundError()
        uow.register(RecordCommitOp(record))
        return self.files.file_result_item(
            self.files,
            identity,
            deleted_file,
            record,
            links_tpl=self.files.file_links_item_tpl(id_),
        )
| 31.481707 | 79 | 0.592872 |
6dbbca48f5251ac90f83ed0420c774d1d5bbb849 | 452 | py | Python | datatypes_lab/io.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | datatypes_lab/io.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | datatypes_lab/io.py | jeremyosborne/examples-python | 5900b3a4f47d59de0a32d3257a8b90a44e80fdcd | [
"MIT"
] | null | null | null | """Lab:
Ask the user for their name.
Ask the user for their favorite color.
Fail politely if the user puts a ":" character in the name or color.
Write the name:color to the file "echo_out.txt".
Use a try/except and watch for IOError.
Before exiting, echo back all of the contents of the file, splitting
on the colon and formatting the name in a left aligned fixed width field of 20.
Run this multiple times to make sure it is working as expected.
"""
| 37.666667 | 79 | 0.761062 |
2a463476f35f096cdfe4c06e0cf455662421efd9 | 20,877 | py | Python | google/cloud/speech_v1p1beta1/services/speech/async_client.py | ufft47/python-speech | 3c908e213411593b5a53a6dd024b5610848b02d7 | [
"Apache-2.0"
] | null | null | null | google/cloud/speech_v1p1beta1/services/speech/async_client.py | ufft47/python-speech | 3c908e213411593b5a53a6dd024b5610848b02d7 | [
"Apache-2.0"
] | null | null | null | google/cloud/speech_v1p1beta1/services/speech/async_client.py | ufft47/python-speech | 3c908e213411593b5a53a6dd024b5610848b02d7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.speech_v1p1beta1.types import cloud_speech
from google.rpc import status_pb2 # type: ignore
from .transports.base import SpeechTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SpeechGrpcAsyncIOTransport
from .client import SpeechClient
class SpeechAsyncClient:
"""Service that implements Google Cloud Speech API."""
_client: SpeechClient
DEFAULT_ENDPOINT = SpeechClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = SpeechClient.DEFAULT_MTLS_ENDPOINT
custom_class_path = staticmethod(SpeechClient.custom_class_path)
parse_custom_class_path = staticmethod(SpeechClient.parse_custom_class_path)
phrase_set_path = staticmethod(SpeechClient.phrase_set_path)
parse_phrase_set_path = staticmethod(SpeechClient.parse_phrase_set_path)
common_billing_account_path = staticmethod(SpeechClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
SpeechClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(SpeechClient.common_folder_path)
parse_common_folder_path = staticmethod(SpeechClient.parse_common_folder_path)
common_organization_path = staticmethod(SpeechClient.common_organization_path)
parse_common_organization_path = staticmethod(
SpeechClient.parse_common_organization_path
)
common_project_path = staticmethod(SpeechClient.common_project_path)
parse_common_project_path = staticmethod(SpeechClient.parse_common_project_path)
common_location_path = staticmethod(SpeechClient.common_location_path)
parse_common_location_path = staticmethod(SpeechClient.parse_common_location_path)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SpeechAsyncClient: The constructed client.
        """
        # Reuse SpeechClient's implementation but bind it to this async class:
        # __func__ unwraps the classmethod so the first argument can be
        # SpeechAsyncClient instead of SpeechClient.
        return SpeechClient.from_service_account_info.__func__(SpeechAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SpeechAsyncClient: The constructed client.
        """
        # Reuse SpeechClient's implementation, re-bound to this async class
        # (see from_service_account_info for the __func__ rationale).
        return SpeechClient.from_service_account_file.__func__(SpeechAsyncClient, filename, *args, **kwargs)  # type: ignore
    # Conventional alias kept for API parity with the synchronous client.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> SpeechTransport:
        """Returns the transport used by the client instance.

        Returns:
            SpeechTransport: The transport used by the client instance.
        """
        return self._client.transport
    # Delegate transport-class selection to the sync client's metaclass hook,
    # pre-binding its class so callers only pass the transport label.
    get_transport_class = functools.partial(
        type(SpeechClient).get_transport_class, type(SpeechClient)
    )
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, SpeechTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the speech client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.SpeechTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = SpeechClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def recognize(
self,
request: cloud_speech.RecognizeRequest = None,
*,
config: cloud_speech.RecognitionConfig = None,
audio: cloud_speech.RecognitionAudio = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_speech.RecognizeResponse:
r"""Performs synchronous speech recognition: receive
results after all audio has been sent and processed.
Args:
request (:class:`google.cloud.speech_v1p1beta1.types.RecognizeRequest`):
The request object. The top-level message sent by the
client for the `Recognize` method.
config (:class:`google.cloud.speech_v1p1beta1.types.RecognitionConfig`):
Required. Provides information to the
recognizer that specifies how to process
the request.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
audio (:class:`google.cloud.speech_v1p1beta1.types.RecognitionAudio`):
Required. The audio data to be
recognized.
This corresponds to the ``audio`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.speech_v1p1beta1.types.RecognizeResponse:
The only message returned to the client by the Recognize method. It
contains the result as zero or more sequential
SpeechRecognitionResult messages.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([config, audio])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloud_speech.RecognizeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if config is not None:
request.config = config
if audio is not None:
request.audio = audio
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.recognize,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5000.0,
),
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def long_running_recognize(
self,
request: cloud_speech.LongRunningRecognizeRequest = None,
*,
config: cloud_speech.RecognitionConfig = None,
audio: cloud_speech.RecognitionAudio = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Performs asynchronous speech recognition: receive results via
the google.longrunning.Operations interface. Returns either an
``Operation.error`` or an ``Operation.response`` which contains
a ``LongRunningRecognizeResponse`` message. For more information
on asynchronous speech recognition, see the
`how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
Args:
request (:class:`google.cloud.speech_v1p1beta1.types.LongRunningRecognizeRequest`):
The request object. The top-level message sent by the
client for the `LongRunningRecognize` method.
config (:class:`google.cloud.speech_v1p1beta1.types.RecognitionConfig`):
Required. Provides information to the
recognizer that specifies how to process
the request.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
audio (:class:`google.cloud.speech_v1p1beta1.types.RecognitionAudio`):
Required. The audio data to be
recognized.
This corresponds to the ``audio`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.speech_v1p1beta1.types.LongRunningRecognizeResponse` The only message returned to the client by the LongRunningRecognize method.
It contains the result as zero or more sequential
SpeechRecognitionResult messages. It is included in
the result.response field of the Operation returned
by the GetOperation call of the
google::longrunning::Operations service.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([config, audio])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = cloud_speech.LongRunningRecognizeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if config is not None:
request.config = config
if audio is not None:
request.audio = audio
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.long_running_recognize,
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
cloud_speech.LongRunningRecognizeResponse,
metadata_type=cloud_speech.LongRunningRecognizeMetadata,
)
# Done; return the response.
return response
def streaming_recognize(
self,
requests: AsyncIterator[cloud_speech.StreamingRecognizeRequest] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[cloud_speech.StreamingRecognizeResponse]]:
r"""Performs bidirectional streaming speech recognition:
receive results while sending audio. This method is only
available via the gRPC API (not REST).
Args:
requests (AsyncIterator[`google.cloud.speech_v1p1beta1.types.StreamingRecognizeRequest`]):
The request object AsyncIterator. The top-level message sent by the
client for the `StreamingRecognize` method. Multiple
`StreamingRecognizeRequest` messages are sent. The first
message must contain a `streaming_config` message and
must not contain `audio_content`. All subsequent
messages must contain `audio_content` and must not
contain a `streaming_config` message.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.speech_v1p1beta1.types.StreamingRecognizeResponse]:
StreamingRecognizeResponse is the only message returned to the client by
StreamingRecognize. A series of zero or more
StreamingRecognizeResponse messages are streamed back
to the client. If there is no recognizable audio, and
single_utterance is set to false, then no messages
are streamed back to the client.
Here's an example of a series of
StreamingRecognizeResponses that might be returned
while processing audio:
1. results { alternatives { transcript: "tube" }
stability: 0.01 }
2. results { alternatives { transcript: "to be a" }
stability: 0.01 }
3. results { alternatives { transcript: "to be" }
stability: 0.9 } results { alternatives {
transcript: " or not to be" } stability: 0.01 }
4.
results { alternatives { transcript: "to be or not to be"
confidence: 0.92 }
alternatives { transcript: "to bee or not to bee" }
is_final: true }
5. results { alternatives { transcript: " that's" }
stability: 0.01 }
6. results { alternatives { transcript: " that is" }
stability: 0.9 } results { alternatives {
transcript: " the question" } stability: 0.01 }
7.
results { alternatives { transcript: " that is the question"
confidence: 0.98 }
alternatives { transcript: " that was the question" }
is_final: true }
Notes:
- Only two of the above responses #4 and #7 contain
final results; they are indicated by
is_final: true. Concatenating these together
generates the full transcript: "to be or not to be
that is the question".
- The others contain interim results. #3 and #6
contain two interim \`results`: the first portion
has a high stability and is less likely to change;
the second portion has a low stability and is very
likely to change. A UI designer might choose to
show only high stability results.
- The specific stability and confidence values shown
above are only for illustrative purposes. Actual
values may vary.
-
In each response, only one of these fields will be set:
error, speech_event_type, or one or more
(repeated) results.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.streaming_recognize,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5000.0,
),
default_timeout=5000.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
# Report the installed google-cloud-speech version in request headers when the
# distribution metadata is available; fall back to a blank ClientInfo
# otherwise (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-speech",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Only the async client is part of this module's public API.
__all__ = ("SpeechAsyncClient",)
| 43.859244 | 207 | 0.629832 |
e79ba0565bdfba1ca92b3fee03ada7181349d20e | 6,539 | py | Python | src/PathEmb.py | zhangjiaobxy/PathEmb | 2b8627f106919b39152db11f52741980df6ba8f0 | [
"MIT"
] | 1 | 2019-08-15T19:22:54.000Z | 2019-08-15T19:22:54.000Z | src/PathEmb.py | zhangjiaobxy/PathEmb | 2b8627f106919b39152db11f52741980df6ba8f0 | [
"MIT"
] | null | null | null | src/PathEmb.py | zhangjiaobxy/PathEmb | 2b8627f106919b39152db11f52741980df6ba8f0 | [
"MIT"
] | null | null | null | # PathEmb package
import os, sys, subprocess, argparse
########################################################################
# step 0:
# print help info of PathEmb
# callExample:
# PathEmb.helpInfo()
def helpInfo():
	"""Print the CLI argument help plus usage examples for every PathEmb stage.

	NOTE: this module targets Python 2 (print statements below).
	"""
	# The parser exists only to render --help style documentation for the
	# positional parameters shared by the pipeline stages; it never parses.
	parser = argparse.ArgumentParser(description = 'Run PathEmb.')
	parser.add_argument('label', nargs = '?', default = 'm1', help = 'Label of regression. m1: NETAL EC, m2: NETAL LCCS, m3: HubAlign EC, m4: HubAlign LCCS')
	parser.add_argument('dbStart', nargs = '?', default = 1, help = 'Start number of DB. Default is 1.')
	parser.add_argument('dbEnd', nargs = '?', default = 100, help = 'End number of DB. Default is 100.')
	parser.add_argument('qStart', nargs = '?', default = 101, help = 'Start number of query set. Default is 101.')
	parser.add_argument('qEnd', nargs = '?', default = 110, help = 'End number of query set. Default is 110.')
	parser.add_argument('r', nargs = '?', default = 5, help = 'Random walks per vertex. Default is 5.')
	parser.add_argument('l', nargs = '?', default = 30, help = 'Walk length. Default is 30.')
	parser.add_argument('h', nargs = '?', default = 1, help = 'Direction guidance of walk. Default is 1.')
	parser.add_argument('u', nargs = '?', default = 15, help = 'Window size. Default is 15.')
	parser.add_argument('d', nargs = '?', default = 50, help = 'Feature dimension. Default is 50.')
	parser.add_argument('K', nargs = '?', default = 10, help = 'Top K percent similar pathways. Default is 10.')
	parser.print_help()
	# Worked examples for each of the five pipeline stages defined below.
	print '\nusage: call functions of PathEmb: '
	print '	0. get help info:'
	print '		PathEmb.helpInfo()'
	print '	1. get label file:'
	print '		PathEmb.labelExtract(label, dbStart, dbEnd, qStart, qEnd)'
	print '		PathEmb.labelExtract(\'m1\', 1, 100, 101, 110)'
	print '	2. get random walks for each vertex:'
	print '		PathEmb.randomWalk(r, l, h)'
	print '		PathEmb.randomWalk(5, 30, 1)'
	print '	3. get feature representation of pathway:'
	print '		PathEmb.doc2vec(u, d)'
	print '		PathEmb.doc2vec(15, 50)'
	print '	4. get csv file of the AdaBoost regression:'
	print '		PathEmb.csv(label, d, dbStart, dbEnd, qStart, qEnd)'
	print '		PathEmb.csv(\'m1\', 50, 1, 100, 101, 110)'
	print '	5. run AdaBoost regression:'
	print '		PathEmb.ABRegression(label, K)'
	print '		PathEmb.ABRegression(\'m1\', 10)'
########################################################################
# step 1:
# run labelExtract to get label files
# input:
# ./data/rawData/
# output:
# ./data/netalLabel (netal output file: netal_ec_label.txt, netal_lccs_label.txt)
# ./data/hubLabel (hubalign output file: hubalign_ec_label.txt, hubalign_lccs_label.txt)
# parameter:
# label (label = m1, m2, m3, m4)
# (m1: NETAL EC, m2: NETAL LCCS, m3: HubAlign EC, m4: HubAlign LCCS)
# dbStart: the start range of database
# dbEnd: the end range of database
# queryStart: the start range of query networks
# queryEnd: the end range of query networks
# callExample:
# PathEmb.labelExtract('m1', 1, 100, 101, 110)
def labelExtract(label, dbStart, dbEnd, queryStart, queryEnd):
	"""Run the alignment tool selected by *label* over the given DB/query ranges.

	m1/m2 select NETAL (EC / LCCS labels); m3/m4 select HubAlign
	(EC / LCCS labels). Any other label prints an error and raises
	SystemExit. Output label files are written by the shell scripts.
	"""
	if label == 'm1' or label == 'm2':
		os.system('bash runNetal.sh ' + str(dbStart) + ' ' + str(dbEnd) + ' ' + str(queryStart) + ' ' + str(queryEnd))
	elif label == 'm3' or label == 'm4':
		os.system('bash runHubalign.sh ' + str(dbStart) + ' ' + str(dbEnd) + ' ' + str(queryStart) + ' ' + str(queryEnd))
	else:
		print '\n*********************************************************************\n'
		print "Invalid label, system exit! "
		print '\n*********************************************************************\n'
		raise SystemExit
########################################################################
# step 2:
# run randomWalk.py to get random walks for each vertex
# input:
# ./data/rawData/ (*.txt file)
# output:
# ./data/randomWalks/ (*.walks file)
# parameter:
# r: random walks per vertex
# l: walk length
# h: direction guidance of walk
# callExample:
# PathEmb.randomWalk(5,30,1)
def randomWalk(r, l, h):
	"""Generate r random walks of length l per vertex (direction guidance h)."""
	# Shell out to the walk generator with the sampling parameters.
	command = 'python randomWalk.py --r {} --l {} --h {}'.format(r, l, h)
	os.system(command)
########################################################################
# step 3:
# run doc2vec to get the feature representation of pathway
# input:
# ./data/randomWalks/ (*.walks file)
# output:
# ./doc2vec/doc2vecFormat_train.bin
# ./doc2vec/doc2vecFormat_infer.txt
# parameter:
# u: window size
# d: feature dimension (vector size)
# callExample:
# PathEmb.doc2vec(15, 50)
def doc2vec(u, d):
	"""Run the three doc2vec stages in order: format, train (window u,
	dimension d), then infer pathway feature vectors."""
	stages = [
		'python doc2vecFormat.py',
		'python doc2vecTrain.py {} {}'.format(u, d),
		'python doc2vecInfer.py',
	]
	for stage in stages:
		os.system(stage)
########################################################################
# step 4:
# run csv to get the csv file of feature and label
# input:
# ./data/doc2vec/doc2vecFormat_infer.txt (feature file)
# ./data/netalLabel (label file of netal: netal_ec_label.txt, netal_lccs_label.txt), or,
# ./data/hubLabel (label file of hubalign: hubalign_ec_label.txt, hubalign_lccs_label.txt)
# output:
# ./data/doc2vec/csvFea.csv
# ./data/csv/csvFea_*label.csv
# parameter:
# label (label = m1, m2, m3, m4)
# (m1: NETAL EC, m2: NETAL LCCS, m3: HubAlign EC, m4: HubAlign LCCS)
# d: feature dimension (vector size)
# dbStart: the start range of database
# dbEnd: the end range of database
# queryStart: the start range of query networks
# queryEnd: the end range of query networks
# callExample:
# PathEmb.doc2vec('m1', 50, 1, 100, 101, 110)
def csv(label, d, dbStart, dbEnd, queryStart, queryEnd):
	"""Build the feature CSV and the feature+label CSV for the regression.

	NOTE: this function name shadows the stdlib ``csv`` module within this
	module's namespace (the module does not use stdlib csv, so this is safe).
	"""
	feature_cmd = 'python csvFea.py {} {} {} {}'.format(dbStart, dbEnd, queryStart, queryEnd)
	label_cmd = 'python csvFeaLab.py {} {}'.format(d, label)
	os.system(feature_cmd)
	os.system(label_cmd)
########################################################################
# step 5:
# run AdaBoost regression
# input:
# ./data/csv/csvFea_*label.csv
# output:
# ./data/output/csvFea_*label_topK%.csv
# parameter:
# label (label = m1, m2, m3, m4)
# (m1: NETAL EC, m2: NETAL LCCS, m3: HubAlign EC, m4: HubAlign LCCS)
# K (output top K percent similar pathways)
# callExample: PathEmb.ABRegression('m1', 10)
def ABRegression(label, K):
	"""Run AdaBoost regression for *label*, emitting the top K percent
	most similar pathways."""
	command = 'python ABRegression.py {} {}'.format(label, K)
	os.system(command)
########################################################################
| 41.386076 | 154 | 0.583728 |
651955a3da848e15a6195d194d54c4c4d177d914 | 1,241 | py | Python | examples/hacker_news/hacker_news_tests/test_jobs/test_download_job.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | examples/hacker_news/hacker_news_tests/test_jobs/test_download_job.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | examples/hacker_news/hacker_news_tests/test_jobs/test_download_job.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | import tempfile
from dagster import ResourceDefinition, fs_io_manager
from hacker_news.jobs.hacker_news_api_download import (
configured_pyspark,
hacker_news_api_download,
hourly_download_config,
)
from hacker_news.resources.hn_resource import hn_snapshot_client
from hacker_news.resources.parquet_io_manager import partitioned_parquet_io_manager
def test_download():
    """Smoke-test the hourly hacker-news download job end to end, writing all
    parquet output into a throwaway directory."""
    with tempfile.TemporaryDirectory() as scratch_dir:
        parquet_manager = partitioned_parquet_io_manager.configured({"base_path": scratch_dir})
        warehouse_manager = partitioned_parquet_io_manager.configured({"base_path": scratch_dir})
        resources = {
            "io_manager": fs_io_manager,
            "partition_start": ResourceDefinition.string_resource(),
            "partition_end": ResourceDefinition.string_resource(),
            "parquet_io_manager": parquet_manager,
            "warehouse_io_manager": warehouse_manager,
            "pyspark": configured_pyspark,
            "hn_client": hn_snapshot_client,
        }
        job = hacker_news_api_download.to_job(
            resource_defs=resources,
            config=hourly_download_config,
        )
        result = job.execute_in_process(partition_key="2020-12-30-01:00")
        assert result.success
4b88f2dfa7eed34f92fe45f277d7e65b50a4fe8c | 220 | py | Python | support/test/python/simple_python/print_env.py | cposture/vimspector | 2eb32f31533b79520ca6788fd41634525234d7e5 | [
"Apache-2.0"
] | 3,130 | 2018-05-25T20:02:11.000Z | 2022-03-31T00:37:59.000Z | support/test/python/simple_python/print_env.py | cposture/vimspector | 2eb32f31533b79520ca6788fd41634525234d7e5 | [
"Apache-2.0"
] | 517 | 2018-09-02T17:40:31.000Z | 2022-03-31T06:26:16.000Z | support/test/python/simple_python/print_env.py | cposture/vimspector | 2eb32f31533b79520ca6788fd41634525234d7e5 | [
"Apache-2.0"
] | 177 | 2018-11-14T12:33:33.000Z | 2022-03-30T05:46:36.000Z | #!/usr/bin/env python
import os
def Main():
  """Print two probe environment variables, then dump the full environment."""
  print( os.environ.get( 'Something', 'ERROR' ) )
  print( os.environ.get( 'SomethingElse', 'ERROR' ) )
  # BUG FIX: iterating os.environ directly yields only the key strings, so
  # `for k, v in os.environ` raised ValueError on unpacking; .items() yields
  # (key, value) pairs as intended by the f-string below.
  for k, v in os.environ.items():
    print( f'{ k } = "{ v }"' )

Main()
| 13.75 | 53 | 0.563636 |
5003b947bee991ef6ce844dd8209943da77ea8a8 | 3,219 | py | Python | haiku.py | balysv/HaikuBotto | 1fbe55552cd8ab152c2f2466b797dafe012a6894 | [
"Unlicense"
] | 3 | 2017-04-15T03:00:22.000Z | 2020-01-20T04:41:13.000Z | haiku.py | balysv/HaikuBotto | 1fbe55552cd8ab152c2f2466b797dafe012a6894 | [
"Unlicense"
] | null | null | null | haiku.py | balysv/HaikuBotto | 1fbe55552cd8ab152c2f2466b797dafe012a6894 | [
"Unlicense"
] | 1 | 2020-05-29T08:06:16.000Z | 2020-05-29T08:06:16.000Z | '''
MIT License Copyright(c) 2016 Balys Valentukevicius
Generates haiku and post them to Twitter. See self.config.py
for self.configuration options.
'''
import config
import markovify
import twitter
import sylco
import threading
import os
from random import randint
class HaikuBotto(object):
    """Generates haiku via Markov chains and posts them to Twitter.

    All tunables (Twitter credentials, corpus directory, syllable counts,
    posting frequency) come from config.Config(); see config.py.
    """

    def __init__(self):
        self.config = config.Config()
        self.api = twitter.Api(
            consumer_key=self.config.twitter_consumer_key,
            consumer_secret=self.config.twitter_consumer_secret,
            access_token_key=self.config.twitter_access_token_key,
            access_token_secret=self.config.twitter_access_token_secret
        )

    def start(self):
        """Post one haiku now, then reschedule forever at the configured
        generation frequency (via threading.Timer)."""
        haiku = self.generate_haiku()
        self.api.PostUpdate(haiku)
        threading.Timer(self.config.generation_frequency, self.start).start()

    def _make_line(self, text_model, syllable_count, forbidden=()):
        """Sample the Markov model until a line has exactly *syllable_count*
        syllables (per sylco) and is not one of *forbidden*.

        Factored out of generate_haiku, which previously repeated this
        rejection-sampling loop three times.
        """
        line = None
        while line is None or line in forbidden or sylco.getsyls(line) != syllable_count:
            line = text_model.make_short_sentence(
                syllable_count * self.config.haiku_avg_char_per_syl,
                tries=100,
                max_overlap_ratio=self.config.markovify_max_overlap_ratio,
                max_overlap_total=self.config.markovify_max_overlap_total
            )
        return line

    def generate_haiku(self):
        """Build a haiku from the input corpus.

        1. Concatenate all files in the corpus directory.
        2. Train a Markovify text model on the combined text.
        3. Sample each of the three lines until its syllable count matches.
        4. Join the lines and strip sentence punctuation.
        """
        all_text = ""
        for i in os.listdir(self.config.markovify_input_dir):
            with open(self.config.markovify_input_dir + i) as f:
                all_text += f.read()
        text_model = markovify.Text(all_text)

        print("looking for first...")
        first = self._make_line(text_model, self.config.haiku_first_syl_count)
        print("looking for second...")
        second = self._make_line(text_model, self.config.haiku_second_syl_count)
        print("looking for third...")
        # The closing line must not simply repeat the opening line.
        third = self._make_line(text_model, self.config.haiku_third_syl_count,
                                forbidden=(first,))

        haiku = "".join([first, "\n", second, "\n", third])
        haiku = "".join(c for c in haiku if c not in ('!','.',':','?',';'))
        print("")
        print("***********************")
        print("-----------------------")
        print(haiku)
        print("-----------------------")
        print("***********************")
        return haiku
| 36.168539 | 107 | 0.618515 |
8cf92e74a04518a24e2ea4b1f55d122a5d0ab40f | 772 | py | Python | lab/pytest/pytest-tut/test_one.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | lab/pytest/pytest-tut/test_one.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | lab/pytest/pytest-tut/test_one.py | diyarkudrat/SPD-2.3-Testing-And-Architecture | dde26e3a6aa02deeb7c0941971841b4f9e16f3fa | [
"MIT"
] | null | null | null | import pytest
def calculate_kinetic_energy(mass, velocity):
    """Return the kinetic energy [J] of mass [kg] moving at velocity [m/s]."""
    speed_squared = velocity ** 2
    return mass * speed_squared / 2
def get_average(li):
    """Return the arithmetic mean of *li*, or None for an empty sequence.

    Replaces the manual accumulation loop (which shadowed the builtin
    ``sum``) with the builtin, and uses the idiomatic emptiness test.
    """
    if not li:
        return None
    return sum(li) / len(li)
def palindrome(word):
    """Return True if *word* reads the same forwards and backwards.

    Raises:
        TypeError: if *word* is not a string.
    """
    if not isinstance(word, str):
        raise TypeError('Please provide a string argument')
    reversed_word = word[::-1]
    return word == reversed_word
def test_palindrome():
    """Non-string input must raise TypeError rather than return a value."""
    with pytest.raises(TypeError):
        palindrome(24)
def test_get_average():
    """An empty list has no mean; get_average must return None."""
    li = []
    # `is`, not `==`: identity comparison is the correct None check (PEP 8).
    assert get_average(li) is None
def test_calculate_kinetic_energy():
    """KE = 0.5 * m * v**2, so 0.5 * 10 * 16 == 80 J."""
    mass = 10 # [kg]
    velocity = 4 # [m/s]
    assert calculate_kinetic_energy(mass, velocity) == 80
| 18.829268 | 66 | 0.625648 |
e3d46266513df6cd0d782617fd530c63381f4967 | 10,636 | py | Python | udf/ext_strat_mentions.py | drew026/xDD_Geodynamics | 79444d94017300a51f608a449e65854e854b3673 | [
"CC-BY-4.0"
] | null | null | null | udf/ext_strat_mentions.py | drew026/xDD_Geodynamics | 79444d94017300a51f608a449e65854e854b3673 | [
"CC-BY-4.0"
] | null | null | null | udf/ext_strat_mentions.py | drew026/xDD_Geodynamics | 79444d94017300a51f608a449e65854e854b3673 | [
"CC-BY-4.0"
] | null | null | null | ##==============================================================================
## LOOK FOR STRATIGRAPHIC NOMENCLATURE - MENTION RECOGINITION
##==============================================================================
# ACQUIRE RELEVANT MODULES
#==============================================================================
import time, urllib, csv, random, psycopg2, re, yaml
from psycopg2.extensions import AsIs
#tic: wall-clock start time used by the elapsed-time summary printed at exit
start_time = time.time()
#function for downloading CSVs from a URL
def download_csv( url ):
    """Fetch a CSV from *url* and return it as a column-oriented dict.

    Each CSV header maps to a tuple of its column values; a 'headers' key
    holds the sorted list of field names.
    """
    from urllib.request import urlopen
    #return variable
    dump_dict = {}
    # BUG FIX: the original called `urllib.request( url )`, which raises
    # TypeError in Python 3 (urllib.request is a module, not a callable),
    # and fed undecoded bytes to csv.reader. Use urlopen and decode first.
    with urlopen( url ) as response:
        text = response.read().decode('utf-8')
    dump = csv.reader(text.splitlines())
    #unpack downloaded CSV as list of tuples
    #--> length of VARIABLE == number of fields
    #--> length of VARIABLE[i] == number of rows
    #--> VARIABLE[i][0] = header name
    cols = list(zip(*dump))
    #key names correspond to field names (headers in the CSV file)
    for field in cols:
        dump_dict[field[0]]=field[1:]
    # note: computed before 'headers' itself is inserted, so it lists only
    # the CSV's own field names
    dump_dict['headers'] = sorted(dump_dict.keys())
    return dump_dict
#==============================================================================
# CONNECT TO POSTGRES
#==============================================================================
# Connect to Postgres
# NOTE(review): yaml.load without an explicit Loader is unsafe on PyYAML >= 5
# (it can construct arbitrary objects); these files are local and presumably
# trusted, but yaml.safe_load would be the safer choice -- confirm.
with open('./credentials', 'r') as credential_yaml:
    credentials = yaml.load(credential_yaml)
with open('./config', 'r') as config_yaml:
    config = yaml.load(config_yaml)
# One shared connection/cursor is used for the whole script.
connection = psycopg2.connect(
    dbname=credentials['postgres']['database'],
    user=credentials['postgres']['user'],
    host=credentials['postgres']['host'],
    port=credentials['postgres']['port'])
cursor = connection.cursor()
#initialize mentions: drop any 'mention' rows left over from a previous run
cursor.execute("""DELETE FROM strat_phrases WHERE strat_flag='mention';
    """)
#import sentences to mine - just restricted to sentences with target instance
# (the sentences table name is app/product specific, hence AsIs interpolation
#  of identifiers -- AsIs is safe here because the values come from local
#  config, not user input)
cursor.execute("""
    SELECT DISTINCT ON (target_instances.docid,
                        target_instances.sentid)
           target_instances.docid,
           target_instances.sentid,
           %(my_app)s_sentences_%(my_product)s.words
    FROM %(my_app)s_sentences_%(my_product)s, target_instances
    WHERE %(my_app)s_sentences_%(my_product)s.docid = target_instances.docid
    AND %(my_app)s_sentences_%(my_product)s.sentid = target_instances.sentid;
""",{
    "my_app": AsIs(config['app_name']),
    "my_product": AsIs(config['product'].lower())
})
sentences=cursor.fetchall()
#convert list of tuples to list of lists
sentences = [list(elem) for elem in sentences]
#import docid - strat_name tuples
cursor.execute("""
    SELECT * FROM strat_dict;
""")
connection.commit()
strat_dict = cursor.fetchall()
#convert list of tuples to list of lists
strat_dict = [list(elem) for elem in strat_dict]
#make a dictionary of docid-strat_name tuples
doc_list={}
for i in strat_dict:
    doc_list[i[0]]=set(i[1])
#==============================================================================
# DEFINE STRATIGRAPHIC VARIABLES
#==============================================================================
#get interval_names from Macrostrat API
int_dict = download_csv( 'https://macrostrat.org/api/defs/intervals?all&format=csv' )
#user-defined variables
# NOTE(review): each line of strat_variables.txt is exec'd verbatim and is
# expected to define names used below (e.g. DICT_DELIM, strat_flags,
# age_flags, weird_strings). exec on a config file is trusted here, but any
# write access to that file is code execution -- confirm this is acceptable.
with open('./var/strat_variables.txt') as fid:
    strat_variables = fid.readlines()
for i in strat_variables:
    exec(i)
#PRE-PROCESS: hack to replace weird strings
for idx,line in enumerate(sentences):
    for ws in weird_strings:
        if ws[0] in ' '.join(sentences[idx][2]):
            sentences[idx][2]=[word.replace(ws[0],ws[1]) for word in sentences[idx][2]]
#with a dictionary of stratigraphic entities mapped to a given document, find the mentions
# i.e. find 'the Bitter Springs stromatolite' after identifying 'the Bitter Springs Formation'
strat_flag = 'mention'
age_agree='-'
strat_list=[]
#loop through documents with discovered stratigraphic entities: for every
#document, search each of its sentences for the name root (the strat phrase
#minus its trailing unit word) and record each hit as a 'mention' both
#locally (strat_list) and in the strat_phrases table
for idx1,doc in enumerate(doc_list.keys()):
    #list of sentences data from a given document
    target_sents = [k for k in sentences if k[0]==doc]
    #list of stratigraphic names associated with that document
    target_strat = list(doc_list[doc])
    #loop through sentence data per document
    for idx2,line in enumerate(target_sents):
        doc_id, sent_id, words = line
        sentence = ' '.join(words)
        for name in target_strat:
            #parse the (strat_name, strat_name_id) tuple
            strat_phrase=name.split(DICT_DELIM)[0]
            #drop the final word of the phrase, leaving the name root to search for
            strat_phrase=strat_phrase.split(' ')
            strat_phrase=' '.join(strat_phrase[0:-1])
            strat_name_id=name.split(DICT_DELIM)[1]
            matches=[m.start() for m in re.finditer(r'\b' + strat_phrase + r'\b',sentence)]
            if matches:
                #if at least one match is found, count number of spaces backward to arrive at word index
                name_idx = [sentence[0:m].count(' ') for m in matches]
                #remove double hits (i.e. stromatolitic-thrombolitic)
                name_idx = list(set(name_idx))
                #split the strat mention into parts
                name_part = strat_phrase.split(' ')
                #loop through all discoveries
                for i in name_idx:
                    #record it as a mention if:
                    #    1) it is not at the end of the sentence
                    #    2) the phrase is not followed by a strat_flag
                    #       (this is to avoid duplication)
                    #    3) the mention is not part of garbled table e.g. 'Tumbiana Tumbiana Tumbiana Tumbiana'
                    # (condition 1 guarantees words[i+1] exists for check 3)
                    if i<len(words)-len(name_part) and words[i+len(name_part)] not in strat_flags and words[i] != words[i+1]:
                        int_name='na'
                        int_id='0'
                        #look to see if there is an interval name before the mention
                        if i>1 and words[i-1] in int_dict['name']:
                            #record this interval name
                            int_name=words[i-1]
                            #list comprehensions to record interval id
                            locations = [k for k, t in enumerate(int_dict['name']) if t==int_name]
                            int_id = [int_dict['int_id'][I] for I in locations]
                            int_id=int_id[0]
                        #look to see if there is an age_flag before the mention
                        elif i>1 and words[i-1] in age_flags:
                            #record age flag with its preceding word (most likely a number)
                            int_name = words[i-2] + ' ' + words[i-1]
                        #record where mention is found
                        max_word_id = str(i+len(name_part))
                        min_word_id = str(i)
                        #add to local variable (tab-delimited record, mirrored by the INSERT below)
                        strat_list.append('\t'.join(str(x) for x in [idx2, doc_id, sent_id,name.split(DICT_DELIM)[0], strat_phrase,strat_flag, min_word_id, max_word_id, strat_name_id,int_name,int_id, sentence]))
                        #write to PSQL table
                        cursor.execute("""
                            INSERT INTO strat_phrases( docid,
                                sentid,
                                strat_phrase,
                                strat_phrase_root,
                                strat_flag,
                                phrase_start,
                                phrase_end,
                                strat_name_id,
                                int_name,
                                int_id,
                                sentence,
                                age_agree)
                            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""",
                            (doc_id, sent_id,name.split(DICT_DELIM)[0], strat_phrase, strat_flag, min_word_id, max_word_id, strat_name_id,int_name,int_id, sentence, age_agree)
                            )
#push insertions to the database
connection.commit()
#some sort of magic
connection.set_isolation_level(0)
cursor.execute(""" VACUUM ANALYZE strat_phrases;
""")
connection.commit()
connection.set_isolation_level(0)
cursor.execute(""" VACUUM ANALYZE target_instances;
""")
connection.commit()
#summarize the number of DISTINCT strat_name_roots found in a given sentence
cursor.execute(""" WITH query AS(SELECT docid, sentid,
COUNT(DISTINCT strat_phrase_root) AS count
FROM strat_phrases
GROUP BY docid,sentid)
UPDATE strat_phrases
SET num_phrase = query.count
FROM query
WHERE strat_phrases.docid = query.docid
AND strat_phrases.sentid = query.sentid
""")
connection.commit()
#summarize the number of DISTINCT strat_name_roots found for a given document
cursor.execute(""" WITH query AS(SELECT docid,
COUNT(DISTINCT strat_phrase_root) AS count
FROM strat_phrases
GROUP BY docid)
UPDATE target_instances
SET num_strat_doc = query.count
FROM query
WHERE target_instances.docid = query.docid
""")
connection.commit()
#close the postgres connection
connection.close()
#summary statistic
success = 'number of stratigraphic mentions : %s' %len(strat_list)
#summary of performance time
elapsed_time = time.time() - start_time
print('\n ###########\n\n %s \n elapsed time: %d seconds\n\n ###########\n\n',
success,elapsed_time)
#print out random result
r=random.randint(0,len(strat_list)-1); show = "\n".join(str(x) for x in strat_list[r].split('\t'))
print("=========================\n" + show + "\n=========================")
| 40.441065 | 211 | 0.516736 |
8339eddbf6e7e683581c18997f90a086f2e0fa63 | 903 | py | Python | Bugscan_exploits-master/exp_list/exp-back_77.pyc_dis.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-back_77.pyc_dis.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-back_77.pyc_dis.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #Embedded file name: taodi_cookie_cheat.py
if 0:
i11iIiiIii
def assign(service, arg):
    """BugScan plugin hook: claim the target only for the 'taodi' service."""
    if service != 'taodi':
        return None
    return True, arg
if 0:
O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
def audit(arg):
    """Probe the target URL for the 'taodi' cookie-cheat vulnerability.

    Obfuscated BugScan plugin body: ``decode(...)`` (defined elsewhere in the
    framework) decrypts the hard-coded probe path and response fingerprint,
    ``curl.curl`` fetches the probe URL, and ``security_hole`` reports the
    target when the fingerprint appears in the response body.
    """
    # o0OO00 is the target base URL handed in by the scanner framework.
    o0OO00 = arg
    # curl.curl returns a 5-tuple; only the third element (the response
    # body, bound to iIiiiI1IiI1I1) is used below.
    oo, i1iII1IiiIiI1, iIiiiI1IiI1I1, o0OoOoOO00, I11i = curl.curl(decode('\x192\xb5\x84\xe3\xd3I\x08\xf5\xb1qc\x90z\x7f\xb8\x87\xed\xe29\x89\xe2\xdf\xd5\xc6\xef\xa8[\xd7\x1e\x1a\x9fG5\xe7\xef\xe4\xa17a\x9c') + o0OO00 + decode('U4\xf8\xcf\xce\xb3k"\xd2\x95wb\xbcCA\xe3\x84\xe0\xe0'))
    # Report a hit only when the body contains the decoded fingerprint string.
    if iIiiiI1IiI1I1 and iIiiiI1IiI1I1.find(decode('Y1\xfb\xc7\xc7\xf9G\x13\xec\xda`o\x8d,p\xae\x80\xe1\xffj\xd5\xf3\xc2\xd8\xc9\xa0\xed')) != -1:
        security_hole(o0OO00)
if 0:
OOooo000oo0.i1 * ii1IiI1i % IIIiiIIii
if __name__ == '__main__':
from dummy import *
#KEY---345095a6a09c0643bcf41007fd1311cdf4889004e886b2bca8d4881fb27a7fca--- | 37.625 | 283 | 0.678848 |
9d78acd494ddf817080f2ad2a1eb629dab38f1ef | 6,839 | py | Python | tests/features/test_archive.py | ratschlab/tools-project-archives | a42ef1d3d60b24ff39ce5aa73a8fb332b4f25056 | [
"MIT"
] | 1 | 2021-12-02T15:13:47.000Z | 2021-12-02T15:13:47.000Z | tests/features/test_archive.py | ratschlab/tools-project-archives | a42ef1d3d60b24ff39ce5aa73a8fb332b4f25056 | [
"MIT"
] | null | null | null | tests/features/test_archive.py | ratschlab/tools-project-archives | a42ef1d3d60b24ff39ce5aa73a8fb332b4f25056 | [
"MIT"
] | null | null | null | import tarfile
import pytest
import archiver
from archiver import integrity
from archiver.archive import create_archive
from tests import helpers
from tests.helpers import run_archiver_tool, generate_splitting_directory
from .archiving_helpers import assert_successful_archive_creation, \
get_public_key_paths
def test_create_archive(tmp_path):
    """Archiving a plain folder yields the expected unencrypted archive."""
    source_name = "test-folder"
    source_dir = helpers.get_directory_with_name(source_name)
    expected_dir = helpers.get_directory_with_name("normal-archive")
    destination = tmp_path / "name-of-destination-folder"

    create_archive(source_dir, destination, compression=5)

    assert_successful_archive_creation(destination, expected_dir, source_name, unencrypted="all")
@pytest.mark.parametrize("workers", [2, 1])
def test_create_archive_split(tmp_path, generate_splitting_directory, workers):
    """Splitting at ~50 MB must yield a two-part unencrypted archive,
    both with 2 worker threads and with 1."""
    # ~50 MB split threshold
    max_size = 1000 * 1000 * 50
    folder_name = "large-test-folder"
    source_path = generate_splitting_directory
    archive_path = helpers.get_directory_with_name("split-archive-ressources")
    destination_path = tmp_path / "name-of-destination-folder"
    create_archive(source_path, destination_path, compression=6, splitting=max_size, threads=workers)
    # split=2: the reference fixture contains exactly two parts
    assert_successful_archive_creation(destination_path, archive_path, folder_name, split=2, unencrypted="all")
def test_create_archive_split_granular(tmp_path, generate_splitting_directory):
    """
    end-to-end test for granular splitting workflow
    """
    # ~50 MB per part
    max_size = 1000 * 1000 * 50
    folder_name = "large-test-folder"
    source_path = generate_splitting_directory
    archive_path = helpers.get_directory_with_name("split-archive-ressources")
    destination_path = tmp_path / "name-of-destination-folder"
    # Step 1: compute the part file lists for the requested part size.
    run_archiver_tool(['create', 'filelist', '--part', f"{max_size}B",
                       source_path, destination_path])
    # Step 2: build the per-part tar archives with two worker threads.
    run_archiver_tool(['create', 'tar', '--threads', str(2),
                       source_path, destination_path])
    # Step 3: compress the tars in place (source path no longer needed).
    run_archiver_tool(['create', 'compressed-tar', '--threads', str(2),
                       destination_path])
    assert_successful_archive_creation(destination_path, archive_path,
                                       folder_name, split=2, unencrypted="all")
    # A deep integrity check over the finished archive must pass.
    assert run_archiver_tool(['check', '--deep', destination_path]).returncode == 0
def test_create_symlink_archive(tmp_path, caplog):
    """Archiving a folder with symlinks succeeds and logs warnings for the
    broken link and the link pointing outside the archived tree."""
    folder_name = "symlink-folder"
    folder_path = helpers.get_directory_with_name(folder_name)
    archive_path = helpers.get_directory_with_name("symlink-archive")
    destination_path = tmp_path / "name-of-destination-folder"
    create_archive(folder_path, destination_path, compression=5)
    assert_successful_archive_creation(destination_path, archive_path, folder_name, unencrypted="all")
    # Exact warning texts emitted by the archiver (checked via caplog).
    assert "Broken symlink symlink-folder/invalid_link found pointing to a non-existing file " in caplog.text
    assert "Symlink with outside target symlink-folder/invalid_link_abs found pointing to /not/existing which is outside the archiving directory" in caplog.text
def test_create_symlink_archive_split(tmp_path, caplog):
    """Splitting (tiny 20-byte parts, 2 threads) must still warn about the
    broken and the outside-pointing symlink."""
    folder_name = "symlink-folder"
    folder_path = helpers.get_directory_with_name(folder_name)
    destination_path = tmp_path / "name-of-destination-folder"
    create_archive(folder_path, destination_path, compression=5, splitting=20, threads=2)
    # Only the warnings are asserted here; the archive layout is covered by
    # the non-split variant of this test.
    assert "Broken symlink symlink-folder/invalid_link found pointing to a non-existing file " in caplog.text
    assert "Symlink with outside target symlink-folder/invalid_link_abs found pointing to /not/existing which is outside the archiving directory" in caplog.text
def test_create_encrypted_archive(tmp_path):
    """Creating with GPG keys and dropping plaintext yields a fully
    encrypted archive."""
    source_name = "test-folder"
    source_dir = helpers.get_directory_with_name(source_name)
    expected_dir = helpers.get_directory_with_name("encrypted-archive")
    destination = tmp_path / "name-of-destination-folder"
    recipient_keys = get_public_key_paths()

    create_archive(source_dir, destination, encryption_keys=recipient_keys, compression=5, remove_unencrypted=True)

    assert_successful_archive_creation(destination, expected_dir, source_name, encrypted="all")
def test_create_archive_split_encrypted(tmp_path, generate_splitting_directory):
    """Splitting and encrypting together must yield a two-part, fully
    encrypted archive with no plaintext left behind."""
    # ~50 MB split threshold
    max_size = 1000 * 1000 * 50
    folder_name = "large-test-folder"
    source_path = generate_splitting_directory
    archive_path = helpers.get_directory_with_name("split-archive-ressources")
    destination_path = tmp_path / "name-of-destination-folder"
    keys = get_public_key_paths()
    create_archive(source_path, destination_path, encryption_keys=keys, compression=6, remove_unencrypted=True, splitting=max_size)
    assert_successful_archive_creation(destination_path, archive_path, folder_name, split=2, encrypted="all")
@pytest.mark.parametrize('splitting_param', [None, 1000**5])
def test_split_archive_with_exotic_filenames(tmp_path, splitting_param):
    """File names with control chars, backslashes, spaces and non-ASCII text
    must survive archiving intact (split and unsplit); the tar content is
    inspected directly because listings mangle special characters."""
    # file name with trailing \r
    back_slash_r = ('back_slash_r'.encode('UTF-8') + bytearray.fromhex('0D')).decode('utf-8')
    # TODO: fails on with bsdtar '¨æß¿X7Á\x80tÂæitÝ«ä\x0b\x9ee\x1d\x80r%6\x81\x19_÷\x1an'
    file_names = sorted([back_slash_r, 'file.txt', 'file',
                         'with space', 'more spaces', 'space at the end ',
                         "tips'n tricks", 'back\rlashes', back_slash_r,
                         r"double_slash_\\r", r"double_slash_\\\r", r'many_slashes_\\\X',
                         'newline_with_\\n_slash', 'newline_with_\\\n_slash', 'mixed_extended_ascii\n_olé',
                         'backslash_escapes_mixed_extended_ascii\\r_\\n_你好',
                         "öéeé", '你好', '__$$__\'\"~!@#$%^&*()_+`',
                         "xM\x1d(+gfx]sD\x0f(c-\nF\x1a*&bb\x0b~c\rD-,", 'LE7\xa0\x1bÛ\xa0½òþ', # random sequences
                         'back_slash_r_explicit\r.txt', 'new\n\nline.txt', 'newlineatend\n'])
    file_dir = tmp_path/'files'
    file_dir.mkdir()
    for f in file_names:
        helpers.create_file_with_size(file_dir/f, 100)
    dest = tmp_path/'myarchive'
    create_archive(file_dir, dest, encryption_keys=None,
                   compression=6, remove_unencrypted=True, splitting=splitting_param)
    # The archive must pass a deep integrity check regardless of splitting.
    assert integrity.check_integrity(dest, deep_flag=True, threads=1)
    # With splitting enabled the first (and only) part carries a .part1 suffix.
    archive_name = 'files' if not splitting_param else 'files.part1'
    # don't use listing to check tar content, but check it directly.
    # listing is tricky to process with special characters
    archiver.helpers.run_shell_cmd(
        ['plzip', '--decompress', str(dest / f'{archive_name}.tar.lz')])
    with tarfile.open(dest / f'{archive_name}.tar') as f:
        file_names_in_tar = {p[len('files/'):] for p in f.getnames()}
        file_names_in_tar = {n for n in file_names_in_tar if n} # removing 'files/' directory entry
        assert file_names_in_tar == set(file_names)
| 43.56051 | 160 | 0.733002 |
e25478f32c227036b9dacc668b7b81e1e9627866 | 410 | py | Python | fdk_client/application/models/ProductListingPrice.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/application/models/ProductListingPrice.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/application/models/ProductListingPrice.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Application Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .Price import Price
from .Price import Price
class ProductListingPrice(BaseSchema):
    """Marshmallow schema for a product listing's price pair."""
    # Catalog swagger.json
    # `effective` appears to be the charged price and `marked` the sticker
    # (MRP) price — TODO confirm against the Fynd catalog swagger spec.
    effective = fields.Nested(Price, required=False)
    marked = fields.Nested(Price, required=False)
| 18.636364 | 52 | 0.736585 |
9a773603c4e9a1b0a6bf7cab1f3f3ede4add2a86 | 2,961 | py | Python | HW5/2b_all/2b_2/RNN_test.py | leo811121/UIUC-CS-547 | 2a1caeb8f006ce30bb7312cc977c7a87290a7858 | [
"CECILL-B"
] | 1 | 2021-01-26T14:34:08.000Z | 2021-01-26T14:34:08.000Z | HW5/2b_all/2b_2/RNN_test.py | leo811121/UIUC-CS-547 | 2a1caeb8f006ce30bb7312cc977c7a87290a7858 | [
"CECILL-B"
] | null | null | null | HW5/2b_all/2b_2/RNN_test.py | leo811121/UIUC-CS-547 | 2a1caeb8f006ce30bb7312cc977c7a87290a7858 | [
"CECILL-B"
] | 1 | 2021-01-26T14:32:59.000Z | 2021-01-26T14:32:59.000Z | import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.distributed as dist
import time
import os
import sys
import io
from RNN_model import RNN_model
# --- Load pre-computed GloVe embeddings and the tokenised IMDB test set ---
glove_embeddings = np.load('/u/training/tra335/HW5/1a/preprocessed_data/glove_embeddings.npy')
vocab_size = 100000
x_test = []
with io.open('/u/training/tra335/HW5/1a/preprocessed_data/imdb_test_glove.txt','r',encoding='utf-8') as f:
    lines = f.readlines()
    for line in lines:
        line = line.strip()
        line = line.split(' ')
        # NOTE(review): np.int was removed in NumPy >= 1.24; this script assumes an older NumPy.
        line = np.asarray(line,dtype=np.int)
        # clamp out-of-vocabulary token ids to 0 (the "unknown" embedding)
        line[line>vocab_size] = 0
        x_test.append(line)
# Labels: first 12500 reviews are positive (1), the remaining 12500 negative (0).
y_test = np.zeros((25000,))
y_test[0:12500] = 1
vocab_size += 1
# Load the trained RNN and move it to the GPU (CUDA required).
model = torch.load('rnn.model')
model.cuda()
batch_size = 200
no_of_epochs = 500
#L_Y_train = len(y_train)
L_Y_test = len(y_test)
train_loss = []
train_accu = []
test_accu = []
# Evaluation only: each "epoch" re-evaluates the model on progressively
# longer sequences (sequence_length = (epoch+1) * 50); no training happens.
for epoch in range(no_of_epochs):
    # ## test
    if((epoch+1)%1)==0:
        model.eval()
        epoch_acc = 0.0
        epoch_loss = 0.0
        epoch_counter = 0
        time1 = time.time()
        I_permutation = np.random.permutation(L_Y_test)
        for i in range(0, L_Y_test, batch_size):
            ###################################################################################################
            x_input2 = [x_test[j] for j in I_permutation[i:i+batch_size]]
            sequence_length = (epoch + 1) * 50
            x_input = np.zeros((batch_size,sequence_length),dtype=np.int)
            for j in range(batch_size):
                x = np.asarray(x_input2[j])
                sl = x.shape[0]
                if(sl < sequence_length):
                    # short review: left-align and zero-pad
                    x_input[j,0:sl] = x
                else:
                    # long review: take a random window of sequence_length tokens
                    start_index = np.random.randint(sl-sequence_length+1)
                    x_input[j,:] = x[start_index:(start_index+sequence_length)]
            # embedding lookup: (batch, seq) token ids -> (batch, seq, embed_dim)
            x_input = glove_embeddings[x_input]
            y_input = y_test[I_permutation[i:i+batch_size]]
            data = Variable(torch.FloatTensor(x_input)).cuda()
            target = Variable(torch.FloatTensor(y_input)).cuda()
            ##################################################################################################
            with torch.no_grad():
                loss, pred = model(data,target)
            # model outputs raw logits: >= 0 corresponds to probability >= 0.5
            prediction = pred >= 0.0
            truth = target >= 0.5
            acc = prediction.eq(truth).sum().cpu().data.numpy()
            epoch_acc += acc
            epoch_loss += loss.data.item()
            epoch_counter += batch_size
        epoch_acc /= epoch_counter
        epoch_loss /= (epoch_counter/batch_size)
        test_accu.append(epoch_acc)
        time2 = time.time()
        time_elapsed = time2 - time1
        print("Seqence:", sequence_length," Accuracy: %.2f" % (epoch_acc*100.0), "loss: %.4f" % epoch_loss)
        # print(" ", "%.2f" % (epoch_acc*100.0), "%.4f" % epoch_loss)
| 28.747573 | 107 | 0.563999 |
530197119f3f99aa2dddf8019144447dd6d5e905 | 2,055 | py | Python | Kailash_Work/Other_Programs/date_time_operations.py | teamtact/learning-challenge-season-2 | aea5897df0e066f6c6ac1d5eb64584e23073db4f | [
"MIT"
] | 3 | 2019-07-01T14:38:12.000Z | 2019-07-11T18:57:16.000Z | Kailash_Work/Other_Programs/date_time_operations.py | teamtact/learning-challenge-season-2 | aea5897df0e066f6c6ac1d5eb64584e23073db4f | [
"MIT"
] | null | null | null | Kailash_Work/Other_Programs/date_time_operations.py | teamtact/learning-challenge-season-2 | aea5897df0e066f6c6ac1d5eb64584e23073db4f | [
"MIT"
] | 25 | 2019-07-01T08:58:40.000Z | 2019-07-02T05:55:31.000Z | #INCLUDE statements for using date and time attributes
import datetime
present = datetime.date.today()
print("Today's date is {}" .format(present)) #prints today's date completely (fixed typo: "data" -> "date")
#print different attributes of the date
print("Day is {}".format(present.day))
print("Month is {}".format(present.month))
print("Year is {}".format(present.year))
#print date in different format
print("\nPrinting date in different format")
print(present.strftime("%d %B,%Y (%A)"))
print(present.strftime("please attend our event on %A,%B %d in the year %Y"))
#d,b,y,B,Y,a,A
#date,month,year(2 dig),month(Full),year(4 dig),day,day(Full)
#To find difference between the dates
birthstr = input("\nEnter your upcoming birthdate in dd/mm/yyyy format:")#specify the date format to eliminate errors
print(birthstr)
#Converting string type to datetime type (by default input will be of string type)
#date() at the end is used, since we need only no of days and not time
birthdate = datetime.datetime.strptime(birthstr,'%d/%m/%Y').date()
diff = birthdate - present
print("You have still {} days left for your birthday" .format(diff.days)) #since we need only no of days and not time
#To display Calendar (fixed typo: "dispaly")
#Package needed to work with Calendars
import calendar
# Enter the month and year
print("\nCALENDAR DISPLAY")
year = int(input("Enter year: "))
month = int(input("Enter month: "))
# display the calendar
print("\n")
print(calendar.month(year,month))
#-------------------------------------------------------------------------------
#already datetime has been imported
timesnow = datetime.datetime.now()
print("\nCurrent Time is {}" .format(timesnow)) #fixed typo: "Cuurent" -> "Current"
#print different attributes of time
print("Hour shows {}" .format(timesnow.hour))
print("Minute shows {}" .format(timesnow.minute))
print("Second shows {}" .format(timesnow.second))
#print time in different format
print("\nPrinting time in different format")
print(datetime.datetime.strftime(timesnow,"%H:%M:%S %p")) #p is for denoting pm or am
bff786c0885f417ea998c1f60a21144d0bc6c9f7 | 14 | py | Python | openerp/tests/addons/test_impex/__init__.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 41 | 2019-02-12T10:15:19.000Z | 2021-02-14T00:04:47.000Z | odoo/openerp/addons/test_impex/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 2 | 2019-11-23T16:24:28.000Z | 2020-04-21T18:38:58.000Z | odoo/openerp/addons/test_impex/__init__.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 8 | 2019-08-26T01:55:26.000Z | 2021-01-23T22:18:35.000Z | import models
| 7 | 13 | 0.857143 |
5a94d8fde04876732ab61aa388e72ad29b8deb8d | 351 | py | Python | pages/process.py | artbrgn/NBA_Longevity | cd8b06adc116af4f27bf78b326f28e150b647f42 | [
"MIT"
] | 2 | 2020-01-23T14:55:13.000Z | 2020-01-23T14:55:25.000Z | pages/process.py | artbrgn/NBA_Longevity | cd8b06adc116af4f27bf78b326f28e150b647f42 | [
"MIT"
] | 5 | 2019-08-29T03:22:21.000Z | 2021-08-23T20:18:13.000Z | pages/process.py | artbrgn/NBA_Longevity | cd8b06adc116af4f27bf78b326f28e150b647f42 | [
"MIT"
] | 3 | 2019-12-13T05:09:33.000Z | 2020-01-07T17:02:32.000Z | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
"""
),
],
)
layout = dbc.Row([column1]) | 15.26087 | 43 | 0.603989 |
d932f92e9707d4d3f568eafda33a99186a9543ce | 4,686 | py | Python | mkdocs_awesome_pages_plugin/tests/navigation/test_meta.py | rarescosma/mkdocs-awesome-pages-plugin | 2788b3709f8dd0285e14d86a251b7e68c7020720 | [
"MIT"
] | null | null | null | mkdocs_awesome_pages_plugin/tests/navigation/test_meta.py | rarescosma/mkdocs-awesome-pages-plugin | 2788b3709f8dd0285e14d86a251b7e68c7020720 | [
"MIT"
] | null | null | null | mkdocs_awesome_pages_plugin/tests/navigation/test_meta.py | rarescosma/mkdocs-awesome-pages-plugin | 2788b3709f8dd0285e14d86a251b7e68c7020720 | [
"MIT"
] | null | null | null | from typing import Optional
from unittest import TestCase
from .base import NavigationTestCase
from ...meta import Meta
from ...navigation import NavigationMeta
from ...options import Options
from ...utils import normpath
class TestCommonDirname(TestCase):
    """Unit tests for NavigationMeta._common_dirname: it returns the
    directory shared by all given paths, or None if they differ, the list
    is empty, or any entry is None."""

    def test_all_match(self):
        self.assertEqual(
            NavigationMeta._common_dirname(["a/1.md", "a/2.md"]), "a"
        )

    def test_some_match(self):
        self.assertEqual(
            NavigationMeta._common_dirname(["a/1.md", "a/2.md", "b/3.md"]), None
        )

    def test_none_match(self):
        self.assertEqual(
            NavigationMeta._common_dirname(["a/1.md", "b/2.md"]), None
        )

    def test_empty(self):
        self.assertEqual(NavigationMeta._common_dirname([]), None)

    def test_some_none_entries(self):
        self.assertEqual(
            NavigationMeta._common_dirname(["section/page.md", None]), None
        )

    def test_all_none_entries(self):
        self.assertEqual(NavigationMeta._common_dirname([None, None]), None)
class TestMeta(NavigationTestCase):
    """Tests that NavigationMeta resolves the right meta-file path (.pages
    by default) for the navigation root and for each section."""

    def assertMeta(
        self,
        actual: Meta,
        expected: Optional[Meta] = None,
        *,
        path: Optional[str] = None
    ):
        """Assert that *actual* equals *expected* field by field; with only
        *path* given, compare against a default Meta at that path."""
        if expected is None:
            expected = Meta(path=path)
        self.assertEqual(
            actual.collapse_single_pages, expected.collapse_single_pages
        )
        self.assertEqual(actual.collapse, expected.collapse)
        self.assertEqual(actual.arrange, expected.arrange)
        self.assertEqual(actual.title, expected.title)
        self.assertEqual(normpath(actual.path), normpath(expected.path))

    def assertEmptyMeta(self, meta: Meta):
        # A "default" Meta with no path set.
        self.assertMeta(meta)

    def setUp(self):
        super(TestMeta, self).setUp()
        self.options = Options(
            filename=".pages", collapse_single_pages=False, strict=True
        )

    def test_empty(self):
        meta = NavigationMeta([], self.options)
        self.assertEqual(len(meta.sections), 0)
        self.assertEmptyMeta(meta.root)

    def test_page_in_root(self):
        meta = NavigationMeta([self.page("Page", "page.md")], self.options)
        self.assertEqual(len(meta.sections), 0)
        self.assertMeta(meta.root, path=".pages")

    def test_empty_section(self):
        # A section without pages has no directory to attach a meta file to.
        section = self.section("Section", [])
        meta = NavigationMeta([section], self.options)
        self.assertEqual(len(meta.sections), 1)
        self.assertEmptyMeta(meta.sections[section])
        self.assertEmptyMeta(meta.root)

    def test_section(self):
        section = self.section(
            "Section", [self.page("Page", "section/page.md")]
        )
        meta = NavigationMeta([section], self.options)
        self.assertEqual(len(meta.sections), 1)
        self.assertMeta(meta.sections[section], path="section/.pages")
        self.assertMeta(meta.root, path=".pages")

    def test_multiple_sections(self):
        # Nested tree: A/B with a page, C with empty D and populated E.
        b = self.section("B", [self.page("1", "a/b/1.md")])
        a = self.section("A", [b])
        d = self.section("D", [])
        e = self.section("E", [self.page("2", "c/e/2.md")])
        c = self.section("C", [d, e])
        meta = NavigationMeta([a, c], self.options)
        self.assertEqual(len(meta.sections), 5)
        self.assertMeta(meta.sections[a], path="a/.pages")
        self.assertMeta(meta.sections[b], path="a/b/.pages")
        self.assertEmptyMeta(meta.sections[c])
        self.assertEmptyMeta(meta.sections[d])
        self.assertMeta(meta.sections[e], path="c/e/.pages")
        self.assertEmptyMeta(meta.root)

    def test_filename_option(self):
        # A custom meta filename (.index) must be honoured everywhere.
        section = self.section(
            "Section", [self.page("Page", "section/page.md")]
        )
        meta = NavigationMeta(
            [section],
            Options(
                filename=".index", collapse_single_pages=False, strict=True
            ),
        )
        self.assertEqual(len(meta.sections), 1)
        self.assertMeta(meta.sections[section], path="section/.index")
        self.assertMeta(meta.root, path=".index")

    def test_links(self):
        # Links have no file path and must not break root meta resolution.
        meta = NavigationMeta(
            [self.page("Page", "page.md"), self.link("Link")], self.options
        )
        self.assertEqual(len(meta.sections), 0)
        self.assertMeta(meta.root, path=".pages")

    def test_no_common_dirname(self):
        # Pages from different directories leave the section without a path.
        section = self.section(
            "Section", [self.page("1", "a/1.md"), self.page("2", "b/2.md")]
        )
        meta = NavigationMeta([section], self.options)
        self.assertEqual(len(meta.sections), 1)
        self.assertEmptyMeta(meta.sections[section])
        self.assertEmptyMeta(meta.root)
| 31.24 | 80 | 0.612463 |
9965582855c872e910efdcc769e10e96ebcbd2bf | 21,718 | py | Python | examples/model_compression/ofa/run_glue_ofa.py | frozenfish123/PaddleNLP | b9c2910fb58730c8341067122c347cde5f6e7567 | [
"Apache-2.0"
] | 1 | 2021-06-15T14:20:00.000Z | 2021-06-15T14:20:00.000Z | examples/model_compression/ofa/run_glue_ofa.py | liliustb/PaddleNLP | 17fe183370809337d42390c0842272cef87c5c9d | [
"Apache-2.0"
] | null | null | null | examples/model_compression/ofa/run_glue_ofa.py | liliustb/PaddleNLP | 17fe183370809337d42390c0842272cef87c5c9d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import time
import math
from functools import partial
import numpy as np
import paddle
import paddle.nn.functional as F
from paddle.io import DataLoader
from paddle.metric import Accuracy, Precision, Recall
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import BertModel, BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.utils.log import logger
from paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman
from paddleslim.nas.ofa import OFA, DistillConfig, utils
from paddleslim.nas.ofa.utils import nlp_utils
from paddleslim.nas.ofa.convert_super import Convert, supernet
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"mrpc": AccuracyAndF1,
"sts-b": PearsonAndSpearman,
"qqp": AccuracyAndF1,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
}
MODEL_CLASSES = {"bert": (BertForSequenceClassification, BertTokenizer), }
def parse_args():
    """Define and parse the command-line arguments for OFA GLUE training."""
    arg_parser = argparse.ArgumentParser()

    # Required arguments.
    arg_parser.add_argument(
        "--task_name", default=None, type=str, required=True,
        help="The name of the task to train selected in the list: " +
        ", ".join(METRIC_CLASSES.keys()))
    arg_parser.add_argument(
        "--model_type", default=None, type=str, required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()))
    arg_parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(
            sum([
                list(classes[-1].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])))
    arg_parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.")

    # Data and optimisation hyper-parameters.
    arg_parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.")
    arg_parser.add_argument(
        "--batch_size", default=8, type=int,
        help="Batch size per GPU/CPU for training.")
    arg_parser.add_argument(
        "--learning_rate", default=5e-5, type=float,
        help="The initial learning rate for Adam.")
    arg_parser.add_argument(
        "--weight_decay", default=0.0, type=float,
        help="Weight decay if we apply some.")
    arg_parser.add_argument(
        "--adam_epsilon", default=1e-8, type=float,
        help="Epsilon for Adam optimizer.")
    arg_parser.add_argument(
        "--max_grad_norm", default=1.0, type=float,
        help="Max gradient norm.")
    arg_parser.add_argument(
        "--lambda_logit", default=1.0, type=float,
        help="lambda for logit loss.")
    arg_parser.add_argument(
        "--num_train_epochs", default=3, type=int,
        help="Total number of training epochs to perform.")
    arg_parser.add_argument(
        "--max_steps", default=-1, type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    arg_parser.add_argument(
        "--warmup_steps", default=0, type=int,
        help="Linear warmup over warmup_steps.")

    # Logging, checkpointing and runtime options.
    arg_parser.add_argument(
        "--logging_steps", type=int, default=500,
        help="Log every X updates steps.")
    arg_parser.add_argument(
        "--save_steps", type=int, default=500,
        help="Save checkpoint every X updates steps.")
    arg_parser.add_argument(
        "--seed", type=int, default=42,
        help="random seed for initialization")
    arg_parser.add_argument(
        "--n_gpu", type=int, default=1,
        help="number of gpus to use, 0 for cpu.")
    arg_parser.add_argument(
        '--width_mult_list', nargs='+', type=float,
        default=[1.0, 5 / 6, 2 / 3, 0.5],
        help="width mult in compress")

    return arg_parser.parse_args()
def set_seed(args):
    """Seed the Python, NumPy and Paddle RNGs from ``args.seed``.

    The same data seed is used on every process so that data shuffling stays
    consistent after sharding.  Per-process op seeds (e.g. for dropout) could
    instead be derived via
    ``paddle.seed(args.seed + paddle.distributed.get_rank())``.
    """
    seed_value = args.seed
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(seed_value)
@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader, width_mult=1.0):
    """Run one evaluation pass over ``data_loader`` and print a summary.

    Args:
        model: network (possibly an OFA-wrapped supernet) called as
            ``model(input_ids, segment_ids, attention_mask=[None, None])``.
        criterion: loss function applied to the logits.
        metric: paddle Metric (Accuracy / AccuracyAndF1 / Mcc /
            PearsonAndSpearman) accumulated over the whole loader.
        data_loader: yields ``(input_ids, segment_ids, labels)`` batches.
        width_mult: sub-network width multiplier; the sentinel value 100
            marks the teacher model's evaluation.

    Note: as in the original implementation, the printed ``eval loss`` is
    the loss of the *last* batch only, while the metric covers all batches.
    """
    model.eval()
    metric.reset()
    for batch in data_loader:
        input_ids, segment_ids, labels = batch
        logits = model(input_ids, segment_ids, attention_mask=[None, None])
        # OFA sub-networks may return (logits, extras); keep the logits only.
        if isinstance(logits, tuple):
            logits = logits[0]
        loss = criterion(logits, labels)
        correct = metric.compute(logits, labels)
        metric.update(correct)
    res = metric.accumulate()

    # The original duplicated every print for teacher vs. student; build the
    # byte-identical output from a shared prefix instead.
    if width_mult == 100:
        prefix = "teacher model"
    else:
        prefix = "width_mult: %s" % str(width_mult)
    if isinstance(metric, AccuracyAndF1):
        body = ("eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, "
                "acc and f1: %s, " %
                (loss.numpy(), res[0], res[1], res[2], res[3], res[4]))
    elif isinstance(metric, Mcc):
        body = "eval loss: %f, mcc: %s, " % (loss.numpy(), res[0])
    elif isinstance(metric, PearsonAndSpearman):
        body = ("eval loss: %f, pearson: %s, spearman: %s, "
                "pearson and spearman: %s, " %
                (loss.numpy(), res[0], res[1], res[2]))
    else:
        body = "eval loss: %f, acc: %s, " % (loss.numpy(), res)
    print(prefix + ", " + body, end='')
    model.train()
### monkey patch for bert forward to accept [attention_mask, head_mask] as attention_mask
def bert_forward(self,
                 input_ids,
                 token_type_ids=None,
                 position_ids=None,
                 attention_mask=None):
    """Patched BertModel.forward accepting ``[attention_mask, head_mask]``.

    Bug fix: the original signature used a mutable default
    ``attention_mask=[None, None]`` and then assigned into element 0, so the
    padding mask computed on the first default-argument call was cached in
    the shared list and silently reused (with the wrong padding) by every
    later call.  A fresh list is now created on each call.
    """
    if attention_mask is None:
        attention_mask = [None, None]
    wtype = self.pooler.dense.fn.weight.dtype if hasattr(
        self.pooler.dense, 'fn') else self.pooler.dense.weight.dtype
    if attention_mask[0] is None:
        # Mask padding positions with a large negative bias before softmax.
        attention_mask[0] = paddle.unsqueeze(
            (input_ids == self.pad_token_id).astype(wtype) * -1e9, axis=[1, 2])
    embedding_output = self.embeddings(
        input_ids=input_ids,
        position_ids=position_ids,
        token_type_ids=token_type_ids)
    encoder_outputs = self.encoder(embedding_output, attention_mask)
    sequence_output = encoder_outputs
    pooled_output = self.pooler(sequence_output)
    return sequence_output, pooled_output


BertModel.forward = bert_forward
### reorder weights according head importance and neuron importance
def reorder_neuron_head(model, head_importance, neuron_importance):
    """Reorder each BERT layer's attention heads and FFN neurons in place,
    most-important first, so OFA width slicing keeps the important units.

    Args:
        model: model exposing ``model.bert.encoder.layers``.
        head_importance: per-layer tensor of attention-head importance scores.
        neuron_importance: iterable of per-layer FFN neuron importance arrays.
    """
    # reorder heads and ffn neurons
    for layer, current_importance in enumerate(neuron_importance):
        # reorder heads
        idx = paddle.argsort(head_importance[layer], descending=True)
        nlp_utils.reorder_head(model.bert.encoder.layers[layer].self_attn, idx)
        # reorder neurons: linear1's output dim and linear2's input dim must
        # be permuted consistently (hence dim=1 then dim=0).
        idx = paddle.argsort(
            paddle.to_tensor(current_importance), descending=True)
        nlp_utils.reorder_neuron(
            model.bert.encoder.layers[layer].linear1.fn, idx, dim=1)
        nlp_utils.reorder_neuron(
            model.bert.encoder.layers[layer].linear2.fn, idx, dim=0)
def soft_cross_entropy(inp, target):
    """Distillation loss: cross-entropy of ``inp`` against the soft label
    distribution ``softmax(target)``, averaged over the batch."""
    student_log_probs = F.log_softmax(inp, axis=-1)
    teacher_probs = F.softmax(target, axis=-1)
    per_sample = paddle.sum(student_log_probs * teacher_probs, axis=-1)
    return -1. * paddle.mean(per_sample)
def convert_example(example,
                    tokenizer,
                    label_list,
                    max_seq_length=512,
                    is_test=False):
    """convert a glue example into necessary features

    Returns ``(input_ids, token_type_ids)`` for test examples and
    ``(input_ids, token_type_ids, label)`` otherwise, where ``label`` is a
    1-element numpy array (int64 for classification, float32 when
    ``label_list`` is None, i.e. regression).
    """
    label = None
    if not is_test:
        # `label_list == None` is for regression task
        label_dtype = "int64" if label_list else "float32"
        label = np.array([example['labels']], dtype=label_dtype)
    # Single-sentence examples carry exactly one text field besides the
    # optional label, which the original encoded as int(is_test) + len == 2.
    if (int(is_test) + len(example)) == 2:
        encoded = tokenizer(example['sentence'], max_seq_len=max_seq_length)
    else:
        encoded = tokenizer(
            example['sentence1'],
            text_pair=example['sentence2'],
            max_seq_len=max_seq_length)
    features = (encoded['input_ids'], encoded['token_type_ids'])
    return features if is_test else features + (label,)
def do_train(args):
    """Train a DynaBERT width-adaptive supernet on a GLUE task.

    Pipeline: load data/model, convert the fine-tuned BERT into an OFA
    supernet, distill from a frozen teacher copy, reorder heads/neurons by
    importance, then train every width multiplier on each batch.
    Periodically evaluates and saves checkpoints.
    """
    paddle.set_device("gpu" if args.n_gpu else "cpu")
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    set_seed(args)
    args.task_name = args.task_name.lower()
    metric_class = METRIC_CLASSES[args.task_name]
    args.model_type = args.model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    train_ds = load_dataset('glue', args.task_name, splits="train")
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    trans_func = partial(
        convert_example,
        tokenizer=tokenizer,
        label_list=train_ds.label_list,
        max_seq_length=args.max_seq_length)
    train_ds = train_ds.map(trans_func, lazy=True)
    train_batch_sampler = paddle.io.DistributedBatchSampler(
        train_ds, batch_size=args.batch_size, shuffle=True)
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # segment
        Stack(dtype="int64" if train_ds.label_list else "float32")  # label
    ): fn(samples)
    train_data_loader = DataLoader(
        dataset=train_ds,
        batch_sampler=train_batch_sampler,
        collate_fn=batchify_fn,
        num_workers=0,
        return_list=True)
    # MNLI ships two dev splits (matched/mismatched); every other GLUE task
    # has a single "dev" split.
    if args.task_name == "mnli":
        dev_ds_matched, dev_ds_mismatched = load_dataset(
            'glue', args.task_name, splits=["dev_matched", "dev_mismatched"])
        dev_ds_matched = dev_ds_matched.map(trans_func, lazy=True)
        dev_ds_mismatched = dev_ds_mismatched.map(trans_func, lazy=True)
        dev_batch_sampler_matched = paddle.io.BatchSampler(
            dev_ds_matched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_matched = DataLoader(
            dataset=dev_ds_matched,
            batch_sampler=dev_batch_sampler_matched,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
        dev_batch_sampler_mismatched = paddle.io.BatchSampler(
            dev_ds_mismatched, batch_size=args.batch_size, shuffle=False)
        dev_data_loader_mismatched = DataLoader(
            dataset=dev_ds_mismatched,
            batch_sampler=dev_batch_sampler_mismatched,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
    else:
        dev_ds = load_dataset('glue', args.task_name, splits='dev')
        dev_ds = dev_ds.map(trans_func, lazy=True)
        dev_batch_sampler = paddle.io.BatchSampler(
            dev_ds, batch_size=args.batch_size, shuffle=False)
        dev_data_loader = DataLoader(
            dataset=dev_ds,
            batch_sampler=dev_batch_sampler,
            collate_fn=batchify_fn,
            num_workers=0,
            return_list=True)
    num_labels = 1 if train_ds.label_list == None else len(train_ds.label_list)
    model = model_class.from_pretrained(
        args.model_name_or_path, num_classes=num_labels)
    # Step1: Initialize a dictionary to save the weights from the origin BERT model.
    origin_weights = model.state_dict()
    # Step2: Convert origin model to supernet.
    sp_config = supernet(expand_ratio=args.width_mult_list)
    model = Convert(sp_config).convert(model)
    # Use weights saved in the dictionary to initialize supernet.
    utils.set_state_dict(model, origin_weights)
    del origin_weights
    # Step3: Define teacher model.
    teacher_model = model_class.from_pretrained(
        args.model_name_or_path, num_classes=num_labels)
    # Step4: Config about distillation.
    mapping_layers = ['bert.embeddings']
    for idx in range(model.bert.config['num_hidden_layers']):
        mapping_layers.append('bert.encoder.layers.{}'.format(idx))
    default_distill_config = {
        'lambda_distill': 0.1,
        'teacher_model': teacher_model,
        'mapping_layers': mapping_layers,
    }
    distill_config = DistillConfig(**default_distill_config)
    # Step5: Config in supernet training.
    ofa_model = OFA(model,
                    distill_config=distill_config,
                    elastic_order=['width'])
    criterion = paddle.nn.loss.CrossEntropyLoss(
    ) if train_ds.label_list else paddle.nn.loss.MSELoss()
    metric = metric_class()
    if args.task_name == "mnli":
        dev_data_loader = (dev_data_loader_matched, dev_data_loader_mismatched)
    # Step6: Calculate the importance of neurons and head,
    # and then reorder them according to the importance.
    head_importance, neuron_importance = nlp_utils.compute_neuron_head_importance(
        args.task_name,
        ofa_model.model,
        dev_data_loader,
        loss_fct=criterion,
        num_layers=model.bert.config['num_hidden_layers'],
        num_heads=model.bert.config['num_attention_heads'])
    reorder_neuron_head(ofa_model.model, head_importance, neuron_importance)
    if paddle.distributed.get_world_size() > 1:
        ofa_model.model = paddle.DataParallel(ofa_model.model)
    if args.max_steps > 0:
        num_training_steps = args.max_steps
        num_train_epochs = math.ceil(num_training_steps /
                                     len(train_data_loader))
    else:
        num_training_steps = len(train_data_loader) * args.num_train_epochs
        num_train_epochs = args.num_train_epochs
    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         args.warmup_steps)
    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        epsilon=args.adam_epsilon,
        parameters=ofa_model.model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)
    global_step = 0
    tic_train = time.time()
    for epoch in range(num_train_epochs):
        # Step7: Set current epoch and task.
        ofa_model.set_epoch(epoch)
        ofa_model.set_task('width')
        for step, batch in enumerate(train_data_loader):
            global_step += 1
            input_ids, segment_ids, labels = batch
            # Gradients from every width multiplier accumulate; a single
            # optimizer step is applied after the inner loop.
            for width_mult in args.width_mult_list:
                # Step8: Broadcast supernet config from width_mult,
                # and use this config in supernet training.
                net_config = utils.dynabert_config(ofa_model, width_mult)
                ofa_model.set_net_config(net_config)
                logits, teacher_logits = ofa_model(
                    input_ids, segment_ids, attention_mask=[None, None])
                rep_loss = ofa_model.calc_distill_loss()
                # sts-b is a regression task: no logit distillation there.
                if args.task_name == 'sts-b':
                    logit_loss = 0.0
                else:
                    logit_loss = soft_cross_entropy(logits,
                                                    teacher_logits.detach())
                loss = rep_loss + args.lambda_logit * logit_loss
                loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.clear_grad()
            if global_step % args.logging_steps == 0:
                if (not args.n_gpu > 1) or paddle.distributed.get_rank() == 0:
                    logger.info(
                        "global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
                        % (global_step, epoch, step, loss,
                           args.logging_steps / (time.time() - tic_train)))
                tic_train = time.time()
            if global_step % args.save_steps == 0:
                tic_eval = time.time()
                # width_mult=100 evaluates the (full-width) teacher baseline.
                if args.task_name == "mnli":
                    evaluate(
                        teacher_model,
                        criterion,
                        metric,
                        dev_data_loader_matched,
                        width_mult=100)
                    evaluate(
                        teacher_model,
                        criterion,
                        metric,
                        dev_data_loader_mismatched,
                        width_mult=100)
                else:
                    evaluate(
                        teacher_model,
                        criterion,
                        metric,
                        dev_data_loader,
                        width_mult=100)
                print("eval done total : %s s" % (time.time() - tic_eval))
                # Then evaluate (and checkpoint) every sub-network width.
                for idx, width_mult in enumerate(args.width_mult_list):
                    net_config = utils.dynabert_config(ofa_model, width_mult)
                    ofa_model.set_net_config(net_config)
                    tic_eval = time.time()
                    if args.task_name == "mnli":
                        acc = evaluate(ofa_model, criterion, metric,
                                       dev_data_loader_matched, width_mult)
                        evaluate(ofa_model, criterion, metric,
                                 dev_data_loader_mismatched, width_mult)
                        print("eval done total : %s s" %
                              (time.time() - tic_eval))
                    else:
                        acc = evaluate(ofa_model, criterion, metric,
                                       dev_data_loader, width_mult)
                        print("eval done total : %s s" %
                              (time.time() - tic_eval))
                    if (not args.n_gpu > 1
                        ) or paddle.distributed.get_rank() == 0:
                        output_dir = os.path.join(args.output_dir,
                                                  "model_%d" % global_step)
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # need better way to get inner model of DataParallel
                        model_to_save = model._layers if isinstance(
                            model, paddle.DataParallel) else model
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
            if global_step >= num_training_steps:
                return
def print_arguments(args):
    """Print all parsed arguments, sorted by name, framed by banner lines."""
    print('----------- Configuration Arguments -----------')
    for name, value in sorted(vars(args).items()):
        print('%s: %s' % (name, value))
    print('------------------------------------------------')
if __name__ == "__main__":
    # CLI entry point: parse args, echo the configuration, then launch
    # training (one process per GPU via spawn when n_gpu > 1).
    args = parse_args()
    print_arguments(args)
    if args.n_gpu > 1:
        paddle.distributed.spawn(do_train, args=(args, ), nprocs=args.n_gpu)
    else:
        do_train(args)
| 37.639515 | 109 | 0.59209 |
d5a74b972ed8455c4b9ae880dc9cf1853d3eb443 | 8,440 | py | Python | LogSystem_JE/venv/Lib/site-packages/pygments/lexers/pawn.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | LogSystem_JE/venv/Lib/site-packages/pygments/lexers/pawn.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | LogSystem_JE/venv/Lib/site-packages/pygments/lexers/pawn.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.lexers.pawn
~~~~~~~~~~~~~~~~~~~~
Lexers for the Pawn languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from pygments.util import get_bool_opt
__all__ = ['SourcePawnLexer', 'PawnLexer']
class SourcePawnLexer(RegexLexer):
    """
    For SourcePawn source code with preprocessor directives.
    .. versionadded:: 1.6
    """
    name = 'SourcePawn'
    aliases = ['sp']
    filenames = ['*.sp']
    mimetypes = ['text/x-sourcepawn']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
    # State machine: 'root' handles ordinary code, 'string' runs inside a
    # double-quoted literal, 'macro' inside a preprocessor line, and 'if0'
    # swallows `#if 0 ... #endif` regions as comments.
    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            (r'^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(case|const|continue|native|'
             r'default|else|enum|for|if|new|operator|'
             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
            (r'(bool|Float)\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    # SourceMod type names promoted to Keyword.Type when the 'sourcemod'
    # option is enabled.  NOTE(review): 'Action' appears twice in this
    # literal; harmless in a set, but probably a copy/paste slip.
    SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any',
                'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
                'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart',
                'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow',
                'ConVarBounds', 'QueryCookie', 'ReplySource',
                'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
                'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
                'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
                'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
                'EventHook', 'FileType', 'FileTimeMode', 'PathType',
                'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
                'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
                'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
                'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
                'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
                'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
                'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
                'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
                'TopMenuPosition', 'TopMenuObject', 'UserMsg'}
    def __init__(self, **options):
        # 'sourcemod' option (default True) also pulls in the SourceMod
        # builtin function list for Name.Builtin highlighting.
        self.smhighlighting = get_bool_opt(options,
                                           'sourcemod', True)
        self._functions = set()
        if self.smhighlighting:
            from pygments.lexers._sourcemod_builtins import FUNCTIONS
            self._functions.update(FUNCTIONS)
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        # Post-process plain Name tokens: upgrade known SourceMod types and
        # builtin functions when SourceMod highlighting is on.
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.smhighlighting:
                    if value in self.SM_TYPES:
                        token = Keyword.Type
                    elif value in self._functions:
                        token = Name.Builtin
            yield index, token, value
class PawnLexer(RegexLexer):
    """
    For Pawn source code.
    .. versionadded:: 2.0
    """
    name = 'Pawn'
    aliases = ['pawn']
    filenames = ['*.p', '*.pwn', '*.inc']
    mimetypes = ['text/x-pawn']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*][\w\W]*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
    # Same state layout as SourcePawnLexer above: 'root', 'string', 'macro'
    # and an 'if0' state for `#if 0 ... #endif` blocks.
    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            (r'^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?\*[\w\W]*?\*(\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(switch|case|default|const|new|static|char|continue|break|'
             r'if|else|for|while|do|operator|enum|'
             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
            (r'(bool|Float)\b', Keyword.Type),
            (r'(true|false)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/\*(.|\n)*?\*/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    # NOTE(review): no `self` is the pygments convention here — the Lexer
    # machinery treats analyse_text as a static scoring hook.
    def analyse_text(text):
        """This is basically C. There is a keyword which doesn't exist in C
        though and is nearly unique to this language."""
        if 'tagof' in text:
            return 0.01
| 40.970874 | 84 | 0.448697 |
f248d7331b4abf4e141d68df28c70e4a3036da77 | 3,525 | py | Python | mergify_engine/actions/post_check.py | colmose/mergify-engine | bb52a6887c37e4bcf59a794dec7877600f607d03 | [
"Apache-2.0"
] | 1 | 2019-06-12T09:15:04.000Z | 2019-06-12T09:15:04.000Z | mergify_engine/actions/post_check.py | colmose/mergify-engine | bb52a6887c37e4bcf59a794dec7877600f607d03 | [
"Apache-2.0"
] | 16 | 2020-10-28T08:02:59.000Z | 2020-11-17T12:46:22.000Z | mergify_engine/actions/post_check.py | prosepro/mergify-engine | 7336b73b27d8fa4e007dfa87846b9221e4adadeb | [
"Apache-2.0"
] | 1 | 2018-08-20T11:40:59.000Z | 2018-08-20T11:40:59.000Z | # -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import voluptuous
from mergify_engine import actions
from mergify_engine import check_api
from mergify_engine import context
from mergify_engine import rules
from mergify_engine import subscription
from mergify_engine.rules import types
def CheckRunJinja2(v):
    """Voluptuous validator: ensure *v* is a Jinja2 template renderable with
    the check-run variables (rule name, success flag, conditions text)."""
    sample_context = {
        "check_rule_name": "whatever",
        "check_succeed": True,
        "check_conditions": "the expected condition conditions",
    }
    return types.Jinja2(v, sample_context)
class PostCheckAction(actions.Action):
    """Action that posts a custom check run summarizing rule conditions."""
    # Both templates are rendered with check_rule_name / check_succeed /
    # check_conditions (see CheckRunJinja2 above).
    validator = {
        voluptuous.Required(
            "title",
            default="'{{ check_rule_name }}' {% if check_succeed %}succeed{% else %}failed{% endif %}",
        ): CheckRunJinja2,
        voluptuous.Required(
            "summary", default="{{ check_conditions }}"
        ): CheckRunJinja2,
    }
    always_run = True
    allow_retrigger_mergify = True
    def _post(
        self, ctxt: context.Context, rule: rules.EvaluatedRule
    ) -> check_api.Result:
        # TODO(sileht): Don't run it if conditions contains the rule itself, as it can
        # created an endless loop of events.
        # Custom checks are a paid feature: bail out early when missing.
        if not ctxt.subscription.has_feature(subscription.Features.CUSTOM_CHECKS):
            return check_api.Result(
                check_api.Conclusion.ACTION_REQUIRED,
                "Custom checks are disabled",
                ctxt.subscription.missing_feature_reason(
                    ctxt.pull["base"]["repo"]["owner"]["login"]
                ),
            )
        check_succeed = not bool(rule.missing_conditions)
        # Render conditions as a markdown checklist ([X] met, [ ] missing).
        check_conditions = ""
        for cond in rule.conditions:
            checked = " " if cond in rule.missing_conditions else "X"
            check_conditions += f"\n- [{checked}] `{cond}`"
        extra_variables = {
            "check_rule_name": rule.name,
            "check_succeed": check_succeed,
            "check_conditions": check_conditions,
        }
        try:
            title = ctxt.pull_request.render_template(
                self.config["title"],
                extra_variables,
            )
        except context.RenderTemplateFailure as rmf:
            return check_api.Result(
                check_api.Conclusion.FAILURE,
                "Invalid title template",
                str(rmf),
            )
        try:
            summary = ctxt.pull_request.render_template(
                self.config["summary"], extra_variables
            )
        except context.RenderTemplateFailure as rmf:
            return check_api.Result(
                check_api.Conclusion.FAILURE,
                "Invalid summary template",
                str(rmf),
            )
        if rule.missing_conditions:
            return check_api.Result(check_api.Conclusion.FAILURE, title, summary)
        else:
            return check_api.Result(check_api.Conclusion.SUCCESS, title, summary)
    # The same handler serves both run and cancel events.
    run = _post
    cancel = _post
| 32.638889 | 103 | 0.614468 |
dcfdde23ab22c02959120a6a7c026c81d61f20fc | 6,734 | py | Python | nanodlna/cli.py | matgoebl/nano-dlna | 24ff2231d35e11dc25b76abd39af4e9d43e18b2b | [
"MIT"
] | null | null | null | nanodlna/cli.py | matgoebl/nano-dlna | 24ff2231d35e11dc25b76abd39af4e9d43e18b2b | [
"MIT"
] | null | null | null | nanodlna/cli.py | matgoebl/nano-dlna | 24ff2231d35e11dc25b76abd39af4e9d43e18b2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import json
import os
import sys
import datetime
import tempfile
from . import devices, dlna, streaming
import logging
def set_logs(args):
    """Configure the root logger: DEBUG to a fresh temp file when --debug is
    on (and print its location), otherwise WARN to the console."""
    log_format = "[ %(asctime)s ] %(levelname)s : %(message)s"
    if not args.debug_activated:
        logging.basicConfig(
            level=logging.WARN,
            format=log_format
        )
        return
    timestamp = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
    log_filename = os.path.join(
        tempfile.mkdtemp(),
        "nanodlna-{}.log".format(timestamp)
    )
    logging.basicConfig(
        filename=log_filename,
        filemode="w",
        level=logging.DEBUG,
        format=log_format
    )
    print("nano-dlna log will be saved here: {}".format(log_filename))
def get_subtitle(file_video):
    """Return the sibling ``.srt`` subtitle path for *file_video*, or
    ``None`` when no such file exists on disk."""
    stem, _extension = os.path.splitext(file_video)
    candidate = "{0}.srt".format(stem)
    return candidate if os.path.exists(candidate) else None
def list_devices(args):
    """Discover UPnP/DLNA devices on the network and pretty-print each one."""
    set_logs(args)
    logging.info("Scanning devices...")
    discovered = devices.get_devices(args.timeout)
    logging.info("Number of devices found: {}".format(len(discovered)))
    for number, device in enumerate(discovered, 1):
        print("Device {0}:\n{1}\n\n".format(number, json.dumps(device, indent=4)))
def find_device(args):
    """Resolve the target device from --device URL or --query-device string.

    Exits the process when neither is given or when nothing matches.
    """
    if not args.device_url and not args.device_query:
        logging.info("No device url and no query string provided")
        sys.exit("No device specified; exiting")
    if args.device_url:
        logging.info("Select device by URL")
        device = devices.register_device(args.device_url)
    else:
        device = None
        candidates = devices.get_devices(args.timeout)
        if len(candidates) == 0:
            sys.exit("No devices found; exiting")
        if len(candidates) == 1:
            logging.info("Only one device exists, selecting this")
            device = candidates[0]
        else:
            logging.info("Select device by query")
            query = args.device_query.lower()
            for candidate in candidates:
                if query in str(candidate).lower():
                    device = candidate
                    break
            if device is None:
                sys.exit("No devices found; exiting")
    logging.info("Device selected: {}".format(json.dumps(device)))
    return device
def resume(args):
    """Resume playback on the selected DLNA device."""
    set_logs(args)
    logging.info("Selecting device to resume")
    target = find_device(args)
    # Resume through DLNA protocol
    logging.info("Sending resume command")
    dlna.resume(target)
def pause(args):
    """Pause playback on the selected DLNA device."""
    set_logs(args)
    logging.info("Selecting device to pause")
    target = find_device(args)
    # Pause through DLNA protocol
    logging.info("Sending pause command")
    dlna.pause(target)
def stop(args):
    """Stop playback on the selected DLNA device."""
    set_logs(args)
    logging.info("Selecting device to stop")
    target = find_device(args)
    # Stop through DLNA protocol
    logging.info("Sending stop command")
    dlna.stop(target)
def play(args):
    """Stream a video (plus optional .srt subtitle) to a DLNA device.

    Note: unlike resume/pause/stop this picks the first discovered device
    when no query is given, instead of going through find_device().
    """
    set_logs(args)
    logging.info("Starting to play")
    # Get video and subtitle file names
    files = {"file_video": args.file_video}
    if args.use_subtitle:
        if not args.file_subtitle:
            args.file_subtitle = get_subtitle(args.file_video)
        if args.file_subtitle:
            files["file_subtitle"] = args.file_subtitle
    logging.info("Media files: {}".format(json.dumps(files)))
    # Select device to play
    logging.info("Selecting device to play")
    device = None
    if args.device_url:
        logging.info("Select device by URL")
        device = devices.register_device(args.device_url)
    else:
        my_devices = devices.get_devices(args.timeout)
        if len(my_devices) > 0:
            if args.device_query:
                logging.info("Select device by query")
                device = [
                    device for device in my_devices
                    if args.device_query.lower() in str(device).lower()][0]
            else:
                logging.info("Select first device")
                device = my_devices[0]
    if not device:
        sys.exit("No devices found.")
    logging.info("Device selected: {}".format(json.dumps(device)))
    # Configure streaming server
    logging.info("Configuring streaming server")
    target_ip = device["hostname"]
    if args.local_host:
        serve_ip = args.local_host
    else:
        serve_ip = streaming.get_serve_ip(target_ip)
    # Remote URLs are passed straight to the device; local files are served
    # through the built-in HTTP streaming server.
    if args.file_video.startswith('http://') or args.file_video.startswith('https://'):
        files_urls = files
    else:
        files_urls = streaming.start_server(files, serve_ip)
    logging.info("Streaming server ready")
    # Play the video through DLNA protocol
    logging.info("Sending play command")
    dlna.play(files_urls, device)
def run():
    """CLI entry point: build the argument parser (list/stop/pause/resume/
    play subcommands) and dispatch to the matching handler."""
    parser = argparse.ArgumentParser(
        description="A minimal UPnP/DLNA media streamer.")
    # With no subcommand, fall back to printing usage.
    parser.set_defaults(func=lambda args: parser.print_help())
    parser.add_argument("-t", "--timeout", type=float, default=5)
    parser.add_argument("-b", "--debug",
                        dest="debug_activated", action="store_true")
    subparsers = parser.add_subparsers(dest="subparser_name")
    p_list = subparsers.add_parser('list')
    p_list.set_defaults(func=list_devices)
    p_stop = subparsers.add_parser('stop')
    p_stop.add_argument("-d", "--device", dest="device_url")
    p_stop.add_argument("-q", "--query-device", dest="device_query")
    p_stop.set_defaults(func=stop)
    p_pause = subparsers.add_parser('pause')
    p_pause.add_argument("-d", "--device", dest="device_url")
    p_pause.add_argument("-q", "--query-device", dest="device_query")
    p_pause.set_defaults(func=pause)
    p_resume = subparsers.add_parser('resume')
    p_resume.add_argument("-d", "--device", dest="device_url")
    p_resume.add_argument("-q", "--query-device", dest="device_query")
    p_resume.set_defaults(func=resume)
    p_play = subparsers.add_parser('play')
    p_play.add_argument("-d", "--device", dest="device_url")
    p_play.add_argument("-H", "--host", dest="local_host")
    p_play.add_argument("-q", "--query-device", dest="device_query")
    p_play.add_argument("-s", "--subtitle", dest="file_subtitle")
    p_play.add_argument("-n", "--no-subtitle",
                        dest="use_subtitle", action="store_false")
    p_play.add_argument("file_video")
    p_play.set_defaults(func=play)
    args = parser.parse_args()
    try:
        args.func(args)
    except Exception as e:
        # Surface any handler failure as a non-zero exit after logging it.
        logging.error(e)
        sys.exit(1)
if __name__ == "__main__":
    # Allow invocation as a plain script as well as via the console entry point.
    run()
| 26.616601 | 87 | 0.630383 |
dd483d6576879314688c8fd07c3b1d5f1f28616d | 1,377 | py | Python | flod_booking/api/ArrangementConflictsResource.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-10-17T12:15:28.000Z | 2017-10-17T12:15:28.000Z | flod_booking/api/ArrangementConflictsResource.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2021-03-22T17:15:52.000Z | 2022-01-13T00:39:58.000Z | flod_booking/api/ArrangementConflictsResource.py | Trondheim-kommune/Bookingbasen | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-09-09T13:35:03.000Z | 2019-09-09T13:35:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from domain.models import Application
from flask.ext.bouncer import requires, GET
from flask.ext.restful import fields, marshal
from ArrangementBaseResource import ArrangementBaseResource
from BaseResource import ISO8601DateTime
from ResourceResource import resource_fields
from common_fields import person_fields, organisation_fields
generic_slot_fields = {
'id': fields.Integer,
'start_date': ISO8601DateTime,
'end_date': ISO8601DateTime,
'start_time': ISO8601DateTime,
'end_time': ISO8601DateTime,
'week_day': fields.Integer,
}
application_fields = {
'id': fields.Integer,
'text': fields.String,
'person': fields.Nested(person_fields),
'organisation': fields.Nested(organisation_fields),
'resource': fields.Nested(resource_fields),
'requested_resource': fields.Nested(resource_fields),
'slots': fields.Nested(generic_slot_fields),
'status': fields.String,
'application_time': ISO8601DateTime,
'type': fields.String
}
class ArrangementConflictsResource(ArrangementBaseResource):
    """Read-only endpoint listing applications affected by an arrangement."""
    t = Application
    type_name = "application"
    @requires(GET, 'ArrangementConflict')
    def get(self, application_id=None):
        """Return the applications conflicting with the given arrangement,
        marshalled with application_fields."""
        arrangement = self.get_arrangement(application_id)
        return marshal(self.get_affected_applications(arrangement), application_fields)
| 31.295455 | 87 | 0.753086 |
1489798413fa351f113f445731d64306f1ead86d | 495 | py | Python | env/Lib/site-packages/plotly/validators/histogram2d/_colorscale.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/histogram2d/_colorscale.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/histogram2d/_colorscale.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``histogram2d.colorscale`` property."""

    def __init__(self, plotly_name="colorscale", parent_name="histogram2d", **kwargs):
        # Defaults apply only when the caller did not supply overrides.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
| 38.076923 | 86 | 0.682828 |
e44ad3cc5efe39dc156b584b81397bc54210d9a2 | 2,008 | py | Python | flash/core/serve/types/label.py | Actis92/lightning-flash | 49972268cfc0f95f1bd2b8fbf25036970cc44b59 | [
"Apache-2.0"
] | 1,457 | 2021-01-28T20:40:16.000Z | 2022-03-31T06:22:05.000Z | flash/core/serve/types/label.py | Actis92/lightning-flash | 49972268cfc0f95f1bd2b8fbf25036970cc44b59 | [
"Apache-2.0"
] | 1,123 | 2021-01-28T20:37:56.000Z | 2022-03-31T19:34:44.000Z | flash/core/serve/types/label.py | Actis92/lightning-flash | 49972268cfc0f95f1bd2b8fbf25036970cc44b59 | [
"Apache-2.0"
] | 170 | 2021-01-29T00:41:39.000Z | 2022-03-29T16:09:52.000Z | from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torch
from flash.core.serve.types.base import BaseType
@dataclass(unsafe_hash=True)
class Label(BaseType):
    """Type specifically made for labels that are mapped to a key.
    Parameters
    ----------
    path
        Path to a file that has multiple classes separated by new line character.
        Index of the line will be considered as the key for each class. This parameter
        is mutually exclusive to `classes` parameter
    classes
        A list, tuple or a dict of classes. If it's list or a tuple, index of the
        class, is the key. If it's a dictionary, the key must be an integer
    """
    path: Union[str, Path, None] = field(default=None)
    classes: Union[List, Tuple, Dict, None] = field(default=None, repr=False)
    def __post_init__(self):
        # Lazily load classes from file, one label per line, when only a
        # path was provided.
        if self.classes is None:
            if self.path is None:
                raise ValueError(
                    "Must provide either classes as a list or " "path to a text file that contains classes"
                )
            with Path(self.path).open(mode="r") as f:
                self.classes = tuple(item.strip() for item in f.readlines())
        # Build label -> integer-key reverse map used by deserialize().
        if isinstance(self.classes, dict):
            self._reverse_map = {}
            for key, value in self.classes.items():
                if not isinstance(key, int):
                    raise TypeError("Key from the label dict must be an int")
                self._reverse_map[value] = key
        elif isinstance(self.classes, (list, tuple)):
            self._reverse_map = {value: i for i, value in enumerate(self.classes)}
        else:
            raise TypeError("`classes` must be a list, tuple or a dict")
    def deserialize(self, label: str) -> torch.Tensor:
        # Label string -> scalar tensor holding its integer key.
        index = self._reverse_map[label]
        return torch.as_tensor(index)
    def serialize(self, key: torch.Tensor) -> str:
        # Scalar tensor key -> label string.
        return self.classes[key.item()]
| 37.886792 | 107 | 0.625498 |
cee315eed5f0a61687812085025ea0886ab98648 | 1,175 | py | Python | src/targetdb/models/unique_object.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | src/targetdb/models/unique_object.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | src/targetdb/models/unique_object.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
# from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import backref
from sqlalchemy.orm import relation
from . import Base
class unique_object(Base):
    """SQLAlchemy model for a unique astronomical object (ICRS position)."""
    __tablename__ = "unique_object"
    unique_object_id = Column(
        BigInteger,
        primary_key=True,
        unique=True,
        autoincrement=True,
        comment="Unique unique_object identifier",
    )
    ra = Column(Float, comment="ICRS (degree)")
    dec = Column(Float, comment="ICRS (degree)")
    epoch = Column(String, comment="Reference epoch, e.g., J2000.0, J2015.5, etc.")
    # Row bookkeeping timestamps (set by the caller, not by the database).
    created_at = Column(DateTime)
    updated_at = Column(DateTime)
    def __init__(
        self,
        ra,
        dec,
        epoch,
        created_at,
        updated_at,
    ):
        self.ra = ra
        self.dec = dec
        self.epoch = epoch
        self.created_at = created_at
        self.updated_at = updated_at
| 24.479167 | 83 | 0.676596 |
fd0453517c400232abc9e49aadb2e105e7f179a0 | 4,009 | py | Python | tests/test_traffic.py | MichelKhalaf/traffic | 84e315d84a4ab9d8711414e7c275733e27a089ed | [
"MIT"
] | null | null | null | tests/test_traffic.py | MichelKhalaf/traffic | 84e315d84a4ab9d8711414e7c275733e27a089ed | [
"MIT"
] | null | null | null | tests/test_traffic.py | MichelKhalaf/traffic | 84e315d84a4ab9d8711414e7c275733e27a089ed | [
"MIT"
] | null | null | null | import pandas as pd
from traffic.core import Flight
from traffic.data import eurofirs
from traffic.data.samples import switzerland
def test_properties() -> None:
    """Sanity-check the global properties of the switzerland sample."""
    # Collection-wide statistics.
    assert len(switzerland) == 1244
    assert f"{switzerland.start_time}" == "2018-08-01 05:00:00+00:00"
    assert f"{switzerland.end_time}" == "2018-08-01 21:59:50+00:00"
    assert len(switzerland.callsigns) == 1243
    assert len(switzerland.icao24) == 842

    # Lookup by callsign resolves aircraft metadata.
    by_callsign = switzerland["DLH02A"]
    assert by_callsign is not None
    expected_tail = (
        "Tail(icao24='3c6645', registration='D-AIRE',"
        " typecode='A321', flag='🇩🇪')"
    )
    assert repr(by_callsign.aircraft) == expected_tail

    # Lookup by icao24 transponder code.
    by_icao = switzerland["4baa61"]
    assert by_icao is not None
    assert by_icao.callsign == "THY7WR"

    # The flight with the highest minimum altitude in the collection.
    selected = max(switzerland, key=lambda flight: flight.altitude_min)
    assert selected.flight_id is None
    assert selected.min("altitude") == 47000.0
    assert selected.icao24 == "aab6c0"
def test_index() -> None:
    """Exercise the different ways a Traffic collection can be indexed."""
    records = [
        dict(
            icao24="500142",
            callsign="T7STK",
            start="2018-08-01 15:00",
            stop="2018-08-01 16:00",
        ),
        dict(icao24="4068cb", callsign="EXS33W", start=None, stop=None),
        dict(
            icao24="4009f9",
            callsign="BAW585E",
            start=None,
            stop="2018-08-01 17:00",
        ),
    ]
    df = pd.DataFrame.from_records(records)

    # Indexing with a DataFrame (or a single row) selects matching flights.
    assert len(switzerland[df]) == 3
    assert switzerland[df.iloc[0]] is not None

    # A list index may mix callsigns and icao24 codes.
    selection = switzerland[["EXS33W", "4009f9"]]
    assert selection is not None
    assert len(selection) == 4

    # Integer indexing: in range yields a flight, out of range yields None.
    first = switzerland[0]
    assert first is not None
    assert first.callsign == "SAA260"
    assert switzerland[2000] is None

    # Slicing yields a sub-collection.
    head = switzerland[:2]
    assert head is not None
    assert head.callsigns == {"SAA260", "SAA261"}
    assert head.icao24 == {"00b0ed"}
def test_aircraft() -> None:
    """Check that aircraft metadata resolves to the expected typecodes."""
    selection = switzerland[["EXS33W", "4009f9"]]
    assert selection is not None
    typecodes = {flight.max("typecode") for flight in selection.aircraft_data()}
    assert typecodes == {"A320", "B733"}
def high_altitude(flight: Flight) -> bool:
    """Predicate: True when the flight never descends below 35,000 ft."""
    minimum_altitude = flight.altitude_min  # type: ignore
    return minimum_altitude > 35000
def test_chaining() -> None:
    """End-to-end check of the lazy-evaluation chaining API.

    Nothing in the chain below runs until the final ``.eval()`` call,
    which dispatches the work across four workers.
    """
    sw_filtered = (
        switzerland.between("2018-08-01", "2018-08-02") # type: ignore
        .inside_bbox(eurofirs["LSAS"])
        .assign_id()
        .filter_if(high_altitude)
        .resample("10s")
        .filter()
        .filter(altitude=53)
        .unwrap()
        .airborne()
        .eval(max_workers=4)
    )
    assert len(sw_filtered) == 784
    assert sw_filtered.data.shape[0] > 80000
    # After resampling at 10 s, the shortest flight has 60 samples (10 min).
    assert min(len(f) for f in sw_filtered) == 60
    assert sw_filtered.data.altitude.max() == 47000.0
    # not smart to pop this out
    flight_id: str = sw_filtered.flight_ids.pop()
    handle = sw_filtered[flight_id]
    assert handle is not None
    # Flight ids embed the callsign before the first underscore.
    assert handle.callsign == flight_id.split("_")[0]
def test_none() -> None:
    """Empty selections must be signalled with None, never an empty Traffic."""
    # No aircraft flies above 60,000 ft in this sample.
    impossible = switzerland.query("altitude > 60000")
    assert impossible is None
    # An empty time window yields nothing.
    empty_window = switzerland.after("2018-08-01").before("2018-08-01")
    assert empty_window is None
    # The same contract holds through the lazy-iteration API.
    lazy_result = switzerland.iterate_lazy().query("altitude > 60000").eval()
    assert lazy_result is None
def test_aggregate() -> None:
    """agg_xy must produce a grid whose shape matches the data extent."""
    flight = switzerland[0]
    assert flight is not None
    flight = flight.compute_xy()

    resolution = {"x": 2e2, "y": 5e3}

    def axis_cells(axis: str) -> int:
        # Number of grid cells spanned along one axis at the given resolution.
        values = flight.data[axis]
        step = resolution[axis]
        return int(abs(values.max() // step) + abs(values.min() // step) + 1)

    expected_shape = (axis_cells("x"), axis_cells("y"))
    grid = flight.agg_xy(resolution, icao24="nunique").to_xarray()
    assert grid.icao24.values.shape == expected_shape
| 29.262774 | 80 | 0.589673 |
a010065d9926e79e2b2765d9da0b5eaa7164edf1 | 5,580 | py | Python | swagger_server/test/operational_controllers/test_site_and_domain_roles.py | hedleyroos/core-access-control | 9c275beecd4f37280f7234b358f5f9caad372378 | [
"BSD-3-Clause"
] | null | null | null | swagger_server/test/operational_controllers/test_site_and_domain_roles.py | hedleyroos/core-access-control | 9c275beecd4f37280f7234b358f5f9caad372378 | [
"BSD-3-Clause"
] | 71 | 2017-12-04T09:14:15.000Z | 2019-01-30T11:38:06.000Z | swagger_server/test/operational_controllers/test_site_and_domain_roles.py | hedleyroos/core-access-control | 9c275beecd4f37280f7234b358f5f9caad372378 | [
"BSD-3-Clause"
] | 1 | 2021-08-17T12:05:27.000Z | 2021-08-17T12:05:27.000Z | # coding: utf-8
from __future__ import absolute_import
import random
import uuid
from collections import OrderedDict
from ge_core_shared import db_actions, decorators
from flask import json
from project.settings import API_KEY_HEADER
from swagger_server.models.site_role import SiteRole # noqa: E501
from swagger_server.models.domain import Domain # noqa: E501
from swagger_server.models.role import Role # noqa: E501
from swagger_server.models.site import Site # noqa: E501
from swagger_server.models.site_create import SiteCreate # noqa: E501
from swagger_server.models.domain_role import DomainRole # noqa: E501
from swagger_server.test import BaseTestCase, db_create_entry
class TestOperationalController(BaseTestCase):
    """Integration test for the site_and_domain_roles operational endpoint.

    ``setUp`` builds a randomised domain tree with roles attached at every
    level, hangs a site off the deepest domain, and records the created
    role ids in ``self.data``, keyed by ``"d:<domain_id>"`` /
    ``"s:<site_id>"`` in insertion order (root domain first).
    """

    @decorators.db_exception
    def setUp(self):
        """Create the domain tree, the site, and their roles in the DB."""
        super().setUp()
        # Create top level parent domain.
        self.domain_data = {
            "name": ("%s" % uuid.uuid1())[:30],
            "description": "a super cool test domain",
        }
        self.domain_model = db_actions.crud(
            model="Domain",
            api_model=Domain,
            data=self.domain_data,
            action="create"
        )
        role_data = {
            "label": ("%s" % uuid.uuid1())[:30],
            "description": "user_site_role to create",
        }
        role_model = db_actions.crud(
            model="Role",
            api_model=Role,
            data=role_data,
            action="create"
        )
        domain_role_data = {
            "role_id": role_model.id,
            "domain_id": self.domain_model.id
        }
        db_actions.crud(
            model="DomainRole",
            api_model=DomainRole,
            data=domain_role_data,
            action="create"
        )
        # Set a single role on the top level domain.
        # self.data preserves creation order: root first, then each child.
        self.data = OrderedDict()
        self.data["d:%s" % self.domain_model.id] = [role_model.id]
        domain_id = self.domain_model.id
        for index in range(1, random.randint(5, 20)):
            # Create a domain tree with roles per domain.
            domain_data = {
                "name": ("%s" % uuid.uuid1())[:30],
                "description": "%s" % uuid.uuid1(),
                "parent_id": domain_id
            }
            domain_model = db_actions.crud(
                model="Domain",
                api_model=Domain,
                data=domain_data,
                action="create"
            )
            # Set id for next iteration.
            domain_id = domain_model.id
            roles = []
            self.data["d:%s" % domain_model.id] = []
            # NOTE(review): the inner loop reuses the name ``index``,
            # shadowing the outer loop variable (harmless here).
            for index in range(1, random.randint(5, 20)):
                role_data = {
                    "label": ("%s" % uuid.uuid1())[:30],
                    "description": "user_site_role to create",
                }
                role_model = db_actions.crud(
                    model="Role",
                    api_model=Role,
                    data=role_data,
                    action="create"
                )
                roles.append(role_model)
            for role in roles:
                domain_role_data = {
                    "role_id": role.id,
                    "domain_id": domain_model.id
                }
                db_actions.crud(
                    model="DomainRole",
                    api_model=DomainRole,
                    data=domain_role_data,
                    action="create"
                )
                self.data["d:%s" % domain_model.id].append(role.id)
        # Assign the site to the last domain in the tree.
        site_data = {
            "name": ("%s" % uuid.uuid1())[:30],
            "domain_id": domain_id,
            "description": "a super cool test site",
            "client_id": 0,
            "is_active": True,
        }
        self.site_model = db_create_entry(
            model="Site",
            data=site_data,
        )
        # create a bunch of roles for a site..
        self.data["s:%s" % self.site_model.id] = []
        for index in range(1, random.randint(5, 20)):
            role_data = {
                "label": ("%s" % uuid.uuid1())[:30],
                "description": "user_site_role to create",
            }
            role_model = db_actions.crud(
                model="Role",
                api_model=Role,
                data=role_data,
                action="create"
            )
            site_role_data = {
                "role_id": role_model.id,
                "site_id": self.site_model.id,
            }
            # NOTE(review): return value assigned but never used.
            site_role_model = db_actions.crud(
                model="SiteRole",
                api_model=SiteRole,
                data=site_role_data,
                action="create"
            )
            self.data["s:%s" % self.site_model.id].append(role_model.id)
        self.headers = {API_KEY_HEADER: "test-api-key"}

    def test_get_site_and_domain_roles(self):
        """GET the roles map for the site and verify roles accumulate
        down the domain tree (each level inherits its ancestors' roles).
        """
        response = self.client.open(
            '/api/v1/ops/site_and_domain_roles/{site_id}'.format(site_id=self.site_model.id),
            method='GET', headers=self.headers)
        r_data = json.loads(response.data)
        roles = []
        # Each sub domain and finally the site also has the previous roles in
        # the tree as well as their own.
        for key, value in self.data.items():
            roles.extend(value)
            self.assertListEqual(sorted(r_data["roles_map"][key]), sorted(roles))
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    import unittest
    unittest.main()
| 33.214286 | 93 | 0.527419 |
2b427775708cea40073aaa346e9100ddc03ea188 | 4,811 | py | Python | Extensions/regexs.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | Extensions/regexs.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | Extensions/regexs.py | semustafacevik/SearchDomainDH | dc2409c42f6749b5e7f2373c60c21d463531a507 | [
"bzip2-1.0.6"
] | null | null | null | from Extensions.functions import *
import re
class Regexs:
    """Extract emails, hostnames, file URLs and LinkedIn data from the
    search-engine results collected in the module-level ``result`` dict
    (star-imported from ``Extensions.functions``).

    Matches are written into the shared ``result_response`` dict; each
    item is prefixed with the '¨' separator character.
    """

    def __init__(self, word):
        """Prepare both the raw and the cleaned-up result text.

        :param word: the target domain/keyword being searched for
        """
        self.word = word
        self.totalResult = ''
        self.getTotalResult()
        self.disinfectedResult = self.totalResult
        self.cleanResult()
        # Scratch list shared by the get* methods and unique().
        self.temp = []

    def cleanResult(self):
        """Strip HTML tags, entities and URL-encoding noise from the text."""
        # These fragments are replaced by a single space so adjacent
        # tokens do not get glued together.
        dirtyItems = [
            "<title",
            "</title>",
            "<p",
            "</p>",
            "<div",
            "</div>",
            "<cite",
            "</cite>",
            # NOTE(review): the published copy of this file was HTML-entity
            # decoded at some point, turning these entries into bare quote /
            # space characters; restored to the entities the cleanup clearly
            # targets -- confirm against the upstream repository.
            "&quot;",
            "&nbsp;",
            "&#160;",
            "q=",
            "x22@",
            "<span",
            "</span>",
            "mail",
            "Mail",
            "posta",
            "Posta"]
        for dirtyItem in dirtyItems:
            self.disinfectedResult = self.disinfectedResult.replace(dirtyItem, " ")
        # These fragments are removed outright.
        dirtyItems = [
            "<em>",
            "</em>",
            "<b>",
            "</b>",
            "%2f",
            "%3a",
            "<strong>",
            "</strong>",
            "<wbr>",
            "</wbr>",
            "<",
            ">",
            ":",
            "=",
            ";",
            "&",
            "%3A",
            "%3D",
            "%3C",
            "/",
            "\\"]
        for dirtyItem in dirtyItems:
            self.disinfectedResult = self.disinfectedResult.replace(dirtyItem, "")

    def getEmails(self):
        """Collect e-mail addresses into result_response['resultEmails']."""
        print('\nSearching Emails...')
        result_response['resultEmails'] = ''
        regex_emails = re.compile(r"[\w_-]+(?:\.[\w_-]+)*@(?:[\w0-9](?:[\w0-9-]*[\w0-9])?\.)+[\w0-9](?:[\w0-9-]*[\w0-9])")
        self.temp = regex_emails.findall(self.disinfectedResult)
        emails = self.unique()
        for email in emails:
            # Discard URL fragments and implausibly long matches.
            if(not email.count('http') and not email.count('www') and len(email)<30+len(self.word)):
                result_response['resultEmails'] += '¨' + email.lower()
        print('OK - Emails!')

    def getFileUrls(self):
        """Collect links to document files (doc/ppt/pdf/xls/csv)."""
        print('\nSearching File Urls...')
        result_response['resultFileUrls'] = ''
        regex_fileUrls = re.compile('<a href="(.*?)"')
        self.temp = regex_fileUrls.findall(self.totalResult)
        fileUrls = self.unique()
        for url in fileUrls:
            # Keep document-like links, skipping translation services.
            # (A dead ``else: pass`` branch was removed here.)
            if (url.count('doc') or url.count('ppt') or url.count('pdf') or url.count('xls') or url.count('csv')) and not url.count('translat'):
                result_response['resultFileUrls'] += '¨' + url
        print('OK - File Urls!')

    def getHostnames(self):
        """Collect hostnames ending in the target word."""
        print('\nSearching Hostnames...')
        result_response['resultHostnames'] = ''
        regex_hostnames = re.compile(r'[a-zA-Z0-9.-]*\.' + self.word)
        self.temp = regex_hostnames.findall(self.totalResult)
        hostnames = self.unique()
        for hostname in hostnames:
            # Drop URL-encoding residue ('2f' = '/') and numeric artefacts.
            if(not hostname.startswith('2f') and not hostname[0].isdigit()):
                result_response['resultHostnames'] += '¨' + hostname
        print('OK - Hostnames!')

    def getLinkedInLinks(self):
        """Collect www.linkedin.com links from the LinkedIn result page."""
        print('\nSearching LinkedIn Links...')
        result_response['resultLinkedInLinks'] = ''
        linkedInResult = result['result_linkedin']
        # Normalise the localised subdomain to the canonical one.
        linkedInResult = linkedInResult.replace('tr.linkedin.com', 'www.linkedin.com')
        regex_linkedInLinks = re.compile(r"=\"https:\/\/www.linkedin.com(.*?)\"")
        self.temp = regex_linkedInLinks.findall(linkedInResult)
        regex_linkedInLinks = re.compile(r"=https:\/\/www.linkedin.com(.*?)&")
        self.temp += regex_linkedInLinks.findall(linkedInResult)
        linkedInLinks = self.unique()
        for link in linkedInLinks:
            result_response['resultLinkedInLinks'] += '¨' + 'https://www.linkedin.com' + link
        print('OK - LinkedIn Links!')

    def getLinkedInProfiles(self):
        """Collect profile names from LinkedIn result page titles."""
        print('\nSearching LinkedIn Profiles...')
        result_response['resultLinkedInProfiles'] = ''
        linkedInResult = result['result_linkedin']
        # Decode the HTML ampersand entity. (The published copy showed a
        # no-op replace('&', '&') -- an entity-decoding artefact.)
        linkedInResult = linkedInResult.replace('&amp;', '&')
        regex_linkedInProfiles = re.compile(r"[\w.,_ |\\/&-]* [\-|\|]* LinkedIn")
        self.temp = regex_linkedInProfiles.findall(linkedInResult)
        linkedInProfiles = self.unique()
        for profile in linkedInProfiles:
            profile = profile.replace(' | LinkedIn', '').replace(' - LinkedIn', '')
            if profile != " ":
                result_response['resultLinkedInProfiles'] += '¨' + profile
        print('OK - LinkedIn Profiles!')

    def getTotalResult(self):
        """Concatenate every search-engine result into one blob."""
        for value in result.values():
            self.totalResult += value + ' * '

    def unique(self) -> list:
        """Deduplicate self.temp. NOTE: set() does not preserve order, so
        the output ordering of the get* methods is nondeterministic."""
        return list(set(self.temp))
| 28.808383 | 145 | 0.503222 |
4ad03ecfd1691d42b2b55dffb97d331b5d887cfc | 984 | py | Python | Chapter 5 - Dictionary & Sets/02_dictionary_methods.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 5 - Dictionary & Sets/02_dictionary_methods.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | null | null | null | Chapter 5 - Dictionary & Sets/02_dictionary_methods.py | alex-dsouza777/Python-Basics | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | [
"MIT"
] | 1 | 2021-04-21T10:23:08.000Z | 2021-04-21T10:23:08.000Z | myDict = {
"fast" : "Fast Learner in a quick manner",
"root" : "A Coder",
"marks" : [1,2,5],
"anotherdict" : {'root':'Player'},
1 : 2
}
#Dictionary Methods
print(myDict.keys()) #Prints the keys
print(type(myDict.keys())) #prints types
print(myDict.values()) #Shows values
print(list(myDict.keys())) #convert to list
print(myDict.items()) #Prints (keys,value) for all contents in the dictionary
print(myDict)
updateDict = {
"friend" : "groot",
"fruit" : "apples are fav",
"root" : "my friend"
}
myDict.update(updateDict) #updates the dictionary by adding key-value pairs from updateDict
print(myDict)
print(myDict.get("root")) #Prints value associated with key root
print(myDict["root"]) #Prints value associated with key root
#The difference between .get and [] syntax in dictionaries
print(myDict.get("root2")) #Returns NONE as root2 is not present in the dictionary
print(myDict["root2"]) #throws error as root2 is not present im the dictionary | 30.75 | 91 | 0.699187 |
b9b1a7b846749cce3c24fba766b55c7540c0e716 | 1,069 | py | Python | tethys/core/exceptions.py | tethys-platform/tethys | c27daf5a832b05f9d771b04355001c331bc08766 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-05-20T19:03:14.000Z | 2020-06-03T20:43:34.000Z | tethys/core/exceptions.py | tethys-platform/tethys | c27daf5a832b05f9d771b04355001c331bc08766 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tethys/core/exceptions.py | tethys-platform/tethys | c27daf5a832b05f9d771b04355001c331bc08766 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TethysException(Exception):
    """Root of the Tethys exception hierarchy."""
    pass
class TethysRuntimeError(RuntimeError, TethysException):
    """Tethys error that also subclasses RuntimeError, so generic
    RuntimeError handlers will catch it as well."""
    pass
class TethysRONotFound(TethysException):
    """Tethys error: repository object (RO) not found."""
    pass
class TethysBadRepositoryObjectValue(TethysException):
    """Tethys error: bad repository object value."""
    pass
class TethysInputNodesNotFound(TethysException):
    """Tethys error: input nodes not found."""
    pass
class TethysSessionClosed(TethysException):
    """Tethys error: session closed."""
    pass
class TethysStreamInProcess(TethysException):
    """Tethys error: stream in process."""
    pass
class TethysROFieldValidationError(TethysException):
    """Tethys error: repository object (RO) field validation failed."""
    pass
| 23.23913 | 74 | 0.775491 |
b8585b224f3c8ffe421bf63553fdde9cbde14ee4 | 8,420 | py | Python | RockPaperOrScissor/RockPaperOrScissor.py | TheBlackPlague/TensorflowExample | e9c7c259a83235bbc377da6c39d13ef6ec99ea06 | [
"MIT"
] | 1 | 2019-11-02T15:10:52.000Z | 2019-11-02T15:10:52.000Z | RockPaperOrScissor/RockPaperOrScissor.py | TheBlackPlague/TensorflowExample | e9c7c259a83235bbc377da6c39d13ef6ec99ea06 | [
"MIT"
] | null | null | null | RockPaperOrScissor/RockPaperOrScissor.py | TheBlackPlague/TensorflowExample | e9c7c259a83235bbc377da6c39d13ef6ec99ea06 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 Shaheryar Sohail
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import path to ensure we aren't repeating ourselves.
import os.path
# Import wget to download database.
import wget


def download_if_missing(url, zip_path, extract_dir, label):
    """Download *url* to *zip_path* unless the archive or its already
    extracted directory exists on disk.

    :param url: source URL of the zip archive
    :param zip_path: local path where the archive is saved
    :param extract_dir: directory the archive extracts into
    :param label: human-readable name used in progress messages
    """
    if os.path.exists(zip_path):
        print("Found the %s zip. Not downloading again. :D" % label)
    # Skip the download when either the archive or the extracted
    # directory is already present.  (The original compared the result
    # of os.path.exists against True/False literally.)
    if not os.path.exists(zip_path) and not os.path.exists(extract_dir):
        print("Downloading %s..." % label)
        wget.download(url, zip_path)
        print("")


# Check if database & test database exist. If not, then download them.
download_if_missing(
    "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps.zip",
    "/tmp/rps.zip",
    "/tmp/rps/",
    "database",
)
download_if_missing(
    "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps-test-set.zip",
    "/tmp/rps-test-set.zip",
    "/tmp/rps-test-set/",
    "test database",
)
# Import modules to extract the zip file and save contents in an organized way.
import os
import zipfile


def extract_if_missing(zip_path, extract_dir, found_msg, label):
    """Extract *zip_path* into /tmp/ unless *extract_dir* already exists.

    :param zip_path: archive to extract
    :param extract_dir: directory whose presence marks a prior extraction
    :param found_msg: message printed when extraction is skipped
    :param label: human-readable name used in progress messages
    """
    if os.path.exists(extract_dir):
        print(found_msg)
        return
    # Zip Extraction and saving.  The context manager guarantees the
    # archive handle is closed even if extraction raises (the original
    # only closed it on success).
    print("Extracting %s. This may take a couple of seconds." % label)
    with zipfile.ZipFile(zip_path, "r") as zipreference:
        zipreference.extractall("/tmp/")
    print("Extracted %s." % label)


# Find the databases or extract them.
extract_if_missing("/tmp/rps.zip", "/tmp/rps/", "Database found!", "database")
extract_if_missing(
    "/tmp/rps-test-set.zip",
    "/tmp/rps-test-set/",
    "Test Database found!",
    "test database",
)
# Save directories as var.  (os.path.join with a single argument is a
# no-op, so plain string literals suffice.)
rockdir = "/tmp/rps/rock"
paperdir = "/tmp/rps/paper"
scissordir = "/tmp/rps/scissors"
# Save files as var.  Each directory is listed once and the result reused
# (the original called os.listdir twice per directory).
rockfile = os.listdir(rockdir)
paperfile = os.listdir(paperdir)
scissorfile = os.listdir(scissordir)
print("Total Training Rock Images: ", len(rockfile))
print("Total Training Paper Images: ", len(paperfile))
print("Total Training Scissor Images: ", len(scissorfile))
# Import Tensorflow (for Neural Network) and Keras_preprocessing (for preprocessing).
import tensorflow as tf
import keras_preprocessing
# Import Image for generating image data.
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
# Model has been confirmed to already exist: load it instead of retraining.
if os.path.exists("/tmp/rps.h5"):
    print("Model has been found. Not going to train.")
    # Restore the trained network (architecture + weights) from disk.
    model = tf.keras.models.load_model("/tmp/rps.h5")
    model.summary()
# Check if model has already been trained & saved.
if not os.path.exists("/tmp/rps.h5"):
    # Model wasn't found.
    print("The model wasn't found. The training time can take around 3 minutes, and all the way up to 2 hours.")
    # Set our training directory and make an image data generator for training.
    # Augmentation (rotations, shifts, shears, zooms, flips) enlarges the
    # effective training set.
    trainingdir = "/tmp/rps/"
    print("Setting training directory to: " + trainingdir)
    trainingdatagenerator = ImageDataGenerator(
        rescale=1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode="nearest"
    )
    # Set our testing directory and make an image data generator for testing.
    # Validation images are only rescaled, never augmented.
    testingdir = "/tmp/rps-test-set/"
    # Fixed message: the original said "training" for the testing directory.
    print("Setting testing directory to: " + testingdir)
    testingdatagenerator = ImageDataGenerator(rescale=1./255)
    # Have our train generator and test generator be flowing from our training and test directories.
    print("Creating a training generator with flow from: " + trainingdir)
    traingenerator = trainingdatagenerator.flow_from_directory(
        trainingdir,
        target_size=(150, 150),
        class_mode="categorical"
    )
    print("Creating a testing generator with flow from: " + testingdir)
    testgenerator = testingdatagenerator.flow_from_directory(
        testingdir,
        target_size=(150, 150),
        class_mode="categorical"
    )
    # Create a DNNM (Deep Neural Network Model).
    print("Creating a DNNM (Deep Neural Network Model).")
    model = tf.keras.models.Sequential([
        # Convolutions for Deep Neural Network (DNN).
        # First convolution.
        tf.keras.layers.Conv2D(
            64,
            (3, 3),
            activation="relu",
            # Input Shape = (150 x 150) with 3 byte colors.
            input_shape=(150, 150, 3)
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Second convolution.
        tf.keras.layers.Conv2D(
            64,
            (3, 3),
            activation="relu"
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Third convolution.
        tf.keras.layers.Conv2D(
            128,
            (3, 3),
            activation="relu"
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Fourth convolution.
        tf.keras.layers.Conv2D(
            128,
            (3, 3),
            activation="relu"
        ),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Flatten the results into the DNN.
        tf.keras.layers.Flatten(),
        # Dropout randomly disables half the units during training.
        tf.keras.layers.Dropout(0.5),
        # Create a dense hidden layer with 512 neurons.
        tf.keras.layers.Dense(
            512,
            activation="relu"
        ),
        # Create an output layer having 3 neurons (Rock, Paper, or Scissor).
        tf.keras.layers.Dense(
            3,
            activation="softmax"
        )
    ])
    model.summary()
    # Compile the model.
    print("Compiling model.")
    model.compile(
        loss="categorical_crossentropy",
        optimizer="rmsprop",
        metrics=[
            "accuracy"
        ]
    )
    print("Model compiled.")
    # Train the model via generators.
    # NOTE(review): fit_generator is deprecated in TF >= 2.1 in favour of
    # model.fit (which accepts generators directly); kept as-is so the
    # script still runs on the TF version it was written for.
    print("Training the model.")
    history = model.fit_generator(
        traingenerator,
        epochs=25,
        validation_data=testgenerator,
        verbose=1
    )
    print("Model trained succesfully.")
    # Save the model.
    print("Saving the model.")
    saveloc = "/tmp/rps.h5"
    model.save(saveloc)
    print("Model saved to: " + saveloc + " succesfully.")
# Import Numpy (for Value management).
import numpy as np

# Set the testing image. Make sure to change path to location of image.
path = "master:TensorflowExample/RockPaperOrScissor/Data/t1.png"
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
# Add a leading batch dimension: the model expects (batch, 150, 150, 3).
x = np.expand_dims(x, axis=0)
# Create an array.
setofimage = np.vstack([x])
# Predict what it is.
prediction = model.predict(setofimage, batch_size=10)[0]
# Output result to user.  The softmax output is a probability vector that
# rarely equals exactly 1.0, so the original `prediction[i] == 1` checks
# could print nothing at all; pick the most probable class instead.
# Class indices follow flow_from_directory's alphabetical folder order:
# paper, rock, scissors.
labels = ("Paper!", "Rock!", "Scissor!")
print(labels[int(np.argmax(prediction))])
| 34.793388 | 112 | 0.655107 |
5545a8c854970c4ab0b937590bb26837c1a840d3 | 2,197 | py | Python | Source/tools/Layer_Distance_Calculation/LN/draw_LN.py | Brian-ning/HMNE | 1b4ee4c146f526ea6e2f4f8607df7e9687204a9e | [
"Apache-2.0"
] | 1 | 2020-10-18T13:15:00.000Z | 2020-10-18T13:15:00.000Z | Source/tools/Layer_Distance_Calculation/LN/draw_LN.py | Brian-ning/HMNE | 1b4ee4c146f526ea6e2f4f8607df7e9687204a9e | [
"Apache-2.0"
] | null | null | null | Source/tools/Layer_Distance_Calculation/LN/draw_LN.py | Brian-ning/HMNE | 1b4ee4c146f526ea6e2f4f8607df7e9687204a9e | [
"Apache-2.0"
] | null | null | null | #coding: utf-8
import pickle
import networkx as nx
import matplotlib.pyplot as plt

# Edge-list files, one per time slice of the LN network.
layers = ['0_LN_1995_1999.txt', '1_LN_2000_2004.txt', '2_LN_2005_2009.txt', '3_LN_2010_2014.txt']

# Node ids of the two groups drawn with different colours/shapes below.
nodes_infor = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 40, 43, 44, 45, 49, 50, 51, 52, 53, 54, 55, 56, 59, 60, 61, 62, 63, 64, 65, 68, 69, 73, 74, 75, 82, 86, 87, 88, 89, 90, 91, 92, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 139, 140, 141, 142, 146, 147, 148, 149, 150, 151, 152, 158, 159, 160, 163], [13, 14, 26, 27, 38, 39, 41, 42, 46, 47, 48, 57, 58, 66, 67, 70, 71, 72, 76, 77, 78, 79, 80, 81, 83, 84, 85, 94, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 143, 144, 145, 153, 154, 155, 156, 157, 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190]]
# Edge files store node ids as strings, so convert both groups once
# (replaces the duplicated per-group conversion code).
nodes_infor = [[str(i) for i in group] for group in nodes_infor]

# Build one graph per layer from its edge list.
graphs = []
for layer_file in layers:
    graph = nx.Graph(name=layer_file)
    # 'r' instead of the original 'r+': reading must not require write access.
    with open(layer_file, 'r') as f:
        for line in f:  # iterate lazily rather than materialising readlines()
            src, dst = line.strip().split()
            # add_edge creates missing endpoints automatically.
            graph.add_edge(src, dst)
    graphs.append(graph)

# The merged graph is the union of all layers and provides a shared layout.
merged_graph = nx.Graph(name='merged')
for g in graphs:
    merged_graph.add_nodes_from(g.nodes())
    merged_graph.add_edges_from(g.edges())
pos = nx.spring_layout(merged_graph)
graphs.append(merged_graph)

# Draw each layer (and the merged graph) with the shared layout.
for g in graphs:
    plt.figure(g.name)
    nx.draw_networkx_nodes(g, pos, node_size=150, nodelist=list(set(nodes_infor[0]) & set(g.nodes())), node_color='r', node_shape='o', alpha=0.8)
    nx.draw_networkx_nodes(g, pos, node_size=150, nodelist=list(set(nodes_infor[1]) & set(g.nodes())), node_color='b', node_shape='D', alpha=0.8)
    nx.draw_networkx_edges(g, pos)
    nx.draw_networkx_labels(g, pos, font_size=8)
    plt.axis('off')
    plt.savefig(g.name + '.pdf')
plt.show()
| 43.94 | 863 | 0.633591 |
aafa454ae47d86e107f5497ed75f0b66e83672d2 | 945 | py | Python | tests/applier/data_greenplum_cdc_applier_metacols.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | 16 | 2017-10-31T21:43:26.000Z | 2019-08-11T08:49:06.000Z | tests/applier/data_greenplum_cdc_applier_metacols.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | 1 | 2017-11-01T06:25:56.000Z | 2017-11-01T06:25:56.000Z | tests/applier/data_greenplum_cdc_applier_metacols.py | albertteoh/data_pipeline | a99f1c7412375b3e9f4115108fcdde517b2e71a6 | [
"Apache-2.0"
] | 9 | 2017-10-30T05:23:15.000Z | 2022-02-17T03:53:09.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import collections
import data_pipeline.constants.const as const
import data_postgres_cdc_applier_metacols as pg
TestCase = pg.TestCase
tests = pg.tests
| 37.8 | 62 | 0.780952 |
faff181697c2e615cb476a1fe75ae920dfec092b | 4,127 | py | Python | ronin/utils/types.py | b0r3dd3v/konnichuwaf | 588643cc9d18ef951778c53e96b116483ca5c9c0 | [
"Apache-2.0"
] | 53 | 2016-12-25T02:10:38.000Z | 2021-09-27T18:48:26.000Z | ronin/utils/types.py | b0r3dd3v/konnichuwaf | 588643cc9d18ef951778c53e96b116483ca5c9c0 | [
"Apache-2.0"
] | 4 | 2016-12-28T04:39:27.000Z | 2020-05-04T18:34:02.000Z | ronin/utils/types.py | tliron/ronin | 588643cc9d18ef951778c53e96b116483ca5c9c0 | [
"Apache-2.0"
] | 8 | 2017-03-19T00:56:52.000Z | 2021-04-07T18:19:21.000Z | # Copyright 2016-2018 Tal Liron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from .unicode import string, to_str
from inspect import isclass
def import_symbol(name):
    """
    Imports a symbol based on its fully qualified name.

    :param name: symbol name
    :type name: str
    :returns: symbol
    :raises ImportError: if could not import the module
    :raises AttributeError: if could not find the symbol in the module
    """

    # A usable name must be non-empty and qualified by at least one module.
    if not name or ('.' not in name):
        raise ImportError('import not found: {}'.format(name))
    module_name, symbol_name = name.rsplit('.', 1)
    module = __import__(module_name, fromlist=[symbol_name], level=0)
    return getattr(module, symbol_name)
def type_name(the_type):
    """
    Human-readable name of type(s). Built-in types will avoid the
    "__builtin__"/"builtins" prefix. Tuples are always handled as a join of "|".

    :param the_type: type(s)
    :type the_type: type|(type)
    :returns: name of type(s)
    :rtype: str
    """

    if isinstance(the_type, tuple):
        return '|'.join([type_name(v) for v in the_type])
    module = to_str(the_type.__module__)
    name = to_str(the_type.__name__)
    # Python 2 names the builtin module '__builtin__' but Python 3 names it
    # 'builtins'; the original only checked the Python 2 spelling, so on
    # Python 3 every builtin rendered as e.g. 'builtins.int' instead of 'int'.
    if module in ('__builtin__', 'builtins'):
        return name
    return '{}.{}'.format(module, name)
def verify_type(value, the_type):
    """
    Raises :class:`TypeError` if the value is not an instance of the type.

    :param value: value
    :param the_type: type or type name
    :type the_type: type|str
    :raises TypeError: if ``value`` is not an instance of ``the_type``
    :raises ~exceptions.ValueError: if ``the_type`` is invalid
    :raises ImportError: if could not import the module
    :raises AttributeError: if could not find the symbol in the module
    """

    # Resolve a fully-qualified name into the actual type object.
    if isinstance(the_type, string):
        the_type = import_symbol(the_type)
    if not isclass(the_type):
        raise ValueError('{} is not a type'.format(the_type))
    if isinstance(value, the_type):
        return
    raise TypeError('not an instance of {}: {}'.format(
        type_name(the_type), type_name(type(value))))
def verify_subclass(value, the_type):
    """
    Raises :class:`TypeError` if the value is not a subclass of the type.

    :param value: value
    :param the_type: type or type name
    :type the_type: type|str
    :raises TypeError: if ``value`` is not a subclass of ``the_type``
    :raises ~exceptions.ValueError: if ``the_type`` is invalid
    :raises ImportError: if could not import the module
    :raises AttributeError: if could not find the symbol in the module
    """

    # Use the py2/py3 compat alias ``string`` like the sibling verify_type
    # does (the original checked plain ``str`` here, inconsistently).
    if isinstance(the_type, string):
        the_type = import_symbol(the_type)
    if not isclass(the_type):
        raise ValueError('{} is not a type'.format(the_type))
    if not issubclass(value, the_type):
        raise TypeError('not a subclass of {}: {}'.format(type_name(the_type),
                                                          type_name(type(value))))
def verify_type_or_subclass(value, the_type):
    """
    Raises :class:`TypeError` if the value is not an instance or subclass of the type.

    :param value: value
    :param the_type: type or type name
    :type the_type: type|str
    :raises TypeError: if ``value`` is not an instance or subclass of ``the_type``
    :raises ~exceptions.ValueError: if ``the_type`` is invalid
    :raises ImportError: if could not import the module
    :raises AttributeError: if could not find the symbol in the module
    """

    # Classes are validated as subclasses, anything else as an instance.
    verifier = verify_subclass if isclass(value) else verify_type
    verifier(value, the_type)
| 34.680672 | 87 | 0.660528 |
c693676bd9534c0534f1fb896b0fa309f1b6eb1e | 950 | py | Python | ams/ticket/migrations/0001_initial.py | magnuspedro/ams | 72ef810d14d9a4724e781489d081140be6674d60 | [
"MIT"
] | null | null | null | ams/ticket/migrations/0001_initial.py | magnuspedro/ams | 72ef810d14d9a4724e781489d081140be6674d60 | [
"MIT"
] | null | null | null | ams/ticket/migrations/0001_initial.py | magnuspedro/ams | 72ef810d14d9a4724e781489d081140be6674d60 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2019-01-16 16:07
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Ticket`` table."""

    # First migration of this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): this default is a *fixed* UUID evaluated once when
                # the migration was generated, so every row created through the
                # migration default shares the same code unless the model uses a
                # callable default (e.g. uuid.uuid4) — confirm against the model.
                ('code', models.CharField(default=uuid.UUID('69516a9d-5183-4fe5-be63-d3e50d14e480'), max_length=255)),
                ('price', models.FloatField()),
                ('lot', models.IntegerField()),
                # status defaults to False (e.g. not yet sold/validated — semantics
                # not visible here; confirm in the model/app code).
                ('status', models.BooleanField(default=False)),
                ('date', models.DateField()),
                ('half', models.BooleanField(default=False)),
                ('delegation', models.CharField(max_length=50)),
                ('event', models.CharField(max_length=50)),
            ],
        ),
    ]
| 31.666667 | 118 | 0.564211 |
5c966f97748efbc5708c3d627780487c4e33f8d2 | 1,077 | py | Python | setup.py | Pack3tL0ss/pycentral | a61d263f5534bbb55ce45a63fcf06ba1b16c1aa0 | [
"MIT"
] | null | null | null | setup.py | Pack3tL0ss/pycentral | a61d263f5534bbb55ce45a63fcf06ba1b16c1aa0 | [
"MIT"
] | null | null | null | setup.py | Pack3tL0ss/pycentral | a61d263f5534bbb55ce45a63fcf06ba1b16c1aa0 | [
"MIT"
] | null | null | null | import setuptools
from os import path

# The long description shown on PyPI comes verbatim from the README
# sitting next to this setup.py.
_here = path.abspath(path.dirname(__file__))
with open(path.join(_here, 'README.md'), encoding='utf-8') as readme:
    long_description = readme.read()

setuptools.setup(
    name="pycentral",
    version="0.0.2",
    author="aruba-automation",
    author_email="aruba-automation@hpe.com",
    description="Aruba Central Python Package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/aruba/pycentral",
    packages=setuptools.find_packages(exclude=['docs', 'tests', 'sample_scripts']),
    classifiers=[
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        'Intended Audience :: System Administrators',
        'Topic :: System :: Networking',
        'Development Status :: 4 - Beta'
    ],
    python_requires='>=3.6',
    install_requires=['requests', 'PyYAML', 'urllib3', 'certifi'],
    # Optional coloured logging support: pip install pycentral[colorLog]
    extras_require={
        'colorLog': ["colorlog"]
    }
)
201d5b01be5b85ee551b03c86f5da9f7f5b70370 | 563 | py | Python | imgcls/data/dataset_mapper.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | null | null | null | imgcls/data/dataset_mapper.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | null | null | null | imgcls/data/dataset_mapper.py | TuranSKT/detectron2_class | c90e68abbd39afa8c34d83ac760cabf3b5d02868 | [
"MIT"
] | null | null | null | '''
@Copyright (c) tkianai All Rights Reserved.
@Author : tkianai
@Github : https://github.com/tkianai
@Date : 2020-04-26 17:01:20
@FilePath : /ImageCls.detectron2/imgcls/data/dataset_mapper.py
@Description :
'''
from . import classification_utils as c_utils
from detectron2.data.dataset_mapper import DatasetMapper as _DatasetMapper
class DatasetMapper(_DatasetMapper):
    """Detectron2 dataset mapper specialised for image classification.

    Behaves exactly like the stock mapper except that the transform
    generators are built by the classification utils module.
    """

    def __init__(self, cfg, is_train=True):
        super().__init__(cfg, is_train)
        # Override the augmentation pipeline created by the base class.
        self.tfm_gens = c_utils.build_transform_gen(cfg, is_train)
bf559d40f487cc6b1d7a396e68e5c2e8cf5cd2e2 | 1,061 | py | Python | docs/recipes/record_to_bag.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | 4 | 2019-09-22T18:38:33.000Z | 2021-04-02T01:37:10.000Z | docs/recipes/record_to_bag.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | 208 | 2019-03-27T18:34:39.000Z | 2021-07-26T20:36:07.000Z | docs/recipes/record_to_bag.py | ChrisTimperley/roswire | 3220583305dc3e90b8cf0a7653cbc1b9c7fdb83b | [
"Apache-2.0"
] | null | null | null | import time
import roswire
# Paths *inside* the 'roswire/example:mavros' container image.
FN_SITL = '/ros_ws/src/ArduPilot/build/sitl/bin/arducopter'
FN_PARAMS = '/ros_ws/src/ArduPilot/copter.parm'
rsw = roswire.ROSWire()
# Shell files sourced inside the container to set up the ROS environment.
sources = ['/opt/ros/indigo/setup.bash', '/ros_ws/devel/setup.bash']
with rsw.launch('roswire/example:mavros', sources) as system:
    with system.roscore() as ros:
        # for this example, we need to separately launch a software-in-the-loop
        # simulator for the robot platform
        ps_sitl = system.shell.popen(f'{FN_SITL} --model copter --defaults {FN_PARAMS}')
        # use roslaunch to launch the application inside the ROS session
        ros.roslaunch('apm.launch', package='mavros', args={'fcu_url': 'tcp://127.0.0.1:5760@5760'})
        # to record all ROS topic data for 300 seconds
        with ros.record('filepath-on-host-machine.bag') as recorder:
            time.sleep(300)
        # alternatively, we can use a recorder directly
        recorder = ros.record('filepath-on-host-machine2.bag')
        recorder.start()
        time.sleep(300)
        recorder.stop()
| 36.586207 | 100 | 0.671065 |
97e357a31cd81f55fc5015d08d9f1dd7cd16edb4 | 5,672 | py | Python | test.py | HysMagus/Profesor2Cash | 990a88be41546f4d073ceeaeda0c148e9588374b | [
"MIT"
] | 47 | 2017-12-24T16:39:06.000Z | 2021-12-19T21:56:09.000Z | test.py | HysMagus/Profesor2Cash | 990a88be41546f4d073ceeaeda0c148e9588374b | [
"MIT"
] | 5 | 2017-12-25T06:21:02.000Z | 2021-09-07T23:34:11.000Z | test.py | HysMagus/Profesor2Cash | 990a88be41546f4d073ceeaeda0c148e9588374b | [
"MIT"
] | 21 | 2017-12-24T22:53:32.000Z | 2021-04-17T12:05:51.000Z | #!/usr/bin/env python
import main
import json
import sys
import telepot
from telegram import TelegramBot
from twitter import Twitter
import time
def test_get_coins_bittrex():
    """Fetching the Bittrex coin list fills both symbol<->name lookup maps."""
    main.get_coins_bittrex()
    by_symbol, by_name = main.symbol_name, main.name_symbol
    assert len(by_symbol) > 0
    assert len(by_name) > 0
    assert by_symbol["BTC"] == "bitcoin"
    assert by_name["bitcoin"] == "BTC"
def test_extract_symbols():
    """extract_symbols must find every known (ticker, name) pair in each tweet."""
    main.get_coins_bittrex()
    texts = [
        'Coin of the day: Digibyte (DGB). Using a Blockchain which is 40 times faster than Bitcoin and having one of the most decentralized mining systems in the world - based on 5 different synergistic algorithms. DGB adherents call the coin "The Sleeping Giant".',
        'Yes, there are 1,500+ coins now. And yes, most are jokes or outright scams. But among those coins are Ethereum, Monero, Litecoin and other proven winners. By implying Sether is a joke is a huge mistake. Go to sether.io and read it. You will see it is in the mold of a winner.',
        'Coin of the day: BURST -- First truly Green coin and most overlooked coin. Uses 400 times less power than Bitcoin. Super secure and private. Includes smart contracts, encrypted messaging, decentralized wallet, libertine blockchain. Most undervalued coin. https://www.burst-coin.org ',
        'Coin of the day: $DOGE'
    ]
    # Expected extraction result for the text at the same index above.
    symbols = [
        set([('BTC', 'bitcoin'), ('DGB', 'digibyte')]),
        set([('XMR', 'monero'), ('LTC', 'litecoin'), ('ETH', 'ethereum')]),
        set([('BURST', 'burstcoin'), ('BTC', 'bitcoin')]),
        set([('DOGE', 'dogecoin')])
    ]
    for i, text in enumerate(texts):
        extracted = main.extract_symbols(text)
        try:
            assert extracted == symbols[i]
        except AssertionError:
            # Show the mismatch, then re-raise with a bare `raise` so the
            # original assertion traceback is preserved (was `raise e`).
            print(extracted, symbols[i])
            raise
def test_get_sentiment_analysis():
    """Per-coin sentiment scores have the right sign; overall mood is computed too."""
    coins = [
        ("BTC", "bitcoin"),
        ("BCH", "bitcoin cash")
    ]
    text = "Bitcoin is good. BCH is bad."
    sentiment, overall = main.get_sentiment_analysis(text, coins)
    # Both coins are mentioned, so both must be scored.
    assert coins[0] in sentiment
    assert coins[1] in sentiment
    # "good" => positive score, "bad" => negative score.
    assert sentiment[coins[0]] > 0
    assert sentiment[coins[1]] < 0
    coins = [
        ("DGB", "digibyte")
    ]
    text = 'Coin of the day: Digibyte (DGB). Using a Blockchain which is 40 times faster than Bitcoin and having one of the most decentralized mining systems in the world - based on 5 different synergistic algorithms. DGB adherents call the coin "The Sleeping Giant".'
    sentiment, overall = main.get_sentiment_analysis(text, coins)
    # A promotional tweet should yield a positive overall sentiment.
    assert overall > 0
def test_get_verdict():
    """A neutrally-scored coin is bought when the overall mood is positive."""
    scores = {
        ('DGB', 'digibyte'): 0.0,
        ('BCH', 'bitcoin cash'): -0.6
    }
    picks = main.get_verdict(scores, 0.167)
    assert picks == [('DGB', 'digibyte')]
def test_analyze():
    """analyze() recommends a buy on bullish text and abstains on bearish text."""
    main.get_coins_bittrex()
    # Bearish tweet: nothing should be bought.
    bearish = main.analyze("do not buy dogecoin, it is bad")
    assert len(bearish) == 0
    # Bullish tweet: exactly one coin should be picked.
    bullish = main.analyze("please buy dogecoin")
    assert len(bullish) == 1
def test_twitter_tweet_callback(run_forever):
    """Feeding a bullish tweet through the live callback must not raise.

    When ``run_forever`` is truthy the function then blocks so the Telegram
    bot can be exercised manually.
    """
    main.get_coins_bittrex()
    main.bot = TelegramBot()
    tweet = "please buy doge"
    author = "mcafee2cash"
    url = "https://twitter.com/mcafee2cash/status/944746808466698240"
    try:
        main.twitter_tweet_callback(tweet, author, url)
    except Exception as e:
        raise AssertionError(e)
    while run_forever:
        time.sleep(1)
def test_telegram_summary():
    """A summary callback query yields (text, inline-keyboard) reply pairs."""
    main.get_coins_bittrex()
    main.bot = TelegramBot()
    try:
        replies = main.bot.get_query_replies("summary_doge")
        assert len(replies) > 0
        first = replies[0]
        assert len(first) == 2
        assert type(first[0]) is str
        assert type(first[1]) is telepot.namedtuple.InlineKeyboardMarkup
    except Exception as e:
        # Surface any failure as an assertion so the runner counts it.
        raise AssertionError(e)
def test_telegram_buy():
    """A buy callback query yields at least one two-element reply starting with text."""
    main.get_coins_bittrex()
    main.bot = TelegramBot()
    try:
        replies = main.bot.get_query_replies("buy_doge")
        assert len(replies) > 0
        first = replies[0]
        assert len(first) == 2
        assert type(first[0]) is str
    except Exception as e:
        # Surface any failure as an assertion so the runner counts it.
        raise AssertionError(e)
def test_tweet_handler():
    """handle_tweet copes with an image tweet loaded from the JSON fixtures."""
    with open("test-data.json") as fixture:
        samples = json.load(fixture)
    Twitter.handle_tweet(None, tweet_json=samples["tweet_image"])
def test_main():
    """End-to-end run: fixture tweets flow through the handler with a mocked stream."""
    with open("test-data.json") as f:
        sample_tweets = json.load(f)
    # Populate coins
    main.get_coins_bittrex()
    # Telegram bot
    bot = TelegramBot()
    # Twitter stream
    class MockTwitter:
        # Stand-in for the real stream: every callback must find something to buy.
        def tweet_callback(text, user, link):
            to_buy = main.analyze(text)
            assert len(to_buy) > 0
            bot.notify_tweet(text, user, link, to_buy)
    tweets = [
        sample_tweets["tweet_image"],
        sample_tweets["tweet_text"]
    ]
    count = 0
    for tweet in tweets:
        Twitter.handle_tweet(MockTwitter, tweet_json=tweet)
        count += 1
    # NOTE(review): count is incremented synchronously above, so this wait only
    # matters if handle_tweet defers its work (e.g. to a thread) — confirm.
    while count < len(tweets):
        time.sleep(1)
def test_twitter():
    """Smoke test: constructing the Twitter stream client must not raise."""
    # The instance itself is not needed; the unused local binding was removed.
    Twitter()
if __name__ == "__main__":
    # Registry of runnable tests, keyed by the name given on the command line.
    tests = {
        "get_coins_bittrex": test_get_coins_bittrex,
        "extract_symbols": test_extract_symbols,
        "get_sentiment_analysis": test_get_sentiment_analysis,
        "get_verdict": test_get_verdict,
        "analyze": test_analyze,
        "telegram_summary": test_telegram_summary,
        "telegram_buy": test_telegram_buy,
        "tweet_handler": test_tweet_handler
    }
    test_queue = {}
    try:
        if "(" in sys.argv[1]:
            # SECURITY: this eval()s a command-line argument, e.g.
            # `python test.py "test_twitter_tweet_callback(True)"`.
            # Developer convenience only — never run with untrusted input.
            eval(sys.argv[1])
            print("Test passed.")
            sys.exit()
        elif len(sys.argv[1:]) == 0:
            raise IndexError
        for test_name in sys.argv[1:]:
            test_queue[test_name] = tests[test_name]
    except KeyError:
        # Unknown test name: bare raise keeps the original traceback
        # (previously `raise e`, which rewrites the raise site).
        raise
    except IndexError:
        # No arguments given: run the whole suite.
        test_queue = tests
    except Exception:
        raise
    for test_name in test_queue:
        try:
            test_queue[test_name]()
        except AssertionError:
            print(f'Test: {test_name} failed')
            raise
        print(f'\tTest: {test_name} passed')
    print(f'{len(test_queue)} tests passed.')
334d3900b722f0846bbededd958eedce23100053 | 592 | py | Python | setup.py | laureho/cellx | 425d1e6bb21e9243f8cd5be46a9235d23f3df2fc | [
"MIT"
] | null | null | null | setup.py | laureho/cellx | 425d1e6bb21e9243f8cd5be46a9235d23f3df2fc | [
"MIT"
] | null | null | null | setup.py | laureho/cellx | 425d1e6bb21e9243f8cd5be46a9235d23f3df2fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
# Packaging metadata for the CellX libraries distribution.
setup(name='cellx',
      version='0.1',
      description='CellX libraries',
      author='Alan R. Lowe, Christopher Soelistyo, Laure Ho',
      author_email='a.lowe@ucl.ac.uk',
      url='https://github.com/quantumjot/cellx',
      packages=['cellx'],
      # Runtime dependencies; tensorflow accounts for most of the install size.
      install_requires=['matplotlib',
                        'numpy',
                        'scikit-image',
                        'scikit-learn',
                        'scipy',
                        'tensorflow',
                        'tqdm'],
      python_requires='>=3.6')
| 31.157895 | 61 | 0.486486 |
feca9a83ed470257fa32809a538a4c53ae9ef56c | 10,048 | py | Python | miners/xlm.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | miners/xlm.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | miners/xlm.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | #!/bin/python3.7
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""XLM Language Modelling miner
This file demonstrates training the XLM miner with language modelling.
Example:
$ python miners/text/xlm.py
To run with a config file and debug
$ python miners/text/xlm.py --debug --config <path to config file>
"""
import argparse
import copy
import os
import math
import random
import torch
import sys
import torch.nn.functional as F
import bittensor
from tqdm import tqdm
from munch import Munch
from termcolor import colored
from types import SimpleNamespace
from synapses.xlm import XLMSynapse
from typing import Tuple, List, Optional
from bittensor.dataloaders.text_dataloader import GenesisTextDataloader
from pytorch_transformers import WarmupCosineWithHardRestartsSchedule
from loguru import logger
logger = logger.opt(colors=True)
class Miner( bittensor.miner.BaseMiner ):
    """Bittensor miner that trains an XLM language-model synapse.

    Wires together the synapse, an SGD optimizer with a warmup-cosine
    schedule, and the genesis text dataloader, and implements the hooks
    that BaseMiner.run() calls each epoch (training, save/reload checks,
    and row-weight publication).
    """
    def __init__(
        self,
        config: Munch = None,
        **kwargs
    ):
        """Build the miner from ``config`` (defaults if None), then
        initialise synapse, optimizer, scheduler and dataset."""
        # ---- Load Config ----
        if config == None:
            config = Miner.default_config();
        config = copy.deepcopy(config); bittensor.config.Config.update_with_kwargs(config, kwargs )
        Miner.check_config( config )
        logger.info( bittensor.config.Config.toString( config ) )
        self.config = config
        # ---- Row Weights ----
        # Single-entry placeholder; padded out to metagraph size in get_row_weights().
        self.row_weights = torch.ones([1])
        # ---- Nucleus ----
        self.synapse = XLMSynapse( self.config )
        # ---- Optimizer ----
        self.optimizer = torch.optim.SGD(self.synapse.parameters(), lr = self.config.miner.learning_rate, momentum=self.config.miner.momentum)
        # NOTE(review): the scheduler is created but never stepped anywhere in
        # this class — confirm whether BaseMiner drives it, otherwise it is dead.
        self.scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, 50, 300)
        # ---- Dataset ----
        self.dataset = GenesisTextDataloader( self.config.miner.batch_size_train, 20 )
        super(Miner, self).__init__( self.config, **kwargs )
    @staticmethod
    def default_config() -> Munch:
        """Return the default miner config built from the argparse defaults."""
        parser = argparse.ArgumentParser();
        Miner.add_args( parser )
        config = bittensor.config.Config.to_config( parser );
        return config
    @staticmethod
    def add_args( parser: argparse.ArgumentParser ):
        """Register miner CLI flags, plus those of the synapse, base miner
        and dataloader."""
        parser.add_argument('--miner.learning_rate', default=0.01, type=float, help='Training initial learning rate.')
        parser.add_argument('--miner.momentum', default=0.98, type=float, help='Training initial momentum for SGD.')
        parser.add_argument('--miner.epoch_length', default=500, type=int, help='Iterations of training per epoch')
        parser.add_argument('--miner.n_epochs', default=-1, type=int, help='Number of training epochs, if < 0 runs for ever.')
        parser.add_argument('--miner.batch_size_train', default=1, type=int, help='Training batch size.')
        parser.add_argument('--miner.name', default='xlm', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ')
        XLMSynapse.add_args( parser )
        bittensor.miner.BaseMiner.add_args( parser )
        GenesisTextDataloader.add_args( parser )
    @staticmethod
    def check_config(config: Munch):
        """Validate miner hyper-parameters and delegate to component checks."""
        assert config.miner.momentum > 0 and config.miner.momentum < 1, "momentum must be a value between 0 and 1"
        assert config.miner.batch_size_train > 0, "batch_size_train must be a positive value"
        assert config.miner.learning_rate > 0, "learning_rate must be a positive value."
        XLMSynapse.check_config( config )
        bittensor.miner.BaseMiner.check_config( config )
        GenesisTextDataloader.check_config( config )
    def should_run( self, epoch: int ) -> bool:
        r""" Called by miner.run() every epoch, if the response is false, training stops.
        """
        # n_epochs < 0 means "run forever".
        if self.config.miner.n_epochs < 0:
            return True
        elif epoch < self.config.miner.n_epochs:
            return True
        else:
            return False
    def should_save( self ) -> bool:
        r""" Called by miner.run() after every epoch.
            If this function returns True, the model is saved to disk and can be reloaded later.
            Returns:
                should_save (bool):
                    True by default. Saves model after each epoch.
        """
        # Save only when the epoch improved on the best loss seen so far.
        if self.epoch_loss < self.last_saved_loss:
            return True
        else:
            return False
    def should_reload(self) -> bool:
        r""" Called by miner.run() after every epoch.
            If the function returns True the model state dict is saved to miner.full_path.
            Returns:
                should_reload (bool):
                    False by default. Does not reload the model after each epoch.
        """
        # Reload the last checkpoint if any parameter has gone NaN.
        # NOTE(review): when there is no NaN this falls through and implicitly
        # returns None (falsy), not False.
        if torch.any(torch.isnan(torch.cat([param.view(-1) for param in self.synapse.parameters()]))):
            return True
    def get_state_dict( self ) -> dict:
        r""" Called by miner.save_model().
            Returns a state dict which can be passed to miner.reload_from_state_dict on reload.
            Returns:
                state_dict (:obj:`dict`):
                    Dictionary containing run state information such as the model parameters.
        """
        return {
            'synapse_state': self.synapse.state_dict(),
            'optimizer_state': self.optimizer.state_dict(),
        }
    def reload_from_state_dict( self, state_dict: dict):
        r""" Called by miner.reload_model().
            Reloads the training state from the passed state_dict.
            Args:
                state_dict (:obj:`dict`):
                    Dictionary containing run state information such as the model parameters. Output
                    of get_state_dict.
        """
        self.synapse.load_state_dict( state_dict['synapse_state'] )
        self.optimizer.load_state_dict( state_dict['optimizer_state'] )
    # ---- Get Row Weights ----
    def get_row_weights( self ) -> torch.FloatTensor:
        r""" Called after each training epoch. Returns row_weights to be set on chain.
            Returns:
                row_weights ( torch.FloatTensor, shape=(self.metagraph.n) ):
                    torch row_weights matching the metagraph size.
                    weight values should be normalized and be in range [0,1].
        """
        # Pad with zeros up to the current metagraph size, then L1-normalize.
        self.row_weights = torch.nn.functional.pad( self.row_weights, pad = [0, self.metagraph.n - self.row_weights.numel()] )
        self.row_weights = F.normalize( self.row_weights, p = 1, dim = 0) # Ensure normalization.
        return self.row_weights
    # ---- Get epoch batches ----
    def get_epoch_batches( self, epoch:int ) -> List[ dict ]:
        r""" Returns training batches for each epoch.
            Returns:
                batches ( List[dict], shape=(self.config.miner.epoch_length) ):
                    List of batches as dictionary containing tokenized sentences
                    'inputs' = torch.LongTensor.
        """
        batches = []
        epoch_data = self.dataset.dataloader( self.config.miner.epoch_length )
        for iteration, inputs in tqdm( enumerate( epoch_data ) ):
            batch = { 'inputs': inputs }
            batches.append( batch )
            # Stop once epoch_length batches have been collected.
            if iteration == self.config.miner.epoch_length:
                break
        return batches
    # ---- Training call ----
    def training_call( self, batch: dict ) -> SimpleNamespace:
        r""" Runs a single training batch through the nucleus and applies a gradient update.
            Args:
                batch ( dict, `required`):
                    training batch dictionary as returned from get_epoch_batches
            Returns:
                outputs ( SimpleNamespace ):
                    SimpleNamespace output as returned by a nucleus forward call.
                    Must include fields local_loss, remote_loss, distillation_loss
        """
        # ---- Forward pass ----
        inputs = batch['inputs'].to( self.synapse.device )
        output = self.synapse.remote_forward(
            neuron = self,
            inputs = inputs,
            training = True,
        )
        # ---- Backward pass ----
        # Total loss is the sum of local, distillation and remote target terms.
        output.loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
        output.loss.backward() # Accumulates gradients on the nucleus.
        self.optimizer.step() # Applies accumulated gradients.
        self.optimizer.zero_grad() # Zeros out gradients for next accummulation
        # ---- Train row weights ----
        # Exponential moving average (alpha = 0.03) of the router's batch weights.
        batch_weights = torch.mean(output.router.weights, axis = 0).to( self.synapse.device ) # Average over batch.
        self.row_weights = (1 - 0.03) * self.row_weights + 0.03 * batch_weights # Moving avg update.
        self.row_weights = F.normalize( self.row_weights, p = 1, dim = 0) # Ensure normalization.
        # ---- Update global loss ----
        return output
if __name__ == "__main__":
    # ---- Build and Run ----
    # Construct the miner with its default config and enter the training loop.
    miner = Miner()
    miner.run()
| 43.124464 | 158 | 0.642416 |
6502fd42b8998f6d0f73ba1b82be5b22ffd84bd8 | 1,164 | py | Python | var/spack/repos/builtin/packages/py-multidict/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/py-multidict/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/py-multidict/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMultidict(PythonPackage):
"""Multidict is dict-like collection of key-value pairs where key
might be occurred more than once in the container."""
homepage = "https://github.com/aio-libs/multidict"
pypi = "multidict/multidict-6.0.2.tar.gz"
version('6.0.2', sha256='5ff3bd75f38e4c43f1f470f2df7a4d430b821c4ce22be384e1459cb57d6bb013')
version('5.2.0', sha256='0dd1c93edb444b33ba2274b66f63def8a327d607c6c790772f448a53b6ea59ce')
version('5.1.0', sha256='25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5')
version('4.7.6', sha256='fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430')
depends_on('python@3.7:', when='@6:', type=('build', 'run'))
depends_on('python@3.6:', when='@5.1:', type=('build', 'run'))
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-pip@18:', when='@:4', type='build')
depends_on('py-setuptools@40:', type='build')
| 44.769231 | 95 | 0.725086 |
576f1b16ab7ab486a063b6563cf531b50a7d2b35 | 1,007 | py | Python | doctor_tests/installer/common/restore_congress.py | opnfv/doctor | 72a1f8c92f1692f1ea8dcb5bc706ec9939c30e0a | [
"Apache-2.0"
] | 3 | 2017-04-05T19:09:36.000Z | 2019-02-28T05:27:21.000Z | doctor_tests/installer/common/restore_congress.py | opnfv/doctor | 72a1f8c92f1692f1ea8dcb5bc706ec9939c30e0a | [
"Apache-2.0"
] | 2 | 2017-09-20T07:31:39.000Z | 2018-04-09T09:38:45.000Z | doctor_tests/installer/common/restore_congress.py | opnfv/doctor | 72a1f8c92f1692f1ea8dcb5bc706ec9939c30e0a | [
"Apache-2.0"
] | 7 | 2017-05-24T02:21:08.000Z | 2019-12-09T16:54:02.000Z | ##############################################################################
# Copyright (c) 2017 ZTE Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import os
import shutil
def restore_drivers_config():
co_base = "/var/lib/config-data/puppet-generated/congress"
if not os.path.isdir(co_base):
co_base = ""
co_conf = co_base + "/etc/congress/congress.conf"
co_conf_bak = co_base + "/etc/congress/congress.conf.bak"
if not os.path.isfile(co_conf_bak):
print('Bak_file:%s does not exist.' % co_conf_bak)
else:
print('restore: %s' % co_conf)
shutil.copyfile(co_conf_bak, co_conf)
os.remove(co_conf_bak)
return
restore_drivers_config()
| 33.566667 | 78 | 0.594836 |
7442059ba07b2ed1d7164b9be60b8bbc92fec651 | 1,391 | py | Python | python/paddle/fluid/contrib/__init__.py | panyx0718/Paddle | 1ebd7434d545f8c439792468298f1108b631668e | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/__init__.py | panyx0718/Paddle | 1ebd7434d545f8c439792468298f1108b631668e | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/contrib/__init__.py | panyx0718/Paddle | 1ebd7434d545f8c439792468298f1108b631668e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# Import every contrib submodule and pull its public names into this package.
from . import decoder
from .decoder import *
from . import memory_usage_calc
from .memory_usage_calc import *
from . import op_frequence
from .op_frequence import *
from . import quantize
from .quantize import *
from . import int8_inference
from .int8_inference import *
from . import reader
from .reader import *
from . import slim
from .slim import *
from . import utils
from .utils import *
from . import extend_optimizer
from .extend_optimizer import *
# Re-export the union of all submodule public APIs at the package level.
__all__ = []
__all__ += decoder.__all__
__all__ += memory_usage_calc.__all__
__all__ += op_frequence.__all__
__all__ += quantize.__all__
__all__ += int8_inference.__all__
__all__ += reader.__all__
__all__ += slim.__all__
__all__ += utils.__all__
__all__ += extend_optimizer.__all__
| 30.23913 | 74 | 0.775701 |
acc6590649121405f2beeb8ac5401d44fd7c0200 | 878 | py | Python | discordbot/commands/minigames/akinator_cmd.py | AXXIAR/MiniGamesBot | 7d2e15dfafbe703949326545177ea5782d52a294 | [
"MIT"
] | null | null | null | discordbot/commands/minigames/akinator_cmd.py | AXXIAR/MiniGamesBot | 7d2e15dfafbe703949326545177ea5782d52a294 | [
"MIT"
] | null | null | null | discordbot/commands/minigames/akinator_cmd.py | AXXIAR/MiniGamesBot | 7d2e15dfafbe703949326545177ea5782d52a294 | [
"MIT"
] | null | null | null | from discordbot.categories.minigames import Minigames
from discordbot.commands.command import Command
from discordbot.gamemanager import GameManager
from discordbot.user.discord_games.akinator_dc import AkinatorDisc
from discordbot.user.session import Session
class AkinatorCommand(Command):
    """Bot command that launches the Akinator guessing minigame."""
    # presumably assigned by the bot framework before commands run — confirm.
    bot = None
    name = "akinator"
    help = "Start the akinator to guess with yes/no questions what character you are thinking of. Character can be fictional or real."
    brief = "Start the akinator to guess with yes/no questions what character you are thinking of."
    args = ""
    category = Minigames
    @classmethod
    async def handler(cls, context):
        """Announce the game in the invoking channel and start a new session
        for the command author."""
        msg = await context.channel.send("Starting **akinator** minigame")
        session = Session(cls.bot, context, msg, "akinator", AkinatorDisc, [context.author])
        await GameManager.start_session(session)
| 39.909091 | 134 | 0.751708 |
ad65998936d409d870cc039266c379615e86f31a | 9,493 | py | Python | docs/conf.py | criticalhop/ordered | 8783bd61fa3496e929c507f474340daecbcc2ae2 | [
"MIT"
] | 33 | 2021-08-16T08:21:14.000Z | 2022-01-04T03:24:04.000Z | docs/conf.py | criticalhop/ordered | 8783bd61fa3496e929c507f474340daecbcc2ae2 | [
"MIT"
] | 3 | 2021-08-13T18:55:40.000Z | 2021-08-16T06:44:15.000Z | docs/conf.py | criticalhop/ordered | 8783bd61fa3496e929c507f474340daecbcc2ae2 | [
"MIT"
] | 2 | 2021-09-01T17:47:11.000Z | 2021-09-19T04:48:48.000Z | # This file is execfile()d with the current directory set to its containing dir.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
# -- Path setup --------------------------------------------------------------
__location__ = os.path.join(
os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../ordered"))
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../ordered")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
    import sphinx

    cmd_line_template = (
        "sphinx-apidoc --implicit-namespaces -f -o {outputdir} {moduledir}"
    )
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    args = cmd_line.split(" ")

    def _version_tuple(version_string):
        """Best-effort numeric (major, minor) from a version string.

        Avoids external dependencies (e.g. packaging.version); a leading
        non-numeric component parses as 0.
        """
        parts = []
        for part in version_string.split(".")[:2]:
            digits = ""
            for ch in part:
                if not ch.isdigit():
                    break
                digits += ch
            parts.append(int(digits or 0))
        return tuple(parts)

    # Compare numerically: the previous string-tuple comparison mis-ordered
    # versions, e.g. ("1", "10") < ("1", "7") lexicographically.
    if _version_tuple(sphinx.__version__) >= (1, 7):
        # Sphinx >= 1.7 expects argv without the program name.
        args = args[1:]
        apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"m2r2"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "ordered"
copyright = "2021, CriticalHop Inc."
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "" # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = "" # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_width": "300px",
"page_width": "1200px"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
    # Prefer the installed package's own version string; if `ordered` is not
    # importable (e.g. docs built without installing the package), keep the
    # placeholder `release` assigned earlier in this file.
    from ordered import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "ordered-doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
# "preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "user_guide.tex", "ordered Documentation", "Andrew Gree", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping --------------------------------------------------------
# Build e.g. "3.8" from the running interpreter so the intersphinx link to the
# Python docs matches the version used to build these docs.
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    "sphinx": ("http://www.sphinx-doc.org/en/stable", None),
    "python": ("https://docs.python.org/" + python_version, None),
    "matplotlib": ("https://matplotlib.org", None),
    "numpy": ("https://docs.scipy.org/doc/numpy", None),
    "sklearn": ("https://scikit-learn.org/stable", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
    "pyscaffold": ("https://pyscaffold.org/en/stable", None),
}
| 33.426056 | 83 | 0.694617 |
beecdc7c6a002bd58f1c7f965469edcf47d60149 | 1,098 | py | Python | src/snapshot_data.py | deepakas/dstoolkit-mlops-base | 405d792c7448fc82734a2c16a4e5be780de2d0e2 | [
"MIT"
] | null | null | null | src/snapshot_data.py | deepakas/dstoolkit-mlops-base | 405d792c7448fc82734a2c16a4e5be780de2d0e2 | [
"MIT"
] | null | null | null | src/snapshot_data.py | deepakas/dstoolkit-mlops-base | 405d792c7448fc82734a2c16a4e5be780de2d0e2 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import argparse
from azureml.core import Datastore
import utils
def main(datastore, data_path):
    """Write a small snapshot CSV locally and upload it to an AzureML datastore.

    :param datastore: Name of the AzureML datastore to upload to.
    :param data_path: Local directory to write the snapshot into; also used as
        the target path inside the datastore.
    """
    # Get snapshot of your data and save it in datastore
    os.makedirs(data_path, exist_ok=True)
    with open(os.path.join(data_path, 'data.csv'), 'w') as f:
        f.write('column1,column2,column3\n1,2,3\n4,5,6\n7,8,9\n')

    ws = utils.retrieve_workspace()
    # Use a distinct name for the Datastore object so the `datastore` name
    # argument is not shadowed (the original shadowing also made the final
    # message print the object repr instead of the datastore name).
    datastore_obj = Datastore(ws, name=datastore)
    datastore_obj.upload(
        src_dir=data_path,
        target_path=data_path,
        overwrite=False
    )
    print(f'Snapshot saved in datastore {datastore}, path {data_path}')
def parse_args(args_list=None):
    """Parse command-line arguments for the snapshot script.

    :param args_list: Optional explicit argument list; ``None`` means
        ``sys.argv`` is used.
    :return: An ``argparse.Namespace`` with ``datastore`` and ``path``.
    """
    arg_parser = argparse.ArgumentParser()
    # Both flags are mandatory string options.
    for flag in ('--datastore', '--path'):
        arg_parser.add_argument(flag, type=str, required=True)
    return arg_parser.parse_args(args_list)
if __name__ == "__main__":
    # Script entry point: forward parsed CLI arguments to main().
    cli_args = parse_args()
    main(datastore=cli_args.datastore, data_path=cli_args.path)
| 23.869565 | 71 | 0.687614 |
2d3b0da1f5ab68cfd171a61ed0f4d50998b4c160 | 387 | py | Python | count_1_in_binary.py | Jwy-jump/python_codesets | bb9a38d5dbf7be4d34b6b502ee684bb48dcfcd31 | [
"Apache-2.0"
] | null | null | null | count_1_in_binary.py | Jwy-jump/python_codesets | bb9a38d5dbf7be4d34b6b502ee684bb48dcfcd31 | [
"Apache-2.0"
] | null | null | null | count_1_in_binary.py | Jwy-jump/python_codesets | bb9a38d5dbf7be4d34b6b502ee684bb48dcfcd31 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
class Solution:
    # @param num: an integer
    # @return: an integer, the number of ones in num
    def countOnes(self, num):
        """Count set bits in the low 32 bits of ``num``.

        Masking with 0xffffffff treats negative inputs as 32-bit two's
        complement, matching the original shift-and-test loop.
        """
        return bin(num & 0xffffffff).count('1')
# easy: http://lintcode.com/problem/count-1-in-binary
| 22.764706 | 53 | 0.516796 |
e9d526ad48344c8cf91c79d9a383cb70b8f18c3c | 1,553 | py | Python | emukit/core/initial_designs/latin_design.py | mmahsereci/emukit | 5f6cd941784dcc3a9e1eb82466b306bf98e17a74 | [
"Apache-2.0"
] | 1 | 2020-03-07T08:26:12.000Z | 2020-03-07T08:26:12.000Z | emukit/core/initial_designs/latin_design.py | aerometu/emukit | 3e39e6000b90dd926d2e026fc2d42b6dea5beb04 | [
"Apache-2.0"
] | null | null | null | emukit/core/initial_designs/latin_design.py | aerometu/emukit | 3e39e6000b90dd926d2e026fc2d42b6dea5beb04 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
try:
import pyDOE
except ImportError:
raise ImportError('pyDOE needs to be installed in order to use latin design')
from .base import ModelFreeDesignBase
from .. import ParameterSpace
class LatinDesign(ModelFreeDesignBase):
    """
    Latin hypercube experiment design.

    Thin wrapper around the pyDOE implementation; for reference see
    https://pythonhosted.org/pyDOE/randomized.html#latin-hypercube
    """
    def __init__(self, parameter_space: ParameterSpace) -> None:
        """
        :param parameter_space: The parameter space to generate design for.
        """
        super(LatinDesign, self).__init__(parameter_space)

    def get_samples(self, point_count: int) -> np.ndarray:
        """
        Generates requested amount of points.

        :param point_count: Number of points required.
        :return: A numpy array with shape (point_count x space_dim)
        """
        bounds = np.asarray(self.parameter_space.get_bounds())
        lower, upper = bounds[:, 0], bounds[:, 1]
        # Unit-cube LHS samples, scaled into the parameter bounds via
        # broadcasting (equivalent to the ones-matrix dot products).
        unit_samples = pyDOE.lhs(len(bounds), point_count, criterion='center')
        scaled = lower[None, :] + unit_samples * (upper - lower)[None, :]
        return self.parameter_space.round(scaled)
| 33.042553 | 123 | 0.68255 |
c5e3931f12d3dd8efb819b2fcfddf3288568b33a | 1,002 | py | Python | apps/tts_tests/migrations/0001_initial.py | michaldomino/Voice-interface-optimization-server | fff59d4c5db599e35d4b5f3915bbb272d2000a26 | [
"MIT"
] | null | null | null | apps/tts_tests/migrations/0001_initial.py | michaldomino/Voice-interface-optimization-server | fff59d4c5db599e35d4b5f3915bbb272d2000a26 | [
"MIT"
] | null | null | null | apps/tts_tests/migrations/0001_initial.py | michaldomino/Voice-interface-optimization-server | fff59d4c5db599e35d4b5f3915bbb272d2000a26 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2021-03-21 11:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the initial tables for the
    # tts_tests app (a TtsTest and its TtsTestResult rows).

    # First migration for this app, so it has no parent migrations.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TtsTest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('volume', models.FloatField()),
                ('pitch', models.FloatField()),
                ('rate', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='TtsTestResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('result', models.BooleanField()),
                # RESTRICT: a TtsTest cannot be deleted while results reference it.
                ('tts_test', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='tts_tests.ttstest')),
            ],
        ),
    ]
| 30.363636 | 118 | 0.562874 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.