hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6fe6ca435b48ca2b52fd3def00654475e0975144 | 102,677 | py | Python | python/paddle/fluid/transpiler/distribute_transpiler.py | xiteng1988/Paddle | 5365cd2f14e5ae12ca41ef061882f56e33775c13 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/transpiler/distribute_transpiler.py | xiteng1988/Paddle | 5365cd2f14e5ae12ca41ef061882f56e33775c13 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/transpiler/distribute_transpiler.py | xiteng1988/Paddle | 5365cd2f14e5ae12ca41ef061882f56e33775c13 | [
"Apache-2.0"
] | 4 | 2019-09-30T02:15:34.000Z | 2019-09-30T02:41:30.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
"""
Steps to transpile trainer:
1. split variable to multiple blocks, aligned by product(dim[1:]) (width).
2. rename splited grad variables to add trainer_id suffix ".trainer_%d".
3. modify trainer program add split_op to each grad variable.
4. append send_op to send splited variables to server and
5. add recv_op to fetch params(splited blocks or origin param) from server.
6. append concat_op to merge splited blocks to update local weights.
Steps to transpile pserver:
1. create new program for parameter server.
2. create params and grad variables that assigned to current server instance.
3. create a sub-block in the server side program
4. append ops that should run on current server instance.
5. add listen_and_serv op
"""
import sys
import math
from functools import reduce
import collections
import six
import logging
import numpy as np
from .ps_dispatcher import RoundRobin, PSDispatcher
from .. import core, framework, unique_name
from ..framework import Program, default_main_program, \
default_startup_program, Block, Parameter, grad_var_name
from .details import wait_server_ready, UnionFind, VarStruct, VarsDistributed
from .details import delete_ops, find_op_by_output_arg
from ..distribute_lookup_table import find_distributed_lookup_table
from . import collective
LOOKUP_TABLE_TYPE = "lookup_table"
LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad"
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName(
)
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
DIST_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Dist
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
PRINT_LOG = False
def log(*args):
    """Print *args* (as a tuple) when the module-level PRINT_LOG flag is set."""
    if not PRINT_LOG:
        return
    print(args)
class VarBlock:
    """One shard of a split variable.

    A shard is identified by the source variable name, the shard index
    (``offset``) and the number of elements in the shard (``size``).
    """

    def __init__(self, varname, offset, size):
        self.varname = varname
        # NOTE: the element offset of this shard inside the original
        # variable is offset * size, not offset itself.
        self.offset = offset
        self.size = size

    def __str__(self):
        # serialized form: "<varname>:<block_id>:<block_size>"
        return "%s:%d:%d" % (self.varname, self.offset, self.size)
def same_or_split_var(p_name, var_name):
    """Return True when ``p_name`` is ``var_name`` itself or one of the
    ``var_name + ".block<N>"`` slices produced by variable splitting."""
    if p_name == var_name:
        return True
    return p_name.startswith(var_name + ".block")
def slice_variable(var_list, slice_count, min_block_size):
    """
    Split each dense variable in ``var_list`` into at most ``slice_count``
    blocks so they can be placed evenly onto parameter servers.  For 2-D
    (or higher) tensors a block is aligned on dim[0]: every block holds a
    whole number of rows (a multiple of product(dim[1:]) elements).

    A minimum block size keeps per-block overhead low on the pserver side;
    the conventional default is 8K elements (maybe 16bit or 32bit or 64bit).

    Args:
        var_list (list): variables to split; each element needs ``name``
            and ``shape`` attributes.
        slice_count (int): maximum number of blocks per variable, which
            could be the pserver services' count.
        min_block_size (int): minimum number of elements per block.

    Returns:
        list[str]: serialized blocks, "varname:block_id:block_size", in
        variable order then block order.
    """
    blocks = []
    for var in var_list:
        # initializer 1 makes scalar/empty shapes yield numel == 1 instead
        # of raising TypeError on an empty reduce (robustness fix)
        var_numel = reduce(lambda x, y: x * y, var.shape, 1)

        # never create more blocks than numel / min_block_size allows
        max_pserver_count = int(math.floor(var_numel / float(min_block_size)))
        if max_pserver_count == 0:
            max_pserver_count = 1
        split_count = min(slice_count, max_pserver_count)

        block_size = int(math.ceil(var_numel / float(split_count)))
        if len(var.shape) >= 2:
            # align the block size with dim1 (row width) so each block
            # contains whole rows only
            dim1 = reduce(lambda x, y: x * y, var.shape[1:], 1)
            remains = block_size % dim1
            if remains != 0:
                block_size += dim1 - remains
            # alignment may have grown block_size; recompute the count
            split_count = int(math.ceil(var_numel / float(block_size)))

        for block_id in range(split_count):
            # the last block may be smaller than block_size
            curr_block_size = min(block_size,
                                  var_numel - block_id * block_size)
            blocks.append("%s:%d:%d" % (var.name, block_id, curr_block_size))
    return blocks
class DistributeTranspilerConfig(object):
    """
    Configuration knobs for distributed jobs run through
    ``DistributeTranspiler``.

    .. py:attribute:: slice_var_up (bool)

          Whether to do Tensor slice for parameter servers, default is True.

    .. py:attribute:: split_method (PSDispatcher)

          Methods of dispatching parameters for server,
          :ref:`api_fluid_transpiler_RoundRobin` or
          :ref:`api_fluid_transpiler_HashName` can be used and default is
          RoundRobin. Try to choose the best method to balance loads.

    .. py:attribute:: min_block_size (int)

          Minimum number of splitted elements in block, default is 8192.
          See https://github.com/PaddlePaddle/Paddle/issues/8638#issuecomment-369912156
          and read slice_variable() before changing it.

    Examples:
        .. code-block:: python

            from paddle.fluid.transpiler.ps_dispatcher import RoundRobin
            import paddle.fluid as fluid

            config = fluid.DistributeTranspilerConfig()
            config.slice_var_up = True
            config.split_method = RoundRobin
            config.min_block_size = 81920
    """
    slice_var_up = True
    split_method = None
    min_block_size = 8192
    enable_dc_asgd = False
    # supported modes: pserver, nccl2, collective
    mode = "pserver"
    print_log = False
    wait_port = True
    # split the send/recv variables at runtime instead of transpile time;
    # mutually exclusive with _sync_mode (see the property setters below)
    _runtime_split_send_recv = False
    _sync_mode = True
    nccl_comm_num = 1
    # The picture here illustrates the principle:
    # https://github.com/PaddlePaddle/Paddle/pull/17263#discussion_r285411396
    use_hierarchical_allreduce = False
    # NCCL ranks in a node when using hierarchical allreduce; usually set
    # to the number of GPU cards in a node.
    hierarchical_allreduce_inter_nranks = 0
    # only used when mode is "collective":
    # supported values: grad_allreduce, local_sgd
    collective_mode = None

    def __init__(self):
        # all state lives in class-level defaults; nothing to initialize
        pass

    @property
    def runtime_split_send_recv(self):
        return self._runtime_split_send_recv

    @runtime_split_send_recv.setter
    def runtime_split_send_recv(self, value):
        if value is None:
            raise ValueError("runtime_split_send_recv can't be None")
        # enabling runtime split is only legal in async mode
        if value and self._sync_mode:
            raise ValueError(
                "if you want to set runtime_split_send_recv to be true, make ensure config.sync_mode is false at first"
            )
        self._runtime_split_send_recv = value

    @property
    def sync_mode(self):
        return self._sync_mode

    @sync_mode.setter
    def sync_mode(self, value):
        if value is None:
            raise ValueError("sync_mode can't be None")
        # sync mode is incompatible with runtime send/recv splitting
        if value and self._runtime_split_send_recv:
            raise ValueError(
                "if you want to set sync_mode to be true, make ensure config.runtime_split_send_recv is false at first"
            )
        self._sync_mode = value
class DistributeTranspiler(object):
"""
**DistributeTranspiler**
Convert the fluid program to distributed data-parallelism programs.
Supports two modes: pserver mode and nccl2 mode.
In pserver mode, the main_program will be transformed to use a remote
parameter server to do parameter optimization. And the optimization
graph will be put into a parameter server program.
In nccl2 mode, the transpiler will append a NCCL_ID broadcasting
op in startup_program to share the NCCL_ID across the job nodes.
After transpile_nccl2 called, you ***must*** pass trainer_id and
num_trainers argument to ParallelExecutor to enable NCCL2 distributed
mode.
Examples:
.. code-block:: python
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_loss)
# for pserver mode
pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
current_endpoint = "192.168.0.1:6174"
trainer_id = 0
trainers = 4
role = "PSERVER"
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id, pservers=pserver_endpoints, trainers=trainers)
if role == "PSERVER":
pserver_program = t.get_pserver_program(current_endpoint)
pserver_startup_program = t.get_startup_program(current_endpoint,
pserver_program)
elif role == "TRAINER":
trainer_program = t.get_trainer_program()
# for nccl2 mode
trainer_num = 2
trainer_id = 0
config = fluid.DistributeTranspilerConfig()
config.mode = "nccl2"
trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
t = fluid.DistributeTranspiler(config=config)
t.transpile(trainer_id=trainer_id, trainers=trainer_endpoints, current_endpoint="192.168.0.1:6174")
exe = fluid.ParallelExecutor(
use_cuda=True,
loss_name=avg_loss.name,
num_trainers=trainer_num,
trainer_id=trainer_id
)
"""
def __init__(self, config=None):
if config is not None:
self.config = config
else:
self.config = DistributeTranspilerConfig()
if self.config.split_method is None:
self.config.split_method = RoundRobin
global PRINT_LOG
if self.config.print_log:
PRINT_LOG = True
assert (self.config.min_block_size >= 8192)
assert (self.config.split_method.__bases__[0] == PSDispatcher)
def _transpile_nccl2(self,
trainer_id,
trainers,
current_endpoint,
startup_program=None,
wait_port=True):
if not startup_program:
startup_program = default_startup_program()
if trainer_id >= 0:
worker_endpoints = trainers.split(",")
# send NCCL_ID to others or recv from trainer 0
worker_endpoints.remove(current_endpoint)
if trainer_id == 0 and wait_port:
wait_server_ready(worker_endpoints)
nccl_id_var = startup_program.global_block().create_var(
name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW)
for i in range(1, self.config.nccl_comm_num):
startup_program.global_block().create_var(
name="NCCLID_{}".format(i),
persistable=True,
type=core.VarDesc.VarType.RAW)
if self.config.use_hierarchical_allreduce:
for i in range(0, self.config.nccl_comm_num):
startup_program.global_block().create_var(
name="Hierarchical_inter_NCCLID_{}".format(i),
persistable=True,
type=core.VarDesc.VarType.RAW)
startup_program.global_block().create_var(
name="Hierarchical_exter_NCCLID_{}".format(i),
persistable=True,
type=core.VarDesc.VarType.RAW)
startup_program.global_block().append_op(
type="gen_nccl_id",
inputs={},
outputs={"NCCLID": nccl_id_var},
attrs={
"trainers": trainers.split(","),
"trainer_id": trainer_id,
"nccl_comm_num": self.config.nccl_comm_num,
"use_hierarchical_allreduce":
self.config.use_hierarchical_allreduce,
"hierarchical_allreduce_inter_nranks":
self.config.hierarchical_allreduce_inter_nranks
})
return nccl_id_var
else:
raise ValueError("must set trainer_id > 0")
def _transpile_collective(self,
collective_mode,
trainer_id,
trainers,
current_endpoint,
startup_program=None,
main_program=None,
wait_port=True):
if isinstance(trainers, str):
endpoints = trainers.split(",")
elif isinstance(trainers, list):
endpoints = trainers
else:
raise ValueError('invalid trainers config: ' + str(trainers))
if len(endpoints) == 1:
raise ValueError('invalid trainer number in distributed: 1')
if startup_program is None:
startup_program = default_startup_program()
if main_program is None:
main_program = default_main_program()
transpiler = None
if collective_mode == 'grad_allreduce':
transpiler = collective.GradAllReduce(self.config.nccl_comm_num)
elif collective_mode == 'local_sgd':
transpiler = collective.LocalSGD(self.config.nccl_comm_num)
else:
raise ValueError('invalid collective_mode: %s' % collective_mode)
transpiler.transpile(
startup_program=startup_program,
main_program=main_program,
rank=trainer_id,
endpoints=endpoints,
current_endpoint=current_endpoint,
wait_port=wait_port)
def _get_all_remote_sparse_update_op(self, main_program):
sparse_update_ops = []
sparse_update_op_types = ["lookup_table", "nce", "hierarchical_sigmoid"]
for op in main_program.global_block().ops:
if op.type in sparse_update_op_types and op.attr(
'remote_prefetch') is True:
sparse_update_ops.append(op)
return sparse_update_ops
    def _update_remote_sparse_update_op(self, program,
                                        need_sparse_update_params):
        """
        Replace groups of local sparse-update ops (currently only
        lookup_table) with a single fused distributed_lookup_table op per
        parameter.

        Args:
            program (Program): trainer program to rewrite in place.
            need_sparse_update_params (dict): param name ->
                (endpoints, table_names), as collected during transpile().

        Raises:
            ValueError: if the removed ops' producers/consumers cannot be
                re-ordered so one fused op can replace them.
        """
        for param_varname, attrs in need_sparse_update_params.items():
            # height sections computed when the grad vars were split
            height_sections = self.sparse_param_to_height_sections[
                param_varname]
            endpoints = attrs[0]
            table_names = attrs[1]

            # collect every sparse update op that consumes this parameter;
            # op_type locks onto the type of the first match so only ops of
            # one type are grouped together
            ops = []
            op_type = ""
            used_ops = []
            for idx, op in enumerate(self.sparse_update_ops):
                if param_varname in op.input_arg_names and op_type == "":
                    op_type = op.type
                    ops.append(op)
                    used_ops.append(idx)
                elif param_varname in op.input_arg_names and op_type == op.type:
                    ops.append(op)
                    used_ops.append(idx)

            if op_type == "lookup_table":
                # positions of the ops to be removed from the block
                all_ops = program.global_block().ops
                op_idxs = [all_ops.index(op) for op in ops]
                inputs = [
                    program.global_block().vars[op.input("Ids")[0]]
                    for op in ops
                ]
                w = program.global_block().vars[ops[0].input("W")[0]]
                padding_idx = ops[0].attr("padding_idx")
                outputs = [
                    program.global_block().vars[op.output("Out")[0]]
                    for op in ops
                ]
                # remove back-to-front so earlier indices stay valid
                for idx in op_idxs[::-1]:
                    program.global_block()._remove_op(idx)

                # find, for each Ids input, the index of the op producing it
                # and, for each Out output, the index of the op consuming it
                inputs_idxs = [-1] * len(inputs)
                outputs_idxs = [-1] * len(outputs)
                for idx, op in enumerate(program.global_block().ops):
                    for i in range(0, len(op.output_names)):
                        outs = op.output(op.output_names[i])
                        for in_id, in_var in enumerate(inputs):
                            if in_var.name in outs:
                                inputs_idxs[in_id] = idx
                    for i in range(0, len(op.input_names)):
                        ins = op.input(op.input_names[i])
                        for out_id, out_var in enumerate(outputs):
                            if out_var.name in ins:
                                outputs_idxs[out_id] = idx

                # the fused op must go after every producer and before every
                # consumer; otherwise the program cannot be rewritten safely
                if min(outputs_idxs) - max(inputs_idxs) >= 1:
                    distributed_idx = max(inputs_idxs) + 1

                    program.global_block()._insert_op(
                        index=distributed_idx,
                        type="distributed_lookup_table",
                        inputs={"Ids": inputs,
                                'W': w},
                        outputs={"Outputs": outputs},
                        attrs={
                            "table_names": table_names,
                            "height_sections": height_sections,
                            "endpoints": endpoints,
                            "padding_idx": padding_idx,
                            "trainer_id": self.trainer_id
                        })
                else:
                    raise ValueError(
                        "something wrong with distribute_transpiler, submit a issue is recommended"
                    )

                # drop the fused ops from the pending list, back-to-front so
                # the stored indices stay valid
                for idx in used_ops[::-1]:
                    self.sparse_update_ops.pop(idx)
def _is_input_of_remote_sparse_update_op(self, param_name):
for op in self.sparse_update_ops:
if param_name in op.input_arg_names:
return True
return False
    def transpile(self,
                  trainer_id,
                  program=None,
                  pservers="127.0.0.1:6174",
                  trainers=1,
                  sync_mode=True,
                  startup_program=None,
                  current_endpoint="127.0.0.1:6174"):
        """
        Run the transpiler. Transpile the input program.

        Args:
            trainer_id (int): id for current trainer worker, if you have
                n workers, the id may range from 0 ~ n-1
            program (Program|None): program to transpile,
                default is fluid.default_main_program().
            startup_program (Program|None): startup_program to transpile,
                default is fluid.default_startup_program().
            pservers (str): comma separated ip:port string for the pserver
                list.
            trainers (int|str): in pserver mode this is the number of
                trainers, in nccl2 mode this is a string of trainer
                endpoints.
            sync_mode (bool): Do sync training or not, default is True.
            current_endpoint (str): need pass current endpoint when
                transpile as nccl2 distributed mode. In pserver mode
                this argument is not used.

        Examples:
            .. code-block:: python

                transpiler = fluid.DistributeTranspiler()
                t.transpile(
                    trainer_id=0,
                    pservers="127.0.0.1:7000,127.0.0.1:7001",
                    trainers=2,
                    sync_mode=False,
                    current_endpoint="127.0.0.1:7000")
        """
        if program is None:
            program = default_main_program()
        if startup_program is None:
            startup_program = default_startup_program()
        self.origin_program = program
        self.startup_program = startup_program
        # keep an untouched copy of the startup program for later use
        self.origin_startup_program = self.startup_program.clone()

        if self.config.mode == "nccl2":
            # nccl2 mode short-circuits: only NCCL id sharing is set up
            assert (isinstance(trainers, str))
            self.origin_program._trainers_endpoints = trainers.split(",")
            self.origin_program._nccl_comm_num = self.config.nccl_comm_num
            self.origin_program._use_hierarchical_allreduce = self.config.use_hierarchical_allreduce
            # check use_hierarchical_allreduce options
            if self.config.use_hierarchical_allreduce:
                trainers_num = len(self.origin_program._trainers_endpoints)
                # selected automaticly
                if self.config.hierarchical_allreduce_inter_nranks <= 1:
                    self.config.hierarchical_allreduce_inter_nranks = core.get_cuda_device_count(
                    )
                assert trainers_num > self.config.hierarchical_allreduce_inter_nranks, \
                    "trainers_num:{} < hierarchical_allreduce_inter_nranks:{}".format(trainers_num, self.config.hierarchical_allreduce_inter_nranks)
                assert trainers_num % self.config.hierarchical_allreduce_inter_nranks == 0, \
                    "trainers_num:{} mod hierarchical_allreduce_inter_nranks:{} != 0".format(trainers_num, self.config.hierarchical_allreduce_inter_nranks)
                self.origin_program._hierarchical_allreduce_inter_nranks = \
                    int(self.config.hierarchical_allreduce_inter_nranks)
            self._transpile_nccl2(
                trainer_id,
                trainers,
                current_endpoint,
                startup_program=startup_program,
                wait_port=self.config.wait_port)
            return

        if self.config.mode == "collective":
            # collective mode also short-circuits to its own transpiler
            self._transpile_collective(
                collective_mode=self.config.collective_mode,
                trainer_id=trainer_id,
                trainers=trainers,
                current_endpoint=current_endpoint,
                startup_program=startup_program,
                main_program=program,
                wait_port=self.config.wait_port)
            return

        # ----- pserver mode from here on -----
        self.trainer_num = trainers
        self.sync_mode = sync_mode
        self.trainer_id = trainer_id
        pserver_endpoints = pservers.split(",")
        self.pserver_endpoints = pserver_endpoints
        self.vars_overview = VarsDistributed()
        self.optimize_ops, self.params_grads = self._get_optimize_pass()

        ps_dispatcher = self.config.split_method(self.pserver_endpoints)
        self.table_name = find_distributed_lookup_table(self.origin_program)
        # NOTE(review): "!= None" should idiomatically be "is not None"
        self.has_distributed_lookup_table = self.table_name != None
        self.param_name_to_grad_name = dict()
        self.grad_name_to_param_name = dict()
        for param_var, grad_var in self.params_grads:
            self.param_name_to_grad_name[param_var.name] = grad_var.name
            self.grad_name_to_param_name[grad_var.name] = param_var.name

        # get all sparse update ops
        self.sparse_update_ops = self._get_all_remote_sparse_update_op(
            self.origin_program)
        # use_sparse_update_param_name -> split_height_section
        self.sparse_param_to_height_sections = dict()

        # add distributed attrs to program
        self.origin_program._is_distributed = True
        self.origin_program._endpoints = self.pserver_endpoints
        self.origin_program._ps_endpoint = current_endpoint
        self.origin_program._is_chief = self.trainer_id == 0
        self.origin_program._distributed_lookup_table = self.table_name if self.table_name else None

        # step 1: split and create vars, then put splited vars in dicts for later use.
        self._init_splited_vars()

        # step 2: insert send op to send gradient vars to parameter servers
        ps_dispatcher.reset()
        send_vars = []

        # in general cases, the number of pservers is times of 2, and this
        # will lead to uneven distribution among weights and bias:
        #       fc_w@GRAD_trainer_0, fc_w@GRAD_trainer_1 --> pserver1
        #       fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2
        # shuffle the map will avoid the uneven distribution above
        grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))

        if not self.config.slice_var_up:
            # seed so every trainer shuffles identically
            np.random.seed(self.origin_program.random_seed)
            np.random.shuffle(grad_var_mapping_items)

        self.grad_name_to_send_dummy_out = dict()

        for grad_varname, splited_vars in grad_var_mapping_items:
            eplist = ps_dispatcher.dispatch(splited_vars)

            if not self.config.slice_var_up:
                assert (len(splited_vars) == 1)

            splited_grad_varname = grad_varname
            if len(splited_vars) == 1:
                # not split: send the grad var right after the op producing it
                splited_grad_varname = splited_vars[0].name
                index = find_op_by_output_arg(
                    program.global_block(), splited_grad_varname, reverse=True)
            elif len(splited_vars) > 1:
                # split: insert a split_op after the producer, then send
                orig_var = program.global_block().vars[splited_grad_varname]
                index = find_op_by_output_arg(
                    program.global_block(), splited_grad_varname, reverse=True)

                if not self.config.runtime_split_send_recv:
                    self._insert_split_op(program, orig_var, index,
                                          splited_vars)
                    index += 1
            else:
                # NOTE(review): this constructs an AssertionError without
                # raising it — presumably it was meant to `raise`; confirm
                AssertionError("Can not insert the send op by original "
                               "variable name :", splited_grad_varname)

            if splited_vars[0].type == core.VarDesc.VarType.SELECTED_ROWS:
                # remember row-split sections of remote-prefetched params
                sparse_param_name = self.grad_name_to_param_name[grad_varname]
                if self._is_input_of_remote_sparse_update_op(sparse_param_name):
                    self.sparse_param_to_height_sections[sparse_param_name] = [
                        splited_var.shape[0] for splited_var in splited_vars
                    ]

            # dummy output used only as a dependency edge for barriers/recv
            dummy_output = program.global_block().create_var(
                name=framework.generate_control_dev_var_name())
            self.grad_name_to_send_dummy_out[grad_varname] = dummy_output

            if self.config.runtime_split_send_recv:
                send_input_vars = [
                    program.global_block().vars[splited_grad_varname]
                ]
                sections = self._get_splited_var_sections(splited_vars)
                send_varnames = [var.name for var in splited_vars]
            else:
                send_input_vars = splited_vars
                sections = []
                send_varnames = []

            # get send op_role_var, if not splited, the grad should have .trainer suffix
            # if splited, grad should be the original grad var name (split_by_ref and send
            # will be on the same place). ParallelExecutor
            # will use op_role_var to get expected device place to run this op.
            program.global_block()._insert_op(
                index=index + 1,
                type="send",
                inputs={"X": send_input_vars},
                outputs={"Out": dummy_output},
                attrs={
                    "epmap": eplist,
                    "sections": sections,
                    "send_varnames": send_varnames,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                    OP_ROLE_VAR_ATTR_NAME: [
                        self.grad_name_to_param_name[grad_varname],
                        splited_grad_varname
                    ]
                })
            for _, var in enumerate(splited_vars):
                send_vars.append(var)

        if self.sync_mode:
            # a send_barrier waits for all sends before any recv starts
            send_barrier_out = program.global_block().create_var(
                name=framework.generate_control_dev_var_name())
            if self.has_distributed_lookup_table:
                self.grad_name_to_send_dummy_out[
                    self.table_name] = program.global_block().create_var(
                        name=framework.generate_control_dev_var_name())
            input_deps = list(self.grad_name_to_send_dummy_out.values())

            program.global_block().append_op(
                type="send_barrier",
                inputs={"X": list(input_deps)},
                outputs={"Out": send_barrier_out},
                attrs={
                    "endpoints": pserver_endpoints,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
                })

        # step 3: insert recv op to receive parameters from parameter server
        recv_vars = []
        for _, var in enumerate(send_vars):
            recv_vars.append(self.grad_param_mapping[var])
        ps_dispatcher.reset()
        eplist = ps_dispatcher.dispatch(recv_vars)

        for i, ep in enumerate(eplist):
            self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
            self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])

            # record which endpoint serves each parameter slice
            distributed_var = self.vars_overview.get_distributed_var_by_slice(
                recv_vars[i].name)
            distributed_var.endpoint = ep

        need_sparse_update_params = {}

        # step4: Concat the parameters splits together after recv.
        all_recv_outputs = []
        for param_varname, splited_var in six.iteritems(self.param_var_mapping):
            eps = []
            table_names = []
            for var in splited_var:
                index = [v.name for v in recv_vars].index(var.name)
                eps.append(eplist[index])
                table_names.append(var.name)
            if self.sync_mode:
                recv_dep_in = send_barrier_out
            else:
                # connect deps to send op in async mode
                recv_dep_in = self.grad_name_to_send_dummy_out[
                    self.param_name_to_grad_name[param_varname]]

            # get recv op_role_var, if not splited, the grad should have .trainer suffix
            # if splited, grad should be the original grad var name. ParallelExecutor
            # will use op_role_var to get expected device place to run this op.
            orig_grad_name = self.param_name_to_grad_name[param_varname]
            recv_op_role_var_name = orig_grad_name
            splited_trainer_grad = self.grad_var_mapping[orig_grad_name]
            if len(splited_trainer_grad) == 1:
                recv_op_role_var_name = splited_trainer_grad[0].name

            if param_varname in self.sparse_param_to_height_sections:
                # remote-prefetched sparse params are not recv'd here; they
                # are handled by _update_remote_sparse_update_op below
                for table_name in table_names:
                    distributed_var = self.vars_overview.get_distributed_var_by_slice(
                        table_name)
                    distributed_var.vtype = "RemotePrefetch"

                need_sparse_update_params[param_varname] = (eps, table_names)
            else:
                recv_varnames = []
                if self.config.runtime_split_send_recv:
                    orig_param = program.global_block().vars[param_varname]
                    recv_varnames = [var.name for var in splited_var]
                    splited_var = [orig_param]
                all_recv_outputs.extend(splited_var)

                program.global_block().append_op(
                    type="recv",
                    inputs={"X": [recv_dep_in]},
                    outputs={"Out": splited_var},
                    attrs={
                        "epmap": eps,
                        "recv_varnames": recv_varnames,
                        "trainer_id": self.trainer_id,
                        RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                        OP_ROLE_VAR_ATTR_NAME:
                        [param_varname, recv_op_role_var_name]
                    })

        if self.sync_mode:
            # form a WAW dependency
            program.global_block().append_op(
                type="fetch_barrier",
                inputs={},
                outputs={"Out": all_recv_outputs},
                attrs={
                    "endpoints": pserver_endpoints,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
                })

        # merge sliced parameters back into their original variables
        for param_varname, splited_var in six.iteritems(self.param_var_mapping):
            if len(splited_var) <= 1:
                continue
            orig_param = program.global_block().vars[param_varname]
            if param_varname not in self.sparse_param_to_height_sections:
                if not self.config.runtime_split_send_recv:
                    program.global_block().append_op(
                        type="concat",
                        inputs={"X": splited_var},
                        outputs={"Out": [orig_param]},
                        attrs={
                            "axis": 0,
                            RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE
                        })

        self._update_remote_sparse_update_op(program,
                                             need_sparse_update_params)

        self._get_trainer_startup_program(recv_vars=recv_vars, eplist=eplist)

        if self.has_distributed_lookup_table:
            self._replace_lookup_table_op_with_prefetch(program,
                                                        pserver_endpoints)
            self._split_table_grad_and_add_send_vars(program, pserver_endpoints)

        self._get_distributed_optimizer_vars()
        self.origin_program._parameters_on_pservers = self.vars_overview
    def get_trainer_program(self, wait_port=True):
        """
        Get transpiled trainer side program.

        Args:
            wait_port (bool): if True, block until all pserver endpoints
                are reachable before returning.

        Returns:
            Program: trainer side program.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                #this is an example, find available endpoints in your case
                pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
                trainer_id = 0
                trainers = 4
                t = fluid.DistributeTranspiler()
                t.transpile(trainer_id, trainers=trainers, pservers=pserver_endpoints)
                trainer_program = t.get_trainer_program()
        """
        # remove optimize ops and add a send op to main_program
        # FIXME(typhoonzero): Also ops like clip_gradient, lrn_decay?

        lr_ops = self._get_lr_ops()
        delete_ops(self.origin_program.global_block(), self.optimize_ops)
        delete_ops(self.origin_program.global_block(), lr_ops)

        # delete table init op
        if self.has_distributed_lookup_table:
            table_var = self.startup_program.global_block().vars[
                self.table_name]
            table_param_init_op = []
            for op in self.startup_program.global_block().ops:
                if self.table_name in op.output_arg_names:
                    table_param_init_op.append(op)
            init_op_num = len(table_param_init_op)
            # exactly one op is expected to initialize the table parameter
            if init_op_num != 1:
                raise ValueError("table init op num should be 1, now is " + str(
                    init_op_num))
            table_init_op = table_param_init_op[0]
            # replace the real init with fake_init: the table lives on the
            # pservers, so the trainer must not materialize it
            self.startup_program.global_block().append_op(
                type="fake_init",
                inputs={},
                outputs={"Out": table_var},
                attrs={"shape": table_init_op.attr('shape')})
            delete_ops(self.startup_program.global_block(), table_param_init_op)

        # NOTE(review): __str__() looks like it is called only for its side
        # effect (syncing the program desc after the edits above) — confirm
        self.origin_program.__str__()

        if wait_port:
            wait_server_ready(self.pserver_endpoints)

        return self.origin_program
    def _get_trainer_startup_program(self, recv_vars, eplist):
        """
        Get transpiled trainer side startup program.

        Args:
            recv_vars (list): Variable list to recv for current trainer_id
            eplist (list): A list of strings indicating the endpoint that
                serves each recv var (parallel to recv_vars).

        Returns:
            Program: trainer side startup program.
        """
        startup_program = self.startup_program

        # FIXME(gongwb): delete not need ops.
        # note that: some parameter is not trainable and those ops can't be deleted.

        for varname, splited_var in six.iteritems(self.param_var_mapping):
            # Get the eplist of recv vars
            eps = []
            for var in splited_var:
                index = [v.name for v in recv_vars].index(var.name)
                eps.append(eplist[index])

            # make sure every slice var exists in the startup program
            for var in splited_var:
                if startup_program.global_block().has_var(var.name):
                    continue

                startup_program.global_block().create_var(
                    name=var.name,
                    persistable=False,
                    type=var.type,
                    dtype=var.dtype,
                    shape=var.shape,
                    lod_level=var.lod_level)

            # fetch the initialized parameter slices from the pservers
            op = startup_program.global_block().append_op(
                type="recv",
                inputs={"X": []},
                outputs={"Out": splited_var},
                attrs={
                    "epmap": eps,
                    "trainer_id": self.trainer_id,
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
                })

        # wait for all recvs to finish before concatenating
        fetch_barrier_out = startup_program.global_block().create_var(
            name=framework.generate_control_dev_var_name())
        startup_program.global_block().append_op(
            type="fetch_barrier",
            inputs={},
            outputs={"Out": fetch_barrier_out},
            attrs={
                "endpoints": self.pserver_endpoints,
                "trainer_id": self.trainer_id,
                RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
            })

        for varname, splited_var in six.iteritems(self.param_var_mapping):
            # add concat ops to merge splited parameters received from parameter servers.
            if len(splited_var) <= 1:
                continue
            # NOTE: if enable memory optimization, origin vars maybe removed.
            if varname in startup_program.global_block().vars:
                orig_param = startup_program.global_block().vars[varname]
            else:
                origin_param_var = self.origin_program.global_block().vars[
                    varname]
                orig_param = startup_program.global_block().create_var(
                    name=varname,
                    persistable=origin_param_var.persistable,
                    type=origin_param_var.type,
                    dtype=origin_param_var.dtype,
                    shape=origin_param_var.shape)
            startup_program.global_block().append_op(
                type="concat",
                inputs={"X": splited_var},
                outputs={"Out": [orig_param]},
                attrs={"axis": 0})

        return startup_program
    def get_pserver_program(self, endpoint):
        """
        Get parameter server side program.

        Builds a fresh ``Program`` for one pserver containing: receive vars
        for the params/grads this endpoint owns, per-parameter optimize
        blocks (with grad-merge ops in sync mode), optional lr-decay /
        distributed-lookup-table / checkpoint blocks, and a final
        ``listen_and_serv`` op that serves them.

        Args:
            endpoint (str): current parameter server endpoint.
        Returns:
            Program: the program for current parameter server to run.
        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                #this is an example, find available endpoints in your case
                pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
                current_endpoint = "192.168.0.1:6174"
                trainer_id = 0
                trainers = 4
                t = fluid.DistributeTranspiler()
                t.transpile(
                    trainer_id, pservers=pserver_endpoints, trainers=trainers)
                pserver_program = t.get_pserver_program(current_endpoint)
        """
        # TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers.
        # NOTE: assume blocks of the same variable is not distributed
        # on the same pserver, only change param/grad varnames for
        # trainers to fetch.
        sys.stderr.write(
            "get_pserver_program() is deprecated, call get_pserver_programs() to get pserver main and startup in a single call.\n"
        )
        # step1
        pserver_program = Program()
        pserver_program.random_seed = self.origin_program.random_seed
        pserver_program._copy_dist_param_info_from(self.origin_program)
        # step2: Create vars to receive vars at parameter servers.
        recv_inputs = []
        for v in self.param_grad_ep_mapping[endpoint]["params"]:
            self._clone_var(pserver_program.global_block(), v)
        for v in self.param_grad_ep_mapping[endpoint]["grads"]:
            # create vars for each trainer in global scope, so
            # we don't need to create them when grad arrives.
            # change client side var name to origin name by
            # removing ".trainer_%d" suffix
            suff_idx = v.name.find(".trainer_")
            if suff_idx >= 0:
                orig_var_name = v.name[:suff_idx]
            else:
                orig_var_name = v.name
            # NOTE: single_trainer_var must be created for multi-trainer
            # case to merge grads from multiple trainers
            single_trainer_var = \
                pserver_program.global_block().create_var(
                    name=orig_var_name,
                    persistable=True,
                    type=v.type,
                    dtype=v.dtype,
                    shape=v.shape)
            if self.sync_mode and self.trainer_num > 1:
                # sync mode: one receive var per trainer; they are summed
                # later by the grad-merge ops appended below.
                for trainer_id in range(self.trainer_num):
                    var = pserver_program.global_block().create_var(
                        name="%s.trainer_%d" % (orig_var_name, trainer_id),
                        persistable=False,
                        type=v.type,
                        dtype=v.dtype,
                        shape=v.shape)
                    recv_inputs.append(var)
            else:
                recv_inputs.append(single_trainer_var)
        # step 3
        # Create a union-find data structure from optimize ops,
        # If two ops are connected, we could add these two ops
        # into one set.
        ufind = self._create_ufind(self.optimize_ops)
        # step 3.2
        # Iterate through the ops and append optimize op which
        # located on current pserver
        opt_op_on_pserver = []
        for _, op in enumerate(self.optimize_ops):
            if self._is_optimizer_op(op) and self._is_opt_op_on_pserver(
                    endpoint, op):
                opt_op_on_pserver.append(op)
        # step 3.3
        # prepare if dc asgd is enabled
        if self.config.enable_dc_asgd == True:
            # DC-ASGD is only meaningful in async mode
            assert (self.sync_mode == False)
            self.param_bak_list = []
            # add param_bak for each trainer
            for p in self.param_grad_ep_mapping[endpoint]["params"]:
                # each parameter should have w_bak for each trainer id
                for i in range(self.trainer_num):
                    param_bak_name = "%s.trainer_%d_bak" % (p.name, i)
                    tmpvar = pserver_program.global_block().create_var(
                        # NOTE: this var name format is used in `request_get_handler`
                        name=param_bak_name,
                        type=p.type,
                        shape=p.shape,
                        dtype=p.dtype)
                    self.param_bak_list.append((p, tmpvar))
        # step 3.4
        # Iterate through the ops, and if an op and the optimize ops
        # which located on current pserver are in one set, then
        # append it into the sub program.
        global_ops = []
        # NOTE(review): nothing in this method appends to global_ops, so the
        # "append global ops" branch below appears dead here — confirm
        # whether other code paths populate it before removing.
        # sparse grad name to param name
        sparse_grad_to_param = []
        def __append_optimize_op__(op, block, grad_to_block_id, merged_var,
                                   lr_ops):
            # Dispatch: true optimizer ops get the full pserver rewrite,
            # other (non-lr) ops are cloned as-is into the block.
            if self._is_optimizer_op(op):
                self._append_pserver_ops(block, op, endpoint, grad_to_block_id,
                                         self.origin_program, merged_var,
                                         sparse_grad_to_param)
            elif op not in lr_ops:
                self._append_pserver_non_opt_ops(block, op)
        def __clone_lr_op_sub_block__(op, program, lr_block):
            # Recursively clone an op's sub_block (e.g. conditional lr decay)
            # into the pserver program, preserving the block hierarchy.
            if not op.has_attr('sub_block'):
                return
            origin_block_desc = op.attr('sub_block')
            origin_block = self.origin_program.block(origin_block_desc.id)
            assert isinstance(origin_block, Block)
            # we put the new sub block to new block to follow the block
            # hierarchy of the original blocks
            new_sub_block = program._create_block(lr_block.idx)
            # clone vars
            for var in origin_block.vars:
                new_sub_block._clone_variable(var)
            # clone ops
            for origin_op in origin_block.ops:
                cloned_op = self._clone_lr_op(program, new_sub_block, origin_op)
                # clone sub_block of op
                __clone_lr_op_sub_block__(cloned_op, program, new_sub_block)
            # reset the block of op
            op._set_attr('sub_block', new_sub_block)
        # append lr decay ops to the child block if exists
        lr_ops = self._get_lr_ops()
        # record optimize blocks and we can run them on pserver parallel
        optimize_blocks = []
        if len(lr_ops) > 0:
            lr_decay_block = pserver_program._create_block(
                pserver_program.num_blocks - 1)
            optimize_blocks.append(lr_decay_block)
            for _, op in enumerate(lr_ops):
                cloned_op = self._append_pserver_non_opt_ops(lr_decay_block, op)
                # append sub blocks to pserver_program in lr_decay_op
                __clone_lr_op_sub_block__(cloned_op, pserver_program,
                                          lr_decay_block)
        # append op to the current block
        grad_to_block_id = []
        pre_block_idx = pserver_program.num_blocks - 1
        for idx, opt_op in enumerate(opt_op_on_pserver):
            # one optimize block per optimizer op hosted on this endpoint
            per_opt_block = pserver_program._create_block(pre_block_idx)
            optimize_blocks.append(per_opt_block)
            optimize_target_param_name = opt_op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
            # append grad merging ops before clip and weight decay
            # e.g. merge grad -> L2Decay op -> clip op -> optimize
            merged_var = None
            for _, op in enumerate(self.optimize_ops):
                # find the origin grad var before clipping/L2Decay,
                # merged_var should be the input var name of L2Decay
                grad_varname_for_block = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
                if op.attr(OP_ROLE_VAR_ATTR_NAME)[
                        0] == optimize_target_param_name:
                    merged_var = self._append_pserver_grad_merge_ops(
                        per_opt_block, grad_varname_for_block, endpoint,
                        grad_to_block_id, self.origin_program)
                    if merged_var:
                        break  # append optimize op once then append other ops.
            if merged_var:
                for _, op in enumerate(self.optimize_ops):
                    # optimizer is connected to itself
                    if op.attr(OP_ROLE_VAR_ATTR_NAME)[0] == optimize_target_param_name and \
                            op not in global_ops:
                        log("append opt op: ", op.type, op.input_arg_names,
                            merged_var)
                        __append_optimize_op__(op, per_opt_block,
                                               grad_to_block_id, merged_var,
                                               lr_ops)
        # dedup grad to ids list
        grad_to_block_id = list(set(grad_to_block_id))
        # append global ops
        if global_ops:
            opt_state_block = pserver_program._create_block(
                pserver_program.num_blocks - 1)
            optimize_blocks.append(opt_state_block)
            for glb_op in global_ops:
                __append_optimize_op__(glb_op, opt_state_block,
                                       grad_to_block_id, None, lr_ops)
        # process distributed lookup_table
        prefetch_var_name_to_block_id = []
        if self.has_distributed_lookup_table:
            pserver_index = self.pserver_endpoints.index(endpoint)
            table_opt_block = self._create_table_optimize_block(
                pserver_index, pserver_program, pre_block_idx, grad_to_block_id)
            optimize_blocks.append(table_opt_block)
            lookup_table_var_name_to_block_id = self._create_prefetch_block(
                pserver_index, pserver_program, table_opt_block)
            checkpoint_block_id = self._create_checkpoint_save_block(
                pserver_program, table_opt_block.idx)
            pserver_program._distributed_lookup_table = self.table_name
            prefetch_var_name_to_block_id.extend(
                lookup_table_var_name_to_block_id)
        if len(optimize_blocks) == 0:
            logging.warn("pserver [" + str(endpoint) +
                         "] has no optimize block!!")
            pre_block_idx = pserver_program.num_blocks - 1
            empty_block = pserver_program._create_block(pre_block_idx)
            optimize_blocks.append(empty_block)
        # In some case, some parameter server will have no parameter to optimize
        # So we give an empty optimize block to parameter server.
        attrs = {
            "optimize_blocks": optimize_blocks,
            "endpoint": endpoint,
            "Fanin": self.trainer_num,
            "sync_mode": self.sync_mode,
            "grad_to_block_id": grad_to_block_id,
            "sparse_grad_to_param": sparse_grad_to_param,
        }
        if self.has_distributed_lookup_table:
            # NOTE: "checkpint_block_id" (sic) — presumably must match the
            # attribute name declared by the listen_and_serv operator; do not
            # correct the spelling here without checking the op definition.
            attrs['checkpint_block_id'] = checkpoint_block_id
        if self.config.enable_dc_asgd:
            attrs['dc_asgd'] = True
        if len(prefetch_var_name_to_block_id) > 0:
            attrs[
                'prefetch_var_name_to_block_id'] = prefetch_var_name_to_block_id
        # step5 append the listen_and_serv op
        pserver_program.global_block().append_op(
            type="listen_and_serv",
            inputs={'X': recv_inputs},
            outputs={},
            attrs=attrs)
        pserver_program._sync_with_cpp()
        # save pserver program to generate pserver side startup relatively.
        self.pserver_program = pserver_program
        return pserver_program
def get_pserver_programs(self, endpoint):
"""
Get pserver side main program and startup program for distributed training.
Args:
endpoint (str): current pserver endpoint.
Returns:
tuple: (main_program, startup_program), of type "Program"
Examples:
.. code-block:: python
import paddle.fluid as fluid
#this is an example, find available endpoints in your case
pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
current_endpoint = "192.168.0.1:6174"
trainer_id = 0
trainers = 4
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id, pservers=pserver_endpoints, trainers=trainers)
pserver_program, pserver_startup_program = t.get_pserver_programs(current_endpoint)
"""
pserver_prog = self.get_pserver_program(endpoint)
pserver_startup = self.get_startup_program(
endpoint, pserver_program=pserver_prog)
return pserver_prog, pserver_startup
    def get_startup_program(self,
                            endpoint,
                            pserver_program=None,
                            startup_program=None):
        """
        **Deprecated**

        Get startup program for current parameter server.
        Modify operator input variables if there are variables that
        were split to several blocks.

        Args:
            endpoint (str): current pserver endpoint.
            pserver_program (Program): deprecated, call get_pserver_program first.
            startup_program (Program): deprecated, should pass startup_program
                when initalizing

        Returns:
            Program: parameter server side startup program.

        Examples:
            .. code-block:: python

                pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
                trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
                current_endpoint = "192.168.0.1:6174"
                trainer_id = 0
                trainers = 4

                t = fluid.DistributeTranspiler()
                t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
                pserver_program = t.get_pserver_program(current_endpoint)
                pserver_startup_program = t.get_startup_program(current_endpoint,
                                                                pserver_program)
        """
        s_prog = Program()
        orig_s_prog = self.startup_program
        s_prog.random_seed = orig_s_prog.random_seed
        params = self.param_grad_ep_mapping[endpoint]["params"]
        def _get_splited_name_and_shape(varname):
            # Map an origin param name to the name/shape of the slice this
            # endpoint holds; ("", []) if varname is not a split of any param.
            for idx, splited_param in enumerate(params):
                pname = splited_param.name
                if same_or_split_var(pname, varname) and varname != pname:
                    return pname, splited_param.shape
            return "", []
        # 1. create vars in pserver program to startup program
        pserver_vars = pserver_program.global_block().vars
        created_var_map = collections.OrderedDict()
        for _, var in six.iteritems(pserver_vars):
            tmpvar = s_prog.global_block()._clone_variable(var)
            created_var_map[var.name] = tmpvar
        # 2. rename op outputs
        for op in orig_s_prog.global_block().ops:
            new_outputs = collections.OrderedDict()
            # do not append startup op if var is not on this pserver
            op_on_pserver = False
            # TODO(gongwb): remove this line.
            if op.type not in ["recv", "fetch_barrier", "concat"]:
                for key in op.output_names:
                    newname, _ = _get_splited_name_and_shape(op.output(key)[0])
                    if newname:
                        # output is a slice this pserver owns: redirect to it
                        op_on_pserver = True
                        new_outputs[key] = created_var_map[newname]
                    elif op.output(key)[0] in pserver_vars:
                        op_on_pserver = True
                        new_outputs[key] = pserver_vars[op.output(key)[0]]
            if op_on_pserver:
                # most startup program ops have no inputs
                new_inputs = self._get_input_map_from_op(pserver_vars, op)
                # initializer ops carry the target shape as an attribute, so
                # it must be rewritten to the (possibly sliced) output shape
                if op.type in [
                        "gaussian_random", "fill_constant", "uniform_random",
                        "truncated_gaussian_random"
                ]:
                    op._set_attr("shape", list(new_outputs["Out"].shape))
                s_prog.global_block().append_op(
                    type=op.type,
                    inputs=new_inputs,
                    outputs=new_outputs,
                    attrs=op.all_attrs())
        if self.config.enable_dc_asgd:
            for p, p_bak in self.param_bak_list:
                startup_param_var = s_prog.global_block().vars[p.name]
                startup_tmpvar = s_prog.global_block().vars[p_bak.name]
                # copy init random value to param_bak
                s_prog.global_block().append_op(
                    type="assign",
                    inputs={"X": startup_param_var},
                    outputs={"Out": startup_tmpvar})
        return s_prog
# ====================== private transpiler functions =====================
def _get_slice_var_info(self, slice_var):
block_suffix = "block"
block_idx = 0
offset = 0
is_slice = False
orig_var_name, block_name, _ = self._get_varname_parts(slice_var.name)
if not block_name:
return is_slice, block_idx, offset
block_idx = int(block_name.split(block_suffix)[1])
skip_dim0 = 0
slice_vars = self.param_var_mapping[orig_var_name]
orig_dim1_flatten = 1
if len(slice_vars[0].shape) >= 2:
orig_dim1_flatten = reduce(lambda x, y: x * y,
slice_vars[0].shape[1:])
for slice_var in slice_vars[:block_idx]:
skip_dim0 += slice_var.shape[0]
offset = skip_dim0 * orig_dim1_flatten
is_slice = True
return is_slice, block_idx, offset
    def _get_distributed_optimizer_vars(self):
        """
        Register every optimizer accumulator input (everything except
        Param/Grad/LearningRate) with ``self.vars_overview``, one entry per
        pserver endpoint, resizing accumulators whose shape follows the
        (splited) parameter's shape.
        """
        def _get_distributed_optimizer_var(endpoint):
            # collect optimizer ops whose Param lives on this endpoint
            opt_op_on_pserver = []
            for _, op in enumerate(self.optimize_ops):
                if self._is_optimizer_op(op) and self._is_opt_op_on_pserver(
                        endpoint, op):
                    opt_op_on_pserver.append(op)
            for opt_op in opt_op_on_pserver:
                dist_var = None
                # find the distributed var record of this op's Param input
                for key in opt_op.input_names:
                    if key == "Param":
                        param_name = opt_op.input(key)[0]
                        dist_var = self.vars_overview.get_distributed_var_by_origin_and_ep(
                            param_name, endpoint)
                        break
                for key in opt_op.input_names:
                    # Param/Grad/LearningRate are handled elsewhere
                    if key in ["Param", "Grad", "LearningRate"]:
                        continue
                    origin_var = self.origin_program.global_block().vars[
                        opt_op.input(key)[0]]
                    # update accumulator variable shape
                    new_shape = self._get_optimizer_input_shape(
                        opt_op.type, key, origin_var.shape,
                        dist_var.slice.shape)
                    if new_shape == dist_var.slice.shape:
                        # accumulator follows the param slice: record a
                        # sliced view with the param slice's placement info
                        splited_var = VarStruct(
                            name=origin_var.name,
                            shape=new_shape,
                            dtype=origin_var.dtype,
                            type=origin_var.type,
                            lod_level=origin_var.lod_level,
                            persistable=origin_var.persistable)
                        self.vars_overview.add_distributed_var(
                            origin_var=origin_var,
                            slice_var=splited_var,
                            is_slice=dist_var.is_slice,
                            block_id=dist_var.block_id,
                            offset=dist_var.offset,
                            vtype="Optimizer",
                            endpoint=endpoint)
                    else:
                        # accumulator keeps its original shape (not sliced)
                        self.vars_overview.add_distributed_var(
                            origin_var=origin_var,
                            slice_var=origin_var,
                            is_slice=False,
                            block_id=0,
                            offset=0,
                            vtype="Optimizer",
                            endpoint=endpoint)
        for ep in self.pserver_endpoints:
            _get_distributed_optimizer_var(ep)
    def _update_dist_lookup_table_vars(self, param_list, grad_list,
                                       params_grads):
        """
        Remove the distributed lookup table's param/grad from the given
        lists (they are transpiled separately) and create the trainer-side
        split gradient vars for the table.

        Returns:
            tuple: (param_list, grad_list) with the table entries filtered
            out; unchanged if no distributed lookup table is used.
        """
        # TODO(wuyi): put find a way to put dist lookup table stuff all together.
        # update self.table_param_grad and self.trainer_side_table_grad_list
        program = self.origin_program
        if self.has_distributed_lookup_table:
            param_list = [
                param for param in param_list if param.name != self.table_name
            ]
            grad_list = [
                grad for grad in grad_list
                if grad.name != grad_var_name(self.table_name)
            ]
            self.table_param_grad = [
                param_grad for param_grad in params_grads
                if param_grad[0].name == self.table_name
            ][0]
            table_grad_var = self.table_param_grad[1]
            if self.sync_mode:
                # sync mode: names carry both trainer id and pserver index so
                # the pserver can merge grads from all trainers
                self.trainer_side_table_grad_list = [
                    program.global_block().create_var(
                        name="%s.trainer_%d.pserver_%d" %
                        (table_grad_var.name, self.trainer_id, index),
                        type=table_grad_var.type,
                        shape=table_grad_var.shape,
                        dtype=table_grad_var.dtype)
                    for index in range(len(self.pserver_endpoints))
                ]
            else:
                # async mode: only one grad var per pserver
                self.trainer_side_table_grad_list = [
                    program.global_block().create_var(
                        name="%s.pserver_%d" % (table_grad_var.name, index),
                        type=table_grad_var.type,
                        shape=table_grad_var.shape,
                        dtype=table_grad_var.dtype)
                    for index in range(len(self.pserver_endpoints))
                ]
        return param_list, grad_list
def _init_splited_vars(self):
# update these mappings for further transpile:
# 1. param_var_mapping: param var name -> [splited params vars]
# 2. grad_var_mapping: grad var name -> [splited grads vars]
# 3. grad_param_mapping: grad.blockx -> param.blockx
# 4. param_grad_ep_mapping: ep -> {"params": [], "grads": []}
param_list = []
grad_list = []
param_grad_set = set()
for p, g in self.params_grads:
# skip parameter marked not trainable
if type(p) == Parameter and p.trainable == False:
continue
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
param_list, grad_list = self._update_dist_lookup_table_vars(
param_list, grad_list, self.params_grads)
if self.config.slice_var_up:
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
grad_blocks = slice_variable(grad_list,
len(self.pserver_endpoints),
self.config.min_block_size)
param_blocks = slice_variable(param_list,
len(self.pserver_endpoints),
self.config.min_block_size)
else:
# when we do NOT slice var up into blocks, we will always slice params
# grads into one block.
grad_blocks = slice_variable(grad_list, 1,
self.config.min_block_size)
param_blocks = slice_variable(param_list, 1,
self.config.min_block_size)
assert (len(grad_blocks) == len(param_blocks))
# origin_param_name -> [splited_param_vars]
self.param_var_mapping = self._create_vars_from_blocklist(
self.origin_program, param_blocks)
for orig_name, splited_vars in self.param_var_mapping.items():
orig_var = self.origin_program.global_block().var(orig_name)
for splited_var in splited_vars:
is_slice, block_id, offset = self._get_slice_var_info(
splited_var)
self.vars_overview.add_distributed_var(
origin_var=orig_var,
slice_var=splited_var,
block_id=block_id,
offset=offset,
is_slice=is_slice,
vtype="Param")
# origin_grad_name -> [splited_grad_vars]
self.grad_var_mapping = self._create_vars_from_blocklist(
self.origin_program,
grad_blocks,
add_trainer_suffix=self.trainer_num > 1)
# dict(grad_splited_var -> param_splited_var)
self.grad_param_mapping = collections.OrderedDict()
for g, p in zip(grad_blocks, param_blocks):
g_name, g_bid, _ = g.split(":")
p_name, p_bid, _ = p.split(":")
self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \
self.param_var_mapping[p_name][int(p_bid)]
# create mapping of endpoint -> split var to create pserver side program
self.param_grad_ep_mapping = collections.OrderedDict()
[
self.param_grad_ep_mapping.update({
ep: {
"params": [],
"grads": []
}
}) for ep in self.pserver_endpoints
]
# transpiler function for dis lookup_table
    def _replace_lookup_table_op_with_prefetch(self, program,
                                               pserver_endpoints):
        """
        Replace every lookup_table op over the distributed table with the
        sequence split_ids -> prefetch -> merge_ids, so embedding rows are
        fetched from the parameter servers instead of looked up locally.
        """
        # 1. replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op
        self.all_in_ids_vars = []
        self.all_prefetch_input_vars = []
        self.all_prefetch_output_vars = []
        self.all_out_emb_vars = []
        lookup_table_op_index = -1
        continue_search_lookup_table_op = True
        # deleting an op invalidates the ops list, so restart the scan after
        # each deletion until no matching lookup_table op remains
        while continue_search_lookup_table_op:
            continue_search_lookup_table_op = False
            all_ops = program.global_block().ops
            for op in all_ops:
                if op.type == LOOKUP_TABLE_TYPE and self.table_name == op.input(
                        "W")[0]:
                    if not op.attr('is_distributed'):
                        raise RuntimeError(
                            "lookup_table_op that lookup an distributed embedding table"
                            "should set is_distributed to true")
                    continue_search_lookup_table_op = True
                    # remember the position of the FIRST deleted lookup_table
                    # op; the replacement ops are inserted there
                    lookup_table_op_index = lookup_table_op_index if lookup_table_op_index != -1 else list(
                        all_ops).index(op)
                    ids_name = op.input("Ids")
                    out_name = op.output("Out")
                    ids_var = program.global_block().vars[ids_name[0]]
                    self.all_in_ids_vars.append(ids_var)
                    out_var = program.global_block().vars[out_name[0]]
                    self.all_out_emb_vars.append(out_var)
                    # delete lookup_table_op
                    delete_ops(program.global_block(), [op])
                    # break for loop
                    break
        # one prefetch input/output temp var per pserver endpoint
        for index in range(len(self.pserver_endpoints)):
            in_var = program.global_block().create_var(
                name=str("prefetch_compress_in_tmp_" + str(index)),
                type=self.all_in_ids_vars[0].type,
                shape=self.all_in_ids_vars[0].shape,
                dtype=self.all_in_ids_vars[0].dtype)
            self.all_prefetch_input_vars.append(in_var)
            out_var = program.global_block().create_var(
                name=str("prefetch_compress_out_tmp_" + str(index)),
                type=self.all_out_emb_vars[0].type,
                shape=self.all_out_emb_vars[0].shape,
                dtype=self.all_out_emb_vars[0].dtype)
            self.all_prefetch_output_vars.append(out_var)
        # insert split_ids_op
        program.global_block()._insert_op(
            index=lookup_table_op_index,
            type="split_ids",
            inputs={'Ids': self.all_in_ids_vars},
            outputs={"Out": self.all_prefetch_input_vars})
        # insert prefetch_op
        program.global_block()._insert_op(
            index=lookup_table_op_index + 1,
            type="prefetch",
            inputs={'X': self.all_prefetch_input_vars},
            outputs={"Out": self.all_prefetch_output_vars},
            attrs={
                "epmap": pserver_endpoints,
                # FIXME(qiao) temporarily disable this config because prefetch
                # is not act as other rpc op, it's more like a forward op
                # RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
            })
        # insert concat_op
        program.global_block()._insert_op(
            index=lookup_table_op_index + 2,
            type="merge_ids",
            inputs={
                'Ids': self.all_in_ids_vars,
                'Rows': self.all_prefetch_input_vars,
                'X': self.all_prefetch_output_vars
            },
            outputs={"Out": self.all_out_emb_vars})
    def _split_table_grad_and_add_send_vars(self, program, pserver_endpoints):
        """
        Insert split_ids + send ops right after the op that produces the
        distributed lookup table's gradient, so the (sparse) table gradient
        is partitioned and sent to each parameter server.
        """
        # 2. add split_ids_op and send_op to send gradient to pservers
        # there should only be one table_name
        all_ops = program.global_block().ops
        table_grad_name = grad_var_name(self.table_name)
        for op in all_ops:
            if table_grad_name in op.output_arg_names:
                op_index = list(all_ops).index(op)
                # insert split_ids_op
                program.global_block()._insert_op(
                    index=op_index + 1,
                    type="split_ids",
                    inputs={
                        'Ids': [program.global_block().vars[table_grad_name]]
                    },
                    outputs={"Out": self.trainer_side_table_grad_list},
                    attrs={RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE})
                program.global_block()._insert_op(
                    index=op_index + 2,
                    type="send",
                    inputs={'X': self.trainer_side_table_grad_list},
                    outputs={
                        'Out':
                        [self.grad_name_to_send_dummy_out[self.table_name]]
                        if self.sync_mode else []
                    },
                    attrs={
                        "epmap": pserver_endpoints,
                        "trainer_id": self.trainer_id,
                        RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE,
                        OP_ROLE_VAR_ATTR_NAME: [
                            self.grad_name_to_param_name[table_grad_name],
                            table_grad_name
                        ]
                    })
                break
    def _create_prefetch_block(self, pserver_index, pserver_program,
                               optimize_block):
        """
        Append a block to the pserver program that serves embedding-row
        prefetch requests via a lookup_sparse_table op.

        Returns:
            list[str]: single-element list "ids_var_name:block_idx" mapping
            the trainer-side ids var to the new block.
        """
        # STEP: create prefetch block
        table_var = pserver_program.global_block().vars[self.table_name]
        prefetch_var_name_to_block_id = []
        prefetch_block = pserver_program._create_block(optimize_block.idx)
        trainer_ids = self.all_prefetch_input_vars[pserver_index]
        # mirror the trainer-side ids/out vars in the pserver program
        pserver_ids = pserver_program.global_block().create_var(
            name=trainer_ids.name,
            type=trainer_ids.type,
            shape=trainer_ids.shape,
            dtype=trainer_ids.dtype)
        trainer_out = self.all_prefetch_output_vars[pserver_index]
        pserver_out = pserver_program.global_block().create_var(
            name=trainer_out.name,
            type=trainer_out.type,
            shape=trainer_out.shape,
            dtype=trainer_out.dtype)
        prefetch_block.append_op(
            type="lookup_sparse_table",
            inputs={'Ids': pserver_ids,
                    "W": table_var},
            outputs={"Out": pserver_out},
            attrs={
                "is_sparse": True,  # has no effect on lookup_table op
                "is_distributed": True,
                "padding_idx": -1
            })
        prefetch_var_name_to_block_id.append(trainer_ids.name + ":" + str(
            prefetch_block.idx))
        return prefetch_var_name_to_block_id
    def _create_table_optimize_block(self, pserver_index, pserver_program,
                                     pre_block_idx, grad_to_block_id):
        """
        Append a block that optimizes this pserver's shard of the distributed
        lookup table with SGD, and register its grad->block mapping.

        Returns:
            Block: the newly created table optimize block.
        """
        # STEP: create table optimize block
        table_opt_block = pserver_program._create_block(pre_block_idx)
        # create table param and grad var in pserver program
        # create table optimize block in pserver program
        table_opt_op = [
            op for op in self.optimize_ops
            if 'Param' in op.input_names and op.input("Param")[0] ==
            self.table_name
        ][0]
        origin_param_var = self.origin_program.global_block().vars[
            self.table_name]
        # rows this shard holds: ceil(total_rows / num_pservers)
        zero_dim = int(
            math.ceil(origin_param_var.shape[0] / float(
                len(self.pserver_endpoints))))
        table_shape = list(origin_param_var.shape)
        table_shape[0] = zero_dim
        param_var = pserver_program.global_block().create_var(
            name=origin_param_var.name,
            shape=table_shape,
            dtype=origin_param_var.dtype,
            type=core.VarDesc.VarType.SELECTED_ROWS,
            persistable=True)
        # parameter must be selected rows
        param_var.desc.set_type(core.VarDesc.VarType.SELECTED_ROWS)
        grad_var = pserver_program.global_block()._clone_variable(
            self.origin_program.global_block().vars[grad_var_name(
                self.table_name)])
        lr_var = pserver_program.global_block()._clone_variable(
            self.origin_program.global_block().vars[table_opt_op.input(
                "LearningRate")[0]])
        if self.sync_mode:
            # create grad vars in pserver program
            table_grad_var = self.table_param_grad[1]
            pserver_side_table_grad_list = [
                pserver_program.global_block().create_var(
                    name="%s.trainer_%d.pserver_%d" %
                    (table_grad_var.name, index, pserver_index),
                    type=table_grad_var.type,
                    shape=table_grad_var.shape,
                    dtype=table_grad_var.dtype)
                for index in range(self.trainer_num)
            ]
            # append sum op for pserver_side_table_grad_list
            table_opt_block.append_op(
                type="sum",
                inputs={"X": pserver_side_table_grad_list},
                outputs={"Out": [grad_var]},
                attrs={"use_mkldnn": False})
        else:
            # in async_mode, for table gradient, it also need to be splited to each parameter server
            origin_grad_name = grad_var.name
            splited_grad_name = self.trainer_side_table_grad_list[
                pserver_index].name
            if not splited_grad_name.startswith(origin_grad_name):
                raise ValueError("origin_grad_var: " + splited_grad_name +
                                 " grad_var:" + grad_var.name)
            grad_var = pserver_program.global_block()._rename_var(
                origin_grad_name, splited_grad_name)
        inputs = {
            "Param": [param_var],
            "Grad": [grad_var],
            "LearningRate": [lr_var]
        }
        outputs = {"ParamOut": [param_var]}
        # only support sgd now
        logging.warn(
            "distribute lookup table only support sgd optimizer, change it's optimizer to sgd instead of "
            + table_opt_op.type)
        table_opt_block.append_op(type="sgd", inputs=inputs, outputs=outputs)
        # add table parameter gradient and it's block id to grad_to_block_id
        grad_to_block_id.append(grad_var.name + ":" + str(table_opt_block.idx))
        return table_opt_block
def _create_checkpoint_save_block(self, pserver_program, pre_block_idx):
"""
create a new block to handle save checkpoint.
"""
pserver_program.global_block().create_var(
name="kLookupTablePath",
persistable=True,
type=core.VarDesc.VarType.RAW)
checkpoint_save_block = pserver_program._create_block(pre_block_idx)
# this 'file_path' do not be used in save lookup table variable
checkpoint_save_block.append_op(
type='save',
inputs={'X': [self.table_name]},
outputs={},
attrs={'file_path': "none"})
return checkpoint_save_block.idx
    def _create_vars_from_blocklist(self,
                                    program,
                                    block_list,
                                    add_trainer_suffix=False):
        """
        Create vars for each split.
        NOTE: only grads need to be named for different trainers, use
              add_trainer_suffix to rename the grad vars.
        Args:
            program (ProgramDesc): ProgramDesc which gradients blong.
            block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
            add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
        Returns:
            var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping
                from original var name to each var split.
        """
        # varname->[(block_id, current_block_size)]
        block_map = collections.OrderedDict()
        var_mapping = collections.OrderedDict()
        # group the "name:id:size" strings by variable name
        for block_str in block_list:
            varname, offset, size = block_str.split(":")
            if varname not in block_map:
                block_map[varname] = []
            block_map[varname].append((int(offset), int(size)))
        for varname, splited in six.iteritems(block_map):
            orig_var = program.global_block().var(varname)
            if len(splited) == 1:
                # single block: reuse the original var (optionally renamed
                # in place with a ".trainer_N" suffix for sync-mode grads)
                if self.sync_mode and add_trainer_suffix:
                    new_var_name = "%s.trainer_%d" % \
                        (orig_var.name, self.trainer_id)
                    program.global_block()._rename_var(varname, new_var_name)
                    var_mapping[varname] = \
                        [program.global_block().var(new_var_name)]
                else:
                    var_mapping[varname] = \
                        [program.global_block().var(orig_var.name)]
                continue
            var_mapping[varname] = []
            orig_shape = orig_var.shape
            # elements per dim-0 row of the original tensor
            orig_dim1_flatten = 1
            if len(orig_shape) >= 2:
                orig_dim1_flatten = reduce(lambda x, y: x * y, orig_shape[1:])
            for i, block in enumerate(splited):
                size = block[1]
                # block sizes are element counts; convert to dim-0 rows
                rows = size // orig_dim1_flatten
                splited_shape = [rows]
                if len(orig_shape) >= 2:
                    splited_shape.extend(orig_shape[1:])
                new_var_name = ""
                if self.sync_mode and add_trainer_suffix:
                    new_var_name = "%s.block%d.trainer_%d" % \
                        (varname, i, self.trainer_id)
                else:
                    new_var_name = "%s.block%d" % \
                        (varname, i)
                var = program.global_block().create_var(
                    name=new_var_name,
                    persistable=False,
                    dtype=orig_var.dtype,
                    type=orig_var.type,
                    shape=splited_shape)  # flattend splited var
                var_mapping[varname].append(var)
        program.global_block()._sync_with_cpp()
        return var_mapping
def _clone_var(self, block, var, persistable=True):
return block.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=persistable)
@staticmethod
def _get_splited_var_sections(splited_vars):
height_sections = []
for v in splited_vars:
height_sections.append(v.shape[0])
return height_sections
def _insert_split_op(self, program, orig_var, index, splited_vars):
height_sections = self._get_splited_var_sections(splited_vars)
if orig_var.type == core.VarDesc.VarType.SELECTED_ROWS:
sparse_param_name = self.grad_name_to_param_name[orig_var.name]
if self._is_input_of_remote_sparse_update_op(sparse_param_name):
self.sparse_param_to_height_sections[
sparse_param_name] = height_sections
program.global_block()._insert_op(
index=index + 1,
type="split_selected_rows",
inputs={"X": orig_var},
outputs={"Out": splited_vars},
attrs={
"height_sections": height_sections,
RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE
})
elif orig_var.type == core.VarDesc.VarType.LOD_TENSOR:
program.global_block()._insert_op(
index=index + 1,
type="split_byref",
inputs={"X": orig_var},
outputs={"Out": splited_vars},
attrs={
"sections": height_sections,
RPC_OP_ROLE_ATTR_NAME: DIST_OP_ROLE_ATTR_VALUE
})
else:
AssertionError("Variable type should be in set "
"[LOD_TENSOR, SELECTED_ROWS]")
def _get_optimizer_input_shape(self, op_type, varkey, orig_shape,
param_shape):
"""
Returns the shape for optimizer inputs that need to be reshaped when
Param and Grad is split to multiple servers.
"""
# HACK(typhoonzero): Should use functions of corresponding optimizer in
# optimizer.py to get the shape, do not bind this in the transpiler.
if op_type == "adam":
if varkey in ["Moment1", "Moment2"]:
return param_shape
elif op_type == "adagrad":
if varkey == "Moment":
return param_shape
elif op_type == "adamax":
if varkey in ["Moment", "InfNorm"]:
return param_shape
elif op_type in ["momentum", "lars_momentum"]:
if varkey == "Velocity":
return param_shape
elif op_type == "rmsprop":
if varkey in ["Moment", "MeanSquare"]:
return param_shape
elif op_type == "decayed_adagrad":
if varkey == "Moment":
return param_shape
elif op_type == "ftrl":
if varkey in ["SquaredAccumulator", "LinearAccumulator"]:
return param_shape
elif op_type == "sgd":
pass
else:
raise ValueError(
"Not supported optimizer for distributed training: %s" %
op_type)
return orig_shape
def _get_varname_parts(self, varname):
# returns origin, blockid, trainerid
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1:]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1:trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0:min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
def _orig_varname(self, varname):
orig, _, _ = self._get_varname_parts(varname)
return orig
    def _append_pserver_grad_merge_ops(self, optimize_block,
                                       grad_varname_for_block, endpoint,
                                       grad_to_block_id, origin_program):
        """
        Append ops that merge the per-trainer gradients of one grad block
        into a single averaged var (sync mode only), and record the
        "merged_var_name:block_idx" mapping in ``grad_to_block_id``.

        Returns:
            Variable|None: the merged grad var, or None when this endpoint
            does not own the given grad block.
        """
        program = optimize_block.program
        pserver_block = program.global_block()
        grad_block = None
        for g in self.param_grad_ep_mapping[endpoint]["grads"]:
            if self._orig_varname(g.name) == \
                    self._orig_varname(grad_varname_for_block):
                grad_block = g
                break
        if not grad_block:
            # do not append this op if current endpoint
            # is not dealing with this grad block
            return None
        orig_varname, block_name, trainer_name = self._get_varname_parts(
            grad_block.name)
        if block_name:
            merged_var_name = '.'.join([orig_varname, block_name])
        else:
            merged_var_name = orig_varname
        merged_var = pserver_block.vars[merged_var_name]
        grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx))
        if self.sync_mode and self.trainer_num > 1:
            # sum the per-trainer receive vars, then scale by 1/trainer_num
            # to average the gradients
            vars2merge = []
            for i in range(self.trainer_num):
                per_trainer_name = "%s.trainer_%d" % \
                    (merged_var_name, i)
                vars2merge.append(pserver_block.vars[per_trainer_name])
            optimize_block.append_op(
                type="sum",
                inputs={"X": vars2merge},
                outputs={"Out": merged_var},
                attrs={"use_mkldnn": False})
            optimize_block.append_op(
                type="scale",
                inputs={"X": merged_var},
                outputs={"Out": merged_var},
                attrs={"scale": 1.0 / float(self.trainer_num)})
        return merged_var
    def _append_dc_asgd_ops(self, block, param_var, grad_var):
        """Append delay-compensated ASGD ops to *block*; return the var
        holding the compensated gradient.

        The appended ops compute
            o4 = grad + grad * grad * (param - param_bak)
        where param_bak is the calling trainer's stale parameter copy.
        (The lambda scale factor is still a TODO below.)
        """
        # NOTE: can not use grammar candy here, should put ops in specific block
        local_param_bak = block.create_var(
            name="%s.local_bak" % param_var.name,
            shape=param_var.shape,
            type=param_var.type,
            dtype=param_var.dtype,
            persistable=False)
        # trainer_id_var is block local
        trainer_id_var = block.create_var(
            name="@TRAINER_ID@",
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype=core.VarDesc.VarType.INT64,
            shape=[1],
            persistable=False)

        # ref_inputs = [x[1] for x in self.param_bak_list]
        # Select only the backup copy that belongs to this parameter.
        ref_inputs = []
        for p, p_bak in self.param_bak_list:
            if p.name == param_var.name:
                ref_inputs.append(p_bak)
        # Pick the backup slice belonging to the calling trainer.
        block.append_op(
            type="ref_by_trainer_id",
            inputs={"X": ref_inputs,
                    "TrainerId": trainer_id_var},
            outputs={"Out": local_param_bak})

        def __create_temp_var__():
            # Fresh non-persistable temp with the parameter's shape/dtype.
            return block.create_var(
                name=unique_name.generate("tmp_dc_output"),
                shape=param_var.shape,
                type=param_var.type,
                dtype=param_var.dtype,
                persistable=False)

        o1 = __create_temp_var__()  # o1 = param - param_bak
        block.append_op(
            type="elementwise_sub",
            inputs={"X": param_var,
                    "Y": local_param_bak},
            outputs={"Out": o1})
        o2 = __create_temp_var__()  # o2 = o1 * grad
        block.append_op(
            type="elementwise_mul",
            inputs={"X": o1,
                    "Y": grad_var},
            outputs={"Out": o2})
        o3 = __create_temp_var__()  # o3 = o2 * grad
        block.append_op(
            type="elementwise_mul",
            inputs={"X": o2,
                    "Y": grad_var},
            outputs={"Out": o3})
        # TODO(typhoonzero): append scale
        o4 = __create_temp_var__()  # o4 = grad + o3
        block.append_op(
            type="elementwise_add",
            inputs={"X": grad_var,
                    "Y": o3},
            outputs={"Out": o4})
        return o4
    def _append_pserver_ops(self, optimize_block, opt_op, endpoint,
                            grad_to_block_id, origin_program, merged_var,
                            sparse_grad_to_param):
        """Clone one optimizer op into the pserver-side optimize block,
        remapping its Grad/Param/LearningRate and accumulator inputs to
        pserver-local variables.

        Returns early (appending nothing) when this endpoint does not host
        the op's parameter.  Sparse (SELECTED_ROWS) gradients are recorded
        in *sparse_grad_to_param* as "grad_name:param_name" strings.
        """
        program = optimize_block.program
        pserver_block = program.global_block()
        new_inputs = collections.OrderedDict()

        def _get_param_block(opt_op):
            # param is already created on global program
            param_block = None
            for p in self.param_grad_ep_mapping[endpoint]["params"]:
                if same_or_split_var(p.name, opt_op.input("Param")[0]):
                    param_block = p
                    break
            return param_block

        if self.config.enable_dc_asgd:
            param_var = _get_param_block(opt_op)
            dc = self._append_dc_asgd_ops(optimize_block, param_var, merged_var)

        # First pass: remap the three "well known" input slots.
        for key in opt_op.input_names:
            if key == "Grad":
                if self.config.enable_dc_asgd:
                    new_inputs[key] = dc
                else:
                    # Note!! This is for l2decay on sparse gradient, because it will create a new tensor for
                    # decayed gradient but not inplace modify the origin one
                    origin_grad_name = opt_op.input(key)[0]
                    if core.kNewGradSuffix(
                    ) in origin_grad_name and pserver_block.has_var(
                            origin_grad_name):
                        new_grad = pserver_block.var(origin_grad_name)
                        new_inputs[key] = new_grad
                    else:
                        new_inputs[key] = merged_var
            elif key == "Param":
                param_block = _get_param_block(opt_op)
                # This endpoint does not own the parameter: skip the op.
                if not param_block:
                    return
                tmpvar = pserver_block.create_var(
                    name=param_block.name,
                    persistable=True,
                    dtype=param_block.dtype,
                    shape=param_block.shape)
                new_inputs[key] = tmpvar
            elif key == "LearningRate":
                # learning rate variable has already be created by non-optimize op,
                # don't create it once again.
                lr_varname = opt_op.input(key)[0]
                if lr_varname in pserver_block.vars:
                    new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]]
                else:
                    origin_var = origin_program.global_block().vars[lr_varname]
                    tmpvar = pserver_block.create_var(
                        name=origin_var.name,
                        persistable=origin_var.persistable,
                        dtype=origin_var.dtype,
                        shape=origin_var.shape)
                    new_inputs[key] = tmpvar

        # Second pass: remaining inputs are optimizer accumulators (moments,
        # etc.) whose shape may need adjusting to the splited param shape.
        for key in opt_op.input_names:
            new_shape = None
            if key in ["Param", "Grad", "LearningRate"]:
                continue
            var = self.origin_program.global_block().vars[opt_op.input(key)[0]]
            param_var = new_inputs["Param"]
            # update accumulator variable shape
            new_shape = self._get_optimizer_input_shape(
                opt_op.type, key, var.shape, param_var.shape)
            tmpvar = pserver_block.create_var(
                name=var.name,
                persistable=var.persistable,
                dtype=var.dtype,
                shape=new_shape)
            new_inputs[key] = tmpvar

        # change output's ParamOut variable
        outputs = self._get_output_map_from_op(
            self.origin_program.global_block().vars, opt_op)
        outputs["ParamOut"] = new_inputs["Param"]
        optimize_block.append_op(
            type=opt_op.type,
            inputs=new_inputs,
            outputs=outputs,
            attrs=opt_op.all_attrs())
        # record sparse grad to param name
        if new_inputs["Grad"].type == core.VarDesc.VarType.SELECTED_ROWS:
            sparse_grad_to_param.append(
                str(new_inputs["Grad"].name) + ":" + str(new_inputs["Param"].name))
    def _get_pserver_grad_param_var(self, var, var_dict):
        """
        Return pserver side grad/param variable, return None
        if the variable is not grad/param, e.g.

        a@GRAD -> a@GRAD.block0
        a@GRAD -> a@GRAD (a is not splited)
        fc_0.w_0 -> fc_0.w_0.block_0
        fc_0.w_0 -> fc_0.w_0 (weight is not splited)
        _generated_var_123 -> None
        """
        grad_block = None
        for _, g in six.iteritems(var_dict):
            # Match on the origin name, ignoring .block/.trainer suffixes.
            if self._orig_varname(g.name) == self._orig_varname(var.name):
                # skip per trainer vars
                if g.name.find(".trainer_") == -1:
                    # only param or grads have splited blocks
                    if self._orig_varname(g.name) in self.grad_name_to_param_name or \
                            self._orig_varname(g.name) in self.param_name_to_grad_name:
                        grad_block = g
                        break
        return grad_block
    def _clone_lr_op(self, program, block, op):
        """Copy a learning-rate op into *block*, first cloning any of its
        input/output variables that *program* does not already contain.
        Returns the appended op."""
        inputs = self._get_input_map_from_op(
            self.origin_program.global_block().vars, op)
        for key, varlist in six.iteritems(inputs):
            if not isinstance(varlist, list):
                varlist = [varlist]
            for var in varlist:
                # NOTE(review): membership is tested with the Variable
                # *object* against the vars mapping, not var.name -- confirm
                # this matches the intended "already present" check.
                if var not in program.global_block().vars:
                    block._clone_variable(var)

        outputs = self._get_output_map_from_op(
            self.origin_program.global_block().vars, op)
        for key, varlist in six.iteritems(outputs):
            if not isinstance(varlist, list):
                varlist = [varlist]
            for var in varlist:
                if var not in program.global_block().vars:
                    block._clone_variable(var)

        return block.append_op(
            type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs())
def _append_pserver_non_opt_ops(self, optimize_block, opt_op):
program = optimize_block.program
# Append the ops for parameters that do not need to be optimized/updated
inputs = self._get_input_map_from_op(
self.origin_program.global_block().vars, opt_op)
for key, varlist in six.iteritems(inputs):
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
var = varlist[i]
# for ops like clipping and weight decay, get the splited var (xxx.block0)
# for inputs/outputs
grad_block = self._get_pserver_grad_param_var(
var, program.global_block().vars)
if grad_block:
varlist[i] = grad_block
elif var.name not in program.global_block().vars:
tmpvar = program.global_block()._clone_variable(var)
varlist[i] = tmpvar
else:
varlist[i] = program.global_block().vars[var.name]
inputs[key] = varlist
outputs = self._get_output_map_from_op(
self.origin_program.global_block().vars, opt_op)
for key, varlist in six.iteritems(outputs):
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
var = varlist[i]
grad_block = self._get_pserver_grad_param_var(
var, program.global_block().vars)
if grad_block:
varlist[i] = grad_block
elif var.name not in program.global_block().vars:
tmpvar = program.global_block()._clone_variable(var)
varlist[i] = tmpvar
else:
varlist[i] = program.global_block().vars[var.name]
outputs[key] = varlist
return optimize_block.append_op(
type=opt_op.type,
inputs=inputs,
outputs=outputs,
attrs=opt_op.all_attrs())
def _is_op_connected(self, op1, op2):
# If one op's input is another op's output or
# one op's output is another op's input, we say
# the two operator is connected.
if set(op1.desc.output_arg_names()) & set(op2.desc.input_arg_names()) or \
set(op1.desc.input_arg_names()) & set(op2.desc.output_arg_names()):
return True
return False
    def _create_ufind(self, optimize_ops):
        """Build a union-find over *optimize_ops*, unioning every pair of
        ops that share a variable (see _is_op_connected); connected
        components then correspond to per-parameter optimizer op groups."""
        # Create a unit find data struct by optimize ops
        ufind = UnionFind(optimize_ops)
        for i in range(len(optimize_ops)):
            for j in range(i, len(optimize_ops)):
                op1 = optimize_ops[i]
                op2 = optimize_ops[j]
                if self._is_op_connected(op1, op2):
                    ufind.union(op1, op2)
        return ufind
def _is_optimizer_op(self, op):
if "Param" in op.input_names and \
"LearningRate" in op.input_names:
return True
return False
    def _is_opt_op_on_pserver(self, endpoint, op):
        """Return True when *op*'s "Param" input is hosted on pserver
        *endpoint*, either as an exact name match or as one of the
        endpoint's splited parameter blocks."""
        param_names = [
            p.name for p in self.param_grad_ep_mapping[endpoint]["params"]
        ]
        if op.input("Param")[0] in param_names:
            return True
        else:
            for n in param_names:
                param = op.input("Param")[0]
                # same_or_split_var matches "w" against split names like
                # "w.block0"; n != param excludes the exact match handled above.
                if same_or_split_var(n, param) and n != param:
                    return True
        return False
def _get_input_map_from_op(self, varmap, op):
"""Returns a dict from op input name to the vars in varmap."""
iomap = collections.OrderedDict()
for key in op.input_names:
vars = []
for varname in op.input(key):
vars.append(varmap[varname])
if len(vars) == 1:
iomap[key] = vars[0]
else:
iomap[key] = vars
return iomap
def _get_output_map_from_op(self, varmap, op):
"""Returns a dict from op output name to the vars in varmap."""
iomap = collections.OrderedDict()
for key in op.output_names:
vars = []
for varname in op.output(key):
vars.append(varmap[varname])
if len(vars) == 1:
iomap[key] = vars[0]
else:
iomap[key] = vars
return iomap
    def _get_lr_ops(self):
        """Collect ops from the origin program whose role attribute marks
        them as learning-rate-schedule ops, either alone or combined with
        the optimize role bit."""
        lr_ops = []
        block = self.origin_program.global_block()
        for op in block.ops:
            role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
            # `|` binds tighter than `==`, so the second comparison is
            # against the combined LRSched|Optimize role value.
            if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \
                    role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \
                    int(OPT_OP_ROLE_ATTR_VALUE):
                lr_ops.append(op)
                log("append lr op: ", op.type)
        return lr_ops
    def _get_lr_ops_deprecated(self):
        """Older variant of _get_lr_ops: discovers learning-rate ops by
        graph connectivity instead of op-role attributes."""
        lr_ops = []
        # find learning rate variables by optimize op
        lr_vars = set()
        for op in self.optimize_ops:
            if self._is_optimizer_op(op):
                lr_vars.add(op.input("LearningRate")[0])

        find_ops = []
        # find ops which output is lr var
        block = self.origin_program.global_block()
        for op in block.ops:
            if set(op.output_arg_names) & lr_vars:
                find_ops.append(op)
        # make a union find struct by the ops in default_main_program
        ufind = UnionFind(block.ops)

        for op1 in block.ops:
            for op2 in block.ops:
                # NOTE: we need to skip all optimize ops, since it is connected
                # with forward/backward ops and lr ops, we only need the lr ops.
                if op1 != op2 and self._is_op_connected(op1, op2) and \
                        not self._is_optimizer_op(op1) and not self._is_optimizer_op(op2):
                    ufind.union(op1, op2)
        # find all ops which is related with lr var
        for op1 in block.ops:
            for op2 in find_ops:
                if ufind.is_connected(op1, op2):
                    lr_ops.append(op1)
                    # we only need to append op for once
                    break
        return lr_ops
    def _is_opt_role_op(self, op):
        """Return True when the op's role attribute carries the Optimize
        op-role value."""
        # NOTE: depend on oprole to find out whether this op is for
        # optimize
        op_maker = core.op_proto_and_checker_maker
        optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize
        if op_maker.kOpRoleAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):
            return True
        return False
def _get_optimize_pass(self):
"""
Get optimizer operators, parameters and gradients from origin_program
Returns:
opt_ops (list): optimize operators.
params_grads (dict): parameter->gradient.
"""
block = self.origin_program.global_block()
opt_ops = []
params_grads = []
# tmp set to dedup
optimize_params = set()
origin_var_dict = self.origin_program.global_block().vars
for op in block.ops:
if self._is_opt_role_op(op):
opt_ops.append(op)
if op.attr(OP_ROLE_VAR_ATTR_NAME):
param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
if not param_name in optimize_params:
optimize_params.add(param_name)
log("adding param_grad pair: ", param_name, grad_name)
params_grads.append([
origin_var_dict[param_name],
origin_var_dict[grad_name]
])
else:
pass
return opt_ops, params_grads
| 42.341031 | 155 | 0.571705 |
515bb5ba3549de8cfd0d3485a4c3a25fe773af10 | 817 | py | Python | jarviscli/utilities/animations.py | baptoutiego/Jarvis | 79c04845988d5f935338659899730ed882a6d76c | [
"MIT"
] | 2,605 | 2017-03-10T22:44:36.000Z | 2022-03-31T15:33:17.000Z | jarviscli/utilities/animations.py | baptoutiego/Jarvis | 79c04845988d5f935338659899730ed882a6d76c | [
"MIT"
] | 729 | 2017-03-11T00:06:46.000Z | 2022-03-31T22:04:44.000Z | jarviscli/utilities/animations.py | baptoutiego/Jarvis | 79c04845988d5f935338659899730ed882a6d76c | [
"MIT"
] | 1,181 | 2017-03-10T23:24:55.000Z | 2022-03-31T03:59:46.000Z | import sys
import time
import itertools
import threading
class SpinnerThread(threading.Thread):
    """Background thread that animates a rotating spinner on the command
    line while some long-running work is in progress."""

    def __init__(self, label="Hmmm... ", delay=0.2):
        super(SpinnerThread, self).__init__()
        self.label = label    # text printed in front of the spinner
        self.delay = delay    # seconds between animation frames
        self.running = False  # polled by run(); cleared by stop()

    def start(self):
        """Mark the spinner as running and launch the worker thread."""
        self.running = True
        super(SpinnerThread, self).start()

    def run(self):
        """Redraw the spinner frame until stop() clears the flag."""
        frames = itertools.cycle(r'-\|/')
        while self.running:
            sys.stdout.write('\r{}{}'.format(self.label, next(frames)))
            sys.stdout.flush()
            time.sleep(self.delay)

    def stop(self):
        """Stop the animation, wait for the thread, and reset the cursor
        to the start of the line."""
        self.running = False
        self.join()
        sys.stdout.write('\r')
        sys.stdout.flush()
24030b9502b44a8031675232c10119f83b87468c | 13,540 | py | Python | emission/planner.py | asbjorn/EmissionCalculatorLib | e03638c4ee6f502b89b648bc8c397f58c34ca2dc | [
"BSD-2-Clause"
] | 8 | 2017-10-27T14:52:17.000Z | 2020-07-22T23:18:40.000Z | emission/planner.py | NPRA/EmissionCalculatorLib | 750e7137c8115d26b2eec354ab3f5a65f76a8e21 | [
"BSD-2-Clause"
] | 4 | 2017-10-08T17:55:45.000Z | 2020-01-22T12:30:39.000Z | emission/planner.py | asbjorn/EmissionCalculatorLib | e03638c4ee6f502b89b648bc8c397f58c34ca2dc | [
"BSD-2-Clause"
] | 5 | 2017-09-12T09:08:13.000Z | 2019-07-29T19:40:37.000Z | import json
try:
from urllib.request import urlopen # Python 3
from urllib.parse import urlencode
except ImportError:
from urllib import urlopen # Python 2
from urllib import urlencode
import socket
import math
from . import vehicles, log
from . import EmissionsJsonParser
from .exceptions import RouteError
from . import models
from six.moves import urllib
def enum(**named_values):
    """Build an ad-hoc enumeration: a new class named 'Enum' whose class
    attributes are the given keyword arguments."""
    return type('Enum', (), dict(named_values))
# List of possible pollutant types.
# Values are the pollutant names used when querying emission parameters.
PollutantTypes = enum(
    CH4='CH4',
    CO='CO',
    EC='EC',
    NOx='NOx',
    PM_EXHAUST='PM Exhaust',
    VOC='VOC')

# URL to remote route webservice (NVDB / vegvesen.no routing service)
ROUTE_URL_BASE = "https://www.vegvesen.no/ws/no/vegvesen/ruteplan/routingService_v1_0/routingService/"
class Route:
    """Represent a single route alternative from the NVDB RoutingService,
    together with the emissions computed for it.

    Attributes:
        distance: total route length in meters.
        minutes: total travel time in minutes.
        path: list of [x, y, z] points describing the route geometry.
        pollutants: pollutant name -> list of per-segment emission values.
        distances: cumulative distance profiles (km) added by the planner.
        id: index of this route within the webservice response.
    """

    def __init__(self, distance, minutes, path, id):
        self.distance = distance
        self.minutes = minutes
        self.path = path
        self.pollutants = {}
        self.distances = []
        self.id = id

    def hours_and_minutes(self):
        """Return hours:minutes as a string
        representation, based on the total amount
        of minutes for the route.

        Note: components are not zero padded (65 minutes -> "1:5").
        """
        hours, minutes = divmod(self.minutes, 60)
        return "{}:{}".format(hours, minutes)

    def velocity(self):
        """Return the average speed over the route in km/h."""
        total_time = self.minutes * 60  # travel time in seconds
        return (self.distance / total_time) * 3.6

    def add_pollutant(self, p, calc_emission):
        """Append one segment's emission value for pollutant *p*."""
        if p not in self.pollutants:
            self.pollutants[p] = []
        self.pollutants[p].append(calc_emission)

    def add_distances(self, distances):
        """Record a cumulative distance profile (km) for this route."""
        self.distances.append(distances)

    def total_emission(self, pollutant):
        """Return the summed emission over all segments for *pollutant*."""
        total = sum(self.pollutants[pollutant])
        return total

    def __repr__(self):
        # Bug fix: this method only existed under the misspelled name
        # ``__repl__``, so ``repr(route)`` never used it.
        fmt = "Route(distance={}, minutes={})"
        return fmt.format(self.distance, self.minutes)

    # Backward-compatible alias for callers of the old (misspelled) name.
    __repl__ = __repr__

    def __str__(self):
        return self.__repl__()

    def __eq__(self, other):
        # Routes compare (and order, below) by travel time only.
        return self.minutes == other.minutes

    def __lt__(self, other):
        return self.minutes < other.minutes
class RouteSet:
    """A mutable, ordered collection of Route objects."""

    def __init__(self, routes=None):
        # Create the list per instance to avoid a shared mutable default.
        if routes is None:
            self._lst = []
        else:
            self._lst = routes

    def __getitem__(self, item):
        return self._lst[item]

    def __iter__(self):
        return iter(self._lst)

    def __len__(self):
        return len(self._lst)

    def add(self, route):
        """Append a route to the set."""
        self._lst.append(route)

    def __repr__(self):
        # Bug fix: previously only available as the misspelled ``__repl__``.
        return "RouteSet({})".format("\n".join([str(r) for r in self._lst]))

    # Backward-compatible alias for callers of the old (misspelled) name.
    __repl__ = __repr__

    def __str__(self):
        return self.__repl__()

    def sort(self, key=None, reverse=False):
        """Sort the routes in place (Route orders by travel time)."""
        self._lst.sort(key=key, reverse=reverse)

    def __hash__(self):
        # Bug fix: ``hash(self._lst)`` always raised TypeError because
        # lists are unhashable.  RouteSet defines no __eq__, so default
        # identity-based hashing is the consistent choice.
        return object.__hash__(self)
class Planner:
"""This class takes a start, stop and vehicle input to give the user
a set of possible road routes sorted after the least pollution. Also
more metadata about each route is provided.
"""
    def __init__(self, start, stop, vehicle):
        """Store the endpoints and vehicle.

        Args:
            start: (x, y) coordinate pair of the route start.
            stop: (x, y) coordinate pair of the route end.
            vehicle: a vehicles.Vehicle instance.

        Raises:
            ValueError: when *vehicle* is not a vehicles.Vehicle.
        """
        self._start = start
        self._stop = stop
        if not isinstance(vehicle, vehicles.Vehicle):
            raise ValueError("Vehicle is not of correct type. Check vehicle implementations.")
        self._vehicle = vehicle
        # self._emissionJson = EmissionsJsonParser(vehicle)
        # self._emissionJson._init_values_from_input_file()
        self._emissionDb = None  # EmissionsJsonParser(self._vehicle)
        self.routes = RouteSet()
        self._pollutants = {}
    @property
    def pollutants(self):
        # Pollutant types registered via add_pollutant() (name -> placeholder).
        return self._pollutants
    def add_pollutant(self, pollutant_type):
        """Register a pollutant type to include in the emission calculation.

        Raises:
            ValueError: when *pollutant_type* is not one of PollutantTypes.
        """
        # validate input
        if pollutant_type not in PollutantTypes.__dict__.values():
            raise ValueError("pollutant_type needs to be one of the types defined in planner.PollutantTypes")

        if pollutant_type not in self._pollutants:
            self._pollutants[pollutant_type] = None
        else:
            log.debug("warning: pollutant already added..")
        log.debug("self._pollutants = {}".format(self._pollutants))
@property
def coordinates(self):
return "{start[0]},{start[1]};{end[0]},{end[1]}".format(
start=self._start, end=self._stop)
    @staticmethod
    def build_url(vehicle, coordinates, format="json", geometryformat="isoz"):
        """Construct a well formed url for the routing service which
        NPRA is using.
        """
        # A negative load means "unspecified"; send 0 to the service then.
        load = vehicle.load if vehicle.load > -1.0 else 0
        params = {
            "format": format,
            "height": vehicle.height,
            "length": vehicle.length,
            "stops": coordinates,
            "load": load,
            "geometryformat": geometryformat,
            "lang": "nb-no",
        }
        return '?'.join([ROUTE_URL_BASE, urlencode(params)])
    def _get_routes(self):
        """Call the routing webservice and store the decoded JSON response
        in ``self._json_data``.

        Raises:
            RouteError: on network failure or malformed response.
        """
        # NOTE(review): this sets the process-wide default socket timeout,
        # affecting every socket created afterwards.
        socket.setdefaulttimeout(30)
        try:
            url = Planner.build_url(self._vehicle, self.coordinates)
            log.debug("Calling: {}".format(url))
            log.debug("coordinates: {}".format(self.coordinates))

            req = urllib.request.Request(url)
            response = urllib.request.urlopen(req)
            data = response.read()
            self._json_data = json.loads(data.decode("utf-8"))
            # NOTE(review): this raises when 'messages' IS present, yet the
            # error text says "Missing" -- confirm the intended condition.
            if 'messages' in self._json_data:
                raise RouteError("Missing 'messages' in returned JSON data.")
        except IOError as err:
            log.debug("ioerror: {}".format(err))
            self._json_data = {}
            raise RouteError("IOError: {}".format(err))
        except ValueError:
            log.warning("Bad data from remote routing service: \n{}".format(data))
            self._json_data = {}
            raise RouteError("Bad data from remote routing service: \n{}".format(data))
@staticmethod
def _get_distance_2d(point1, point2):
distance = math.sqrt((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)
return distance
@staticmethod
def _get_distance_3d(point1, point2):
distance = math.sqrt((point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2 + (point2[2] - point1[2]) ** 2)
return distance
@staticmethod
def _get_slope(point1, point2):
distance = Planner._get_distance_3d(point1, point2)
slope = 0.0
if distance:
slope = math.degrees(math.asin((float(point2[2]) - float(point1[2])) / distance))
return slope
    def _get_pollutants_for_vehicle(self):
        """Retrieve pollutions parameters for the vehicle provided
        to the planner. Only include the pollutants provided in
        'self._pollutants'

        Raises:
            ValueError: when the vehicle's category, fuel or segment is
                not found in the database.
        """
        from . import session
        category = models.Category.get_for_type(self._vehicle)
        if not category:
            raise ValueError("Unable to find Category for vehicle: {}".format(category))
        fuel = session.query(models.Fuel).filter_by(name=self._vehicle.fuel_type).first()
        if not fuel:
            raise ValueError("Unable to find Fuel in database: name={}".format(self._vehicle.fuel_type))
        segment = session.query(models.Segment).filter_by(name=self._vehicle.segment).first()
        if not segment:
            raise ValueError("Unable to find segment in database: name={}".format(str(self._vehicle.segment)))
        # Mandatory filters.
        filter_parms = {
            "category": category,
            "fuel": fuel,
            "segment": segment
        }
        # Optional filters, added only when the vehicle specifies them.
        euro_std = session.query(models.EuroStd).filter_by(name=self._vehicle.euro_std).first()
        if euro_std:
            filter_parms.update({"eurostd": euro_std})
        mode = session.query(models.Mode).filter_by(name=self._vehicle.mode).first()
        if mode:
            filter_parms.update({"mode": mode})
        if self._vehicle.load > -1.0:
            filter_parms.update({"load": self._vehicle.load})

        # Get Parameters based on the other items found above
        params = session.query(models.Parameter).filter_by(**filter_parms)
        return params.all()
    def get_emission(self, parameters, slope=None):
        """Return the emission factor for one pollutant at road *slope*.

        With several parameter rows (one per tabulated slope) the value is
        taken from the exact slope match when present, otherwise
        extrapolated (positive slopes) or interpolated (negative slopes)
        from the tabulated values.  With a single row it is computed
        directly.
        """
        pollutant = None
        if len(parameters) > 1:
            # We have many parameters instances for a single pollutant.
            # This means that we have multiple 'slopes' in our table.
            # Need therefore to find slope or extrapolate/interpolate the value.
            positive_slopes = [0, 0.02, 0.04, 0.06]
            negative_slopes = [-0.06, -0.04, -0.02, 0]
            x = [x for x in parameters if x.slope == slope]
            if any(x):
                pollutant = x[0]
            else:
                slopes_for_pollutant = []
                if slope > 0.0:
                    tmp_pollutants = [x for x in parameters if x.slope in positive_slopes]
                    slopes_for_pollutant = map(Planner.calculate, tmp_pollutants)
                    # NOTE(review): Extrapolate is not among this module's
                    # visible imports -- confirm it is in scope at runtime.
                    extrapolate = Extrapolate(positive_slopes, slopes_for_pollutant)
                    tmp = extrapolate[slope]
                    log.debug("Extrapolated value: {}".format(tmp))
                    return tmp
                else:
                    tmp_pollutants = [x for x in parameters if x.slope in negative_slopes]
                    slopes_for_pollutant = map(Planner.calculate, tmp_pollutants)
                    # NOTE(review): Interpolate is not among this module's
                    # visible imports -- confirm it is in scope at runtime.
                    interpolate = Interpolate(negative_slopes, slopes_for_pollutant)
                    tmp = interpolate[slope]
                    log.debug("Interpolated value: {}".format(tmp))
                    return tmp
        else:
            pollutant = parameters[0]
        # Exact-match or single-row case: evaluate the equation directly.
        tmp = Planner.calculate(pollutant)
        log.debug("tmp: {}".format(tmp))
        return tmp
@staticmethod
def calculate(parameter):
"""Equation copied from the EU spreadsheet
"""
alpha = parameter.ALPHA
beta = parameter.BETA
delta = parameter.DELTA
epsilon = parameter.EPSILON
gamma = parameter.GAMMA
hta = parameter.HTA
reduct_fact = parameter.REDUCTIONFACTOR
speed = parameter.SPEED
v_max = parameter.MAXSPEED
v_min = parameter.MINSPEED
zita = parameter.ZITA
""" ((alpha*speed^2) + (beta*speed) + gamma + (delta/speed))/((epsilon*speed^2) * (zita * speed + htz))"""
try:
result = (alpha * math.pow(speed, 2)) + (beta * speed) + gamma + (delta / speed)
result /= (epsilon * math.pow(speed, 2)) + ((zita * speed) + hta)
result *= (1 - reduct_fact)
except ZeroDivisionError:
result = 0.0
return result
    def _calculate_emissions(self):
        """Calculate total emission from a route of x,y,z points based on a path between
        two points (A -> B). https://www.vegvesen.no/vegkart/vegkart/.

        For a simple static emission calculation play with:
         - self._get_pollutants_for_vehicle()
         - Planner.calculate(parameter)
        """
        parameters = self._get_pollutants_for_vehicle()
        self.routes = RouteSet()

        if "routes" not in self._json_data:
            log.debug("Error in returned JSON data from web service.")
            log.debug("data: {}".format(self._json_data))
            return

        # Create a "set" of Routes. The planner web service will
        # return 2-4 routes with different paths.
        for idx, r in enumerate(self._json_data["routes"]["features"]):
            attributes = r.get("attributes")
            route = Route(distance=attributes.get("Total_Meters"),
                          minutes=attributes.get("Total_Minutes"),
                          path=r.get("geometry").get("paths")[0], id=idx)
            self.routes.add(route)

        log.debug("Nr of routes: {}".format(len(self.routes)))
        for i, route in enumerate(self.routes):
            # A list of x,y,z points that all together represents the route
            path_coordinates = route.path
            distances = []

            # Nifty little trick to loop over 'path_coordinates',
            # but keep a reference to the 'prev' item to calculate the
            # distance between them
            iter_points = iter(path_coordinates)
            prev = next(iter_points)
            for point in path_coordinates:
                if not distances:
                    # first point: start the cumulative profile (in km)
                    distances.append(Planner._get_distance_3d(prev, point) / 1000)
                else:
                    distances.append(distances[-1] + Planner._get_distance_3d(prev, point) / 1000)
                point_slope = Planner._get_slope(prev, point)

                # Calculate emission for each pollutants the user has asked for
                for p in self._pollutants:
                    parms = [x for x in parameters if x.pollutant.name.startswith(p)]
                    calc_emission = self.get_emission(parms, point_slope)
                    route.add_pollutant(p, calc_emission)
                prev = point
            route.add_distances(distances)
    def run(self):
        """
        Use the input data and send a HTTP request to the route planner.
        Construct a 'RouteSet' containing all the possible 'Route' objects.
        Also compute the pollution factor for each route based on the
        'Route' data and the vehicle chosen.
        """
        self._get_routes()
        self._calculate_emissions()
| 34.717949 | 120 | 0.599188 |
bb27096abd1900e76e4aadda580a72b8cbffc968 | 6,125 | py | Python | applications/structural_application/test_examples/cantilever2d.gid/cantilever2ddynamic_benchmarking.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/structural_application/test_examples/cantilever2d.gid/cantilever2ddynamic_benchmarking.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/structural_application/test_examples/cantilever2d.gid/cantilever2ddynamic_benchmarking.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
def FindNode(node_list, x, y, z):
    """Return (and print) the first node in *node_list* whose squared
    distance to (x, y, z) is below 1e-7; None when nothing matches."""
    tolerance_sq = 0.0000001  # compared against the squared distance
    for node in node_list:
        dist_sq = (node.X - x) ** 2 + (node.Y - y) ** 2 + (node.Z - z) ** 2
        if dist_sq < tolerance_sq:
            print(node)
            return node
def BenchmarkCheck(time, node1, node2, node3, node4):
    """Write the current displacements/reactions of the four monitored
    nodes to the Kratos benchmarking output (tolerance 1e-5 each)."""
    benchmarking.Output(time, "Time")
    benchmarking.Output(node1.GetSolutionStepValue(DISPLACEMENT_X), "Node 1 Displacement_x", 0.00001)
    benchmarking.Output(node2.GetSolutionStepValue(DISPLACEMENT_Y), "Node 2 Displacement_y", 0.00001)
    benchmarking.Output(node3.GetSolutionStepValue(REACTION_X), "Node 3 Reaction_x", 0.00001)
    benchmarking.Output(node4.GetSolutionStepValue(REACTION_Y), "Node 4 Reaction_y", 0.00001)
# def AnalyticalResults(time, node1, node2,node3, node4):
# benchmarking.Output(time, "Time")
# benchmarking.Output(-0.221921365586, "Node 1 Displacement_x", 0.00001)
# benchmarking.Output(-0.0361068223759, "Node 2 Displacement_y", 0.00001)
# benchmarking.Output( 51.6844785228, "Node 3 Reaction_x", 0.00001)
# benchmarking.Output( -123.134969306, "Node 4 Reaction_y", 0.00001)
#
#
import sys
kratos_benchmarking_path = '../../../../benchmarking' # kratos_root/benchmarking
sys.path.append(kratos_benchmarking_path)
import benchmarking
# import the configuration data as read from the GiD
import Kratos_Structural_Application_var
# find neighbours if required
def FindNeighbours():
    """Run the nodal and/or elemental neighbour searches when enabled in
    the GiD-generated settings module."""
    if(Kratos_Structural_Application_var.FindNodalNeighbours == "True"):
        number_of_avg_elems = 10
        number_of_avg_nodes = 10
        nodal_neighbour_search = FindNodalNeighboursProcess(model_part, number_of_avg_elems, number_of_avg_nodes)
        nodal_neighbour_search.Execute()
    if(Kratos_Structural_Application_var.FindElementalNeighbours == "True"):
        neighbour_calculator = FindElementalNeighboursProcess(model_part, 2, 10)
        neighbour_calculator.Execute()
# importing the rotational dofs degrees of freedom if necessary
def RotationalDofs():
    """Add rotational degrees of freedom to every node when enabled in
    the GiD-generated settings module."""
    if(Kratos_Structural_Application_var.Rotational_Dofs == "True"):
        for node in model_part.Nodes:
            node.AddDof(ROTATION_X)
            node.AddDof(ROTATION_Y)
            node.AddDof(ROTATION_Z)
#
#
from time import *
print(ctime())
t0 = clock()  # NOTE(review): time.clock was removed in Python 3.8

# including kratos path
from KratosMultiphysics import *
from KratosMultiphysics.StructuralApplication import *
from KratosMultiphysics.ExternalSolversApplication import *

# setting the domain size for the problem to be solved
domain_size = Kratos_Structural_Application_var.domain_size

# defining a model part
model_part = ModelPart("StructurePart")
model_part.AddNodalSolutionStepVariable(FORCE)
if(Kratos_Structural_Application_var.Rotational_Dofs == "True"):
    model_part.AddNodalSolutionStepVariable(ROTATION)

import structural_solver_dynamic as SolverType
SolverType.AddVariables(model_part)

# reading a model
name = Kratos_Structural_Application_var.problem_name
gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO(name, gid_mode, multifile, deformed_mesh_flag, write_conditions)
model_part_io = ModelPartIO(name)
model_part_io.ReadModelPart(model_part)

mesh_name = 0.0
gid_io.InitializeMesh(mesh_name)
gid_io.WriteMesh((model_part).GetMesh())
gid_io.FinalizeMesh()

# find neighbours if required
FindNeighbours();

# 2D isotropic constitutive law on property block 1
model_part.Properties[1].SetValue(CONSTITUTIVE_LAW, Isotropic2D())
print("Linear elastic model selected")
print(model_part)
print(model_part.Properties)

# the buffer size should be set up here after the mesh is read for the first time
model_part.SetBufferSize(3)

# importing the rotational dofs degrees of freedom if necessary
RotationalDofs()

# importing the solver files
SolverType.AddDofs(model_part)
solver = SolverType.DynamicStructuralSolver(model_part, domain_size)
solver.structure_linear_solver = SuperLUSolver()
solver.CalculateReactionFlag = True;
CT = Kratos_Structural_Application_var.Convergence_Tolerance;
AT = Kratos_Structural_Application_var.Absolute_Tolerance;

# choose the convergence criterion requested by the settings module
if(Kratos_Structural_Application_var.Convergence_Criteria == "Displacement_Criteria"):
    solver.conv_criteria = DisplacementCriteria(CT, AT)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "Residual_Criteria"):
    solver.conv_criteria = ResidualCriteria(CT, AT)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "And_Criteria"):
    Displacement = DisplacementCriteria(CT, AT)
    Residual = ResidualCriteria(CT, AT)
    solver.conv_criteria = AndCriteria(Residual, Displacement)
elif(Kratos_Structural_Application_var.Convergence_Criteria == "Or_Criteria"):
    Displacement = DisplacementCriteria(CT, AT)
    Residual = ResidualCriteria(CT, AT)
    solver.conv_criteria = OrCriteria(Residual, Displacement)

# NOTE(review): this overrides the SuperLUSolver assigned above -- confirm
# which linear solver is actually intended.
solver.structure_linear_solver = SkylineLUFactorizationSolver()

# nodes monitored by the benchmark checks
node_1 = FindNode(model_part.Nodes, 0.05, 1.00, 0.00)
node_2 = FindNode(model_part.Nodes, 0.00, 1.00, 0.00)
node_3 = FindNode(model_part.Nodes, 0.00, 0.00, 0.00)
node_4 = FindNode(model_part.Nodes, 0.05, 0.00, 0.00)

solver.Initialize()
(solver).SetEchoLevel(2);

Dt = 0.001
nsteps = 11

print("initializing results")
gid_io.InitializeResults(mesh_name, (model_part).GetMesh())
for step in range(0, nsteps):
    time = Dt * step
    model_part.CloneTimeStep(time)
    # print model_part.ProcessInfo()[TIME]
    # solving the fluid problem
    if(step > 3):
        solver.Solve()
        if (benchmarking.InBuildReferenceMode()):
            # AnalyticalResults(time, node_1, node_2, node_3, node_4)
            BenchmarkCheck(time, node_1, node_2, node_3, node_4)
        else:
            BenchmarkCheck(time, node_1, node_2, node_3, node_4)
    # print the results
    gid_io.WriteNodalResults(DISPLACEMENT, model_part.Nodes, time, 0)
    gid_io.WriteNodalResults(REACTION, model_part.Nodes, time, 0)
gid_io.FinalizeResults()
print("Completed Analysis")
b72e5c5d4ce2b9826e4f891d37dce2c6fa317430 | 6,435 | py | Python | contrib/pyminer/pyminer.py | Bitspender/h4 | ba58be16dd6f2e9d3d79d2d3f50ac33aab6f3593 | [
"MIT"
] | null | null | null | contrib/pyminer/pyminer.py | Bitspender/h4 | ba58be16dd6f2e9d3d79d2d3f50ac33aab6f3593 | [
"MIT"
] | null | null | null | contrib/pyminer/pyminer.py | Bitspender/h4 | ba58be16dd6f2e9d3d79d2d3f50ac33aab6f3593 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node.

    Python 2 code: uses httplib and py2 print statements."""
    OBJID = 1  # request id counter, bumped once per rpc() call

    def __init__(self, host, port, username, password):
        # Pre-compute the HTTP Basic auth header reused on every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Persistent connection with a 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the 'result' field on success, the server's 'error'
        object on an RPC-level error, or None on transport/parse failure.
        """
        # Augmented assignment on the instance shadows the class-level
        # OBJID, so ids become per-instance after the first call.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """Shortcut for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Shortcut for the 'getwork' RPC (optionally submitting *data*)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value.

    The mask literal carries no Python 2 ``L`` suffix: on Python 2 ints
    auto-promote to long so the result is unchanged, and the function now
    also parses on Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Return the 32-bit byte-swap of *x* (e.g. 0x12345678 -> 0x78563412).

    The final ``uint32`` masking is inlined (identical arithmetic), so the
    function is self-contained and free of the Python 2-only ``L`` literal
    that the old helper carried.
    """
    return ((x << 24) | ((x << 8) & 0x00ff0000) |
            ((x >> 8) & 0x0000ff00) | (x >> 24)) & 0xffffffff
def bufreverse(in_buf):
    """Reverse the byte order inside each 4-byte word of *in_buf*.

    The original unpacked each word with the native format '@I', numerically
    byte-swapped it, and repacked with the same format; since an
    unpack/repack round trip with one format is an identity, the net effect
    was always a plain per-word byte reversal, independent of host
    endianness. This version performs that reversal directly with slicing,
    which also makes it work on both Python 2 str and Python 3 bytes.

    len(in_buf) is assumed to be a multiple of 4 (true for every caller
    here: hex-decoded 80-byte headers, 32-byte hashes, 4-byte nonces).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i + 4][::-1])
    return b"".join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf*.

    Bytes inside each word keep their order; only the words are reversed.
    Joining with ``b""`` (instead of ``""``) keeps the function working on
    Python 2 str and makes it valid for Python 3 bytes as well.

    len(in_buf) is assumed to be a multiple of 4.
    """
    out_words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    out_words.reverse()
    return b"".join(out_words)
class Miner:
    """One mining worker: fetch work over RPC, scan nonces, submit solutions."""

    def __init__(self, id):
        self.id = id                # worker index, shown in HashMeter output
        self.max_nonce = MAX_NONCE  # nonces to scan per round; retuned in iterate()

    def work(self, datastr, targetstr):
        """Scan nonces against one unit of getwork data.

        Args:
            datastr (hex str): block data from getwork; the first 80 bytes
                (after per-word byte reversal) are the block header.
            targetstr (hex str): 256-bit proof-of-work target.

        Returns:
            tuple: (hashes_done, nonce_bin) where nonce_bin is the 4 packed
            bytes of the winning nonce, or None if no nonce in range won.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (shared by every nonce tried)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        # nonce range exhausted without a winner
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the work data and send upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # hex offsets 152:160 hold the nonce field of the 80-byte header
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one unit of work, scan it, retune the budget, submit wins."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # scale nonce budget so one scan lasts ~settings['scantime'] seconds,
        # capped just below the 32-bit nonce ceiling
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the node and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Per-process entry point: build a Miner for this worker id and run it forever."""
    worker = Miner(id)
    worker.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # parse the key=value config file into the global settings dict
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # fill in defaults for anything the config file omitted
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 62512
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # values read from the file are strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # spawn one mining subprocess per configured thread
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # block until all workers exit (they normally run forever)
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| 25.434783 | 84 | 0.664957 |
451c13ca5aca8c4042fab53f1bed3e6372ae9a86 | 4,506 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_service_tags_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_service_tags_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_service_tags_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Optional callback applied to (pipeline response, deserialized body, headers);
# when supplied via the 'cls' kwarg, its return value replaces the deserialized
# result of an operation.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations:
    """ServiceTagsOperations async operations.

    You should not instantiate this class directly. Instead, you should create
    a Client instance that instantiates it for you and attaches it as an
    attribute.

    Note: this module is generated by AutoRest (see the file header); avoid
    hand-editing the request-construction logic.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def list(
        self,
        location: str,
        **kwargs: Any
    ) -> "_models.ServiceTagsListResult":
        """Gets a list of service tag information resources.

        :param location: The location that will be used as a reference for version (not as a filter
         based on location, you will get the list of service tags with prefix details across all regions
         but limited to the cloud that your subscription belongs to).
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceTagsListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.ServiceTagsListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceTagsListResult"]
        # map well-known auth/not-found/conflict statuses to typed exceptions;
        # callers may override via the 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    # URL template consumed by the request construction above
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'}  # type: ignore
| 45.979592 | 139 | 0.683311 |
6f998a516b07267eb7a52e580b31872700fbbc8d | 27,300 | py | Python | gazoo_device/capabilities/interfaces/switchboard_base.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | 14 | 2020-11-05T23:23:32.000Z | 2022-03-01T18:59:29.000Z | gazoo_device/capabilities/interfaces/switchboard_base.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | 1 | 2021-06-24T19:20:50.000Z | 2021-06-24T19:20:50.000Z | gazoo_device/capabilities/interfaces/switchboard_base.py | isabella232/gazoo-device | 0e1e276d72333e713b47152815708b9c74c45409 | [
"Apache-2.0"
] | 5 | 2021-05-20T22:52:51.000Z | 2022-02-21T08:46:21.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for the Switchboard capability.
Switchboard is the backbone of all device interaction.
It provides the ability to interact with devices using standardized transport,
button, and expect APIs.
By separating these standardized APIs we can more easily test the logic and
eventually unit test device classes independent of hardware.
Switchboard implementation resides in gazoo_device/switchboard/switchboard.py.
"""
import abc
from typing import List
from gazoo_device import config
from gazoo_device.capabilities.interfaces import capability_base
from gazoo_device.switchboard import line_identifier
# Expect matching modes: match all patterns (any order), any one pattern,
# or all patterns in the given sequence.
MODE_TYPE_ALL = "all"
MODE_TYPE_ANY = "any"
MODE_TYPE_SEQUENTIAL = "sequential"
# Supported file-content verification method for verify_file_on_transport().
VERIFY_METHOD_MD5SUM = "md5sum"
class SwitchboardBase(capability_base.CapabilityBase):
"""Manages device interactions and writes everything to a single file.
This will spawn and manage 3 or more multiprocess subprocesses:
Process 0 (Main)
The primary process where the API/CLI/tests execute from;
responsible for initializing this module for each device. Performs
expect and Parser logic and closes subprocesses on shutdown or
transport errors.
Process 1..n (Transport)
One or more subprocesses responsible for performing all device
transport operations (e.g. open/close and read/write) and
communicating results to other subprocesses as needed using
queues.
Process n+1 (Log writer)
Performs all log writing operations received from log queue
shared with Transport subprocess. Log lines are written only for
completed log lines.
Process n+2 (Log filter)
Reads log lines from the log file written by the log writer
subprocess. Filters each log line read for desired events and
writes them to an event file. The main process can then use the
event file to query for relevant events.
"""
@abc.abstractmethod
def add_log_note(self, note):
"""Adds given note to device log file.
Args:
note (str): to write to the log file
"""
@abc.abstractmethod
def add_new_filter(self, filter_path):
"""Adds new log filter at path specified to LogFilterProcess.
Args:
filter_path (str): filter file to add
Raises:
RuntimeError: if LogFilterProcess is not available or running.
ValueError: if filter_path doesn't exist
"""
@abc.abstractmethod
def click(self, button, duration=0.5, port=0):
"""Press and release the button for the duration and port specified.
Args:
button (str): button to press and release
duration (float): seconds to wait before releasing button
port (int): which port to click on, 0 or 1.
Raises:
DeviceError: If buttons are not supported on the device or
button, duration, or port values are invalid
"""
@abc.abstractmethod
def click_and_expect(self,
button,
pattern_list,
duration=0.5,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type="log",
port=0,
mode="any",
raise_for_timeout=False):
"""Press and release button, log lines matching patterns are returned.
Args:
button (str): button to press and release
pattern_list (list): list of regex expressions to look for in the
lines
duration (int): seconds to press button before releasing it
timeout (float): seconds to look for the patterns
searchwindowsize (int): number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
port (int): which port to send on, 0 or 1
mode (str): type of expect to run ("any", "all" or "sequential")
raise_for_timeout (bool): Raise an exception if the expect times out
Raises:
DeviceError: If buttons are not supported on the device or
other arguments are invalid.
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Note:
Flushes the expect queue before and after an expect. Starts up
expect queue right before clicking button to catch fast responses.
"""
@abc.abstractmethod
def close(self):
"""Shuts down the subprocesses and closes the transports.
NOTE:
The current implementation relies on queues being garbage collected.
Instead of explicitly closing the queues, all queue references MUST
be deleted to
release the queues and prevent a memory leak!
"""
@abc.abstractmethod
def close_all_transports(self):
"""Leaves the switchboard architecture intact but closes the communication FDs.
This is used prior to the connections being closed, such as disconnecting an
ethernet or a serial connection. Only closes the ones open so if
device.close has already occurred, nothing will be closed.
"""
@abc.abstractmethod
def close_transport(self, port=0):
"""Closes the transport specified.
Args:
port (int or str): the transport port to close
Raises:
DeviceError: If port value is invalid or out of range.
"""
@abc.abstractmethod
def do_and_expect(self,
func,
func_args,
func_kwargs,
pattern_list,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type=line_identifier.LINE_TYPE_LOG,
mode=MODE_TYPE_ANY,
raise_for_timeout=False):
"""Executes function with given args, blocks until expect matches or timeout occurs.
Args:
func (method): name of function to be called
func_args (list): positional arguments specified to be passed to
function
func_kwargs (dict): keyword arguments specified to be passed to
function
pattern_list (list): list of regex expressions to look for in the
lines
timeout (float): seconds to look for the patterns
searchwindowsize (int): number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
mode (str): type of expect to run ("any", "all" or "sequential")
raise_for_timeout (bool): Raise an exception if the expect times out
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Raises:
DeviceError: If func is not callable
If other arguments are invalid
Note:
Input parameter "func" MUST NOT call "shell" nor another
"core.xxx_expect" method so as to avoid the nested "flush" problem
described in 'NEP-2343'.
"""
@abc.abstractmethod
def echo_file_to_transport(self,
source_file,
destination_path,
port=0,
bytes_per_echo=50):
r"""Transfers file to transport specified using echo commands.
Args:
source_file (path): to the file to transfer
destination_path (path): to transfer file to on device
port (int or str): the transport port to open
bytes_per_echo (int): call to use during file transfer
Raises:
DeviceError: If source_file doesn't exist, can't be opened, or
the port or bytes_per_echo values are invalid or
out of range.
Note:
The caller is responsible for preparing the device to receive
multiple echo commands to receive the file and only calling this
method for devices that support the following commands::
echo -ne > <destination_path>
echo -ne "\\x{:02x}" >> <destination_path>
"""
@abc.abstractmethod
def ensure_serial_paths_unlocked(self, communication_addresses: List[str]):
"""Ensures serial paths are longer locked by switchboard process after device is closed."""
@abc.abstractmethod
def expect(self,
pattern_list,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type=line_identifier.LINE_TYPE_ALL,
mode=MODE_TYPE_ANY,
raise_for_timeout=False):
"""Block until a regex pattern is matched or until a timeout time has elapsed.
Args:
pattern_list (list): list of regex expressions to look for in the
lines
timeout (float): seconds to look for the patterns
searchwindowsize (int): number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
mode (str): type of expect to run ("any", "all" or "sequential")
raise_for_timeout (bool): Raise an exception if the expect times out
Raises:
DeviceError: if arguments are not valid.
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Note:
Flushes the expect queue before and after an expect.
"""
@abc.abstractmethod
def get_line_identifier(self):
"""Returns the line identifier currently used by Switchboard."""
@property
@abc.abstractmethod
def number_transports(self) -> int:
"""Returns the number of transport processes used by Switchboard."""
@abc.abstractmethod
def open_all_transports(self):
"""Opens the communication FDs, assuming switchboard architecture is intact.
This is used after a physical connection has been reopened, such as
reconnecting an ethernet or a serial connection.
Only opens the ones closed so if device.close has already occurred, nothing
will be opened.
"""
@abc.abstractmethod
def open_transport(self, port=0, timeout=30.0):
"""Opens the transport specified.
Args:
port (int or str): the transport port to open
timeout (float): how long to wait for port to open.
Raises:
DeviceError: If port value is invalid or out of range.
"""
@abc.abstractmethod
def press(self, button, wait=0.0, port=0):
"""Presses the button for the port specified and waits the time specified.
Args:
button (str): button to press
wait (float): seconds to wait before returning
port (int): which port to click on, 0 or 1
Raises:
DeviceError: If buttons are not supported on the device or
button, wait, or port values are invalid
"""
@abc.abstractmethod
def press_and_expect(self,
button,
pattern_list,
wait=0.0,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type="log",
port=0,
mode="any"):
"""Press button and expect for pattern_list and other arguments provided.
Args:
button (str): button to press
pattern_list (list): list of regex expressions to look for in the
lines
wait (float): seconds to wait
timeout (float): Seconds to look for the patterns
searchwindowsize (int): Number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
port (int): Which port to send on, 0 or 1
mode (str): type of expect to run ("any", "all" or "sequential")
Raises:
DeviceError: If buttons are not supported on the device or
button, wait, port, or expect values are invalid
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Note:
Flushes the expect queue before and after an expect. Starts up
expect queue right before pressing button to catch fast responses.
"""
@abc.abstractmethod
def release(self, button, port=0):
"""Release the button for the port specified.
Args:
button (str): button to release
port (int): Which port to release button on, 0 or 1
Raises:
DeviceError: If buttons are not supported on the device or
button or port values are invalid
"""
@abc.abstractmethod
def release_and_expect(self,
button,
pattern_list,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type="log",
port=0,
mode="any"):
"""Release button, matches pattern_list in loglines as specified by expect_type.
Args:
button (str): button to release
pattern_list (list): list of regex expressions to look for in the
lines
timeout (float): seconds to look for the patterns
searchwindowsize (int): number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
port (int): which port to send on, 0 or 1
mode (str): type of expect to run ("any", "all" or "sequential")
Raises:
DeviceError: If buttons are not supported on the device or
button, port, or expect values are invalid
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Note:
Flushes the expect queue before and after an expect. Starts up
expect queue right before releasing button to catch fast responses.
"""
@abc.abstractmethod
def send(self, command, port=0, slow=False, add_newline=True, newline="\n"):
"""Sends the command to the device on the port (transport) specified.
Args:
command (str): to send to the device
port (int): or transport to send command to
slow (bool): flag indicating command should be sent byte-by-byte
add_newline (bool): flag indicating newline should be added to
command if missing
newline (str): character to check for and add if missing at the end
of the command
Raises:
DeviceError: if port specified is an invalid value or out of
range of the available ports
"""
@abc.abstractmethod
def send_and_expect(self,
command,
pattern_list,
timeout=30.0,
searchwindowsize=config.SEARCHWINDOWSIZE,
expect_type=line_identifier.LINE_TYPE_ALL,
mode=MODE_TYPE_ANY,
port=0,
slow=False,
add_newline=True,
newline="\n",
command_tries=1,
raise_for_timeout=False):
r"""Sends the command and expects on the patterns provided.
Note: this method does not prepend the command with a wakeup character
which some devices require. The reason this may be needed is because
some devices go into a sleep state to save energy and will wakeup on
receiving the first character sent to it which means the character won't
get registered into the command buffer. This can be dealt with by
prepending the command with a nop character that won't affect the
command being executed in the case that the device has already woken up.
If there is an issue with this method, try adding "\n" in front of the
command. E.g. "\nsome_command"
Args:
command (str): command to send to the device
pattern_list (list): list of regex expressions to look for in the
lines
timeout (float): Seconds to look for the patterns
searchwindowsize (int): Number of the last bytes to look at
expect_type (str): 'log', 'response', or 'all'
mode (str): type of expect to run ("any", "all" or "sequential")
port (int): Which port to send on, 0 or 1
slow (bool): flag indicating command should be sent byte-by-byte
add_newline (bool): flag indicating newline should be added to
command if missing
newline (str): character to check for and add if missing at the end
of the command
command_tries (int): The number of tries to send the command if it
times out.
raise_for_timeout (bool): Raise an exception if the expect times out
Raises:
DeviceError: if port specified or other expect arguments are
invalid, or timed out and raise_for_timeout was True.
Returns:
ExpectResponse: Object with values for the following attributes:
.index (int): the index of the expected pattern (None if
timeout).
.timedout (bool): indicating whether it timed out.
.time_elapsed (int): number of seconds between start and finish.
.match (str): re.group of pattern match.
.before (str): all the characters looked at before the match.
.after (str): all the characters after the first matching
character.
.remaining (list): remaining patterns not matched
.match_list (list): re.search pattern MatchObjects
Note:
Flushes the expect queue before and after an send.
"""
@abc.abstractmethod
def set_max_log_size(self, max_log_size):
"""Sets the max_log_size value to the value provided.
Args:
max_log_size (int): the max log size to use for log rotation.
Raises:
ValueError: if max_log_size is not an integer value
RuntimeError: if log writer process is not running
Note:
A max_log_size of 0 means no log rotation should ever occur.
"""
@abc.abstractmethod
def start_new_log(self, log_path):
"""Changes log filter and writer to use a new log path provided.
Args:
log_path (str): to log file to switch to
Raises:
RuntimeError: if LogWriterProcess is not available or running.
"""
@abc.abstractmethod
def transport_jlink_flash(self, image_path, port=0):
"""Sends the flash command to the 'J-Link' transport.
Args:
image_path (str): path to the image file to be flashed onto the
device.
port (int): the transport port to send the command to.
"""
@abc.abstractmethod
def transport_jlink_reset(self, port=0):
"""Sends the reset command to the J - Link transport.
Args:
port(int): the transport port to send the command to.
"""
@abc.abstractmethod
def transport_serial_set_baudrate(self, new_baudrate, port=0):
"""Sets the serial interface baudrate to a different baudrate.
Args:
new_baudrate(int): new baudrate to be set, generally 115200 or
921600
port(int or str): the transport port to open
Raises:
DeviceError
"""
@abc.abstractmethod
def transport_serial_send_xon(self, port=0):
"""Sends the XON control character to the serial interface.
Args:
port(int or str): the transport port to open
"""
@abc.abstractmethod
def transport_serial_send_xoff(self, port=0):
"""Sends the XOFF control character to the serial interface.
Args:
port(int or str): the transport port to open
"""
@abc.abstractmethod
def transport_serial_send_break_byte(self, port=0):
"""Sends the break control character to the serial interface (Ctrl + C).
Args:
port(int or str): the transport port to open
"""
@abc.abstractmethod
def verify_file_on_transport(self,
source_file,
destination_path,
port=0,
method=VERIFY_METHOD_MD5SUM):
"""Verifies source file contents matches destination_path on transport using method.
Args:
source_file(path): to compare content to on transport
destination_path(path): to file to verify on transport
port(int or str): the transport port to open
method(str): the method to use to verify destination_path
Raises:
DeviceError: If source_file doesn't exist, can't be opened, or
the port or method values are invalid or out of range.
Returns:
bool: A boolean status indicating verification was successful.
Note:
The caller is responsible for preparing the device to receive one
of the following verification commands::
md5sum < destination_path >
"""
@abc.abstractmethod
def xmodem_file_to_transport(self, source_file, port=0):
"""Transfers file to transport specified using the XModem protocol.
Args:
source_file(path): to the file to transfer
port(int or str): the transport port to open
Raises:
DeviceError: If source_file doesn't exist, can't be opened, or
the port value provided is invalid or out of range.
Returns:
bool: A boolean status indicating xmodem transfer was successful.
Note:
The caller is responsible for putting the transport into XModem
transfer mode before calling this method.
"""
@abc.abstractmethod
def add_transport_process(self, transport, **transport_process_kwargs):
"""Add a new transport process to the list of transport processes.
Args:
transport(Transport): transport to the device for this process
**transport_process_kwargs(dict): keyword arguments to the transport
process
transport_process_kwargs can be:
framer(DataFramer): DataFramer derived classes to use to frame
incoming raw data into raw lines. Defaults to None.
partial_line_timeout(float): time in seconds to wait before adding
partial lines to raw_data_queue and log_queue. Defaults to
transport_process.PARTIAL_LINE_TIMEOUT.
read_timeout(float): time to wait in seconds for transport reads.
Defaults to to transport_process._READ_TIMEOUT
max_read_bytes(int): to attempt to read on each transport read call.
Defaults to transport_process._MAX_READ_BYTES
max_write_bytes(int): to attempt to write on each transport write
call. Defaults to transport_process._MAX_WRITE_BYTES
Returns:
int: position of newly added transport process in list of transport
processes("port")
"""
  @abc.abstractmethod
  def delete_last_transport_process(self):
    """Stops and deletes the last transport process in self._transport_processes.

    Note:
      Just stopping a transport process does not delete it.
      All stopped processes are typically reopened after a device reboot;
      the process must be deleted to ensure it is not reopened after a
      device reboot.

      Since process numbers identify transports, deleting any transport
      other than the last one would shift the numbers of the transports
      after it by 1, breaking their usage. To prevent this, only deleting
      the last process is allowed for now; the proper solution would be to
      use some other form of identification for processes.

    Raises:
      DeviceError: if there's no transport process to delete.
    """
  @abc.abstractmethod
  def start_transport_process(self, process_num):
    """Start the transport process at position process_num in transport list.

    Args:
      process_num (int): position in the self._transport_processes list.
        This position is returned by a prior self.add_transport_process()
        call.

    Raises:
      DeviceError: if process_num has an invalid value.
    """
  @abc.abstractmethod
  def stop_transport_process(self, process_num):
    """Stop the transport process at position process_num in transport list.

    Args:
      process_num (int): number (list position) of the transport to stop.
    """
| 37.759336 | 95 | 0.6463 |
21dfc03b92d550eee5889d54e405f2322d999acd | 738 | py | Python | Itunes/trunk/appscript_2x/sample/aem/filter_reference.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | 1 | 2020-09-02T10:41:49.000Z | 2020-09-02T10:41:49.000Z | Itunes/trunk/appscript_2x/sample/aem/filter_reference.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | null | null | null | Itunes/trunk/appscript_2x/sample/aem/filter_reference.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from aem import *
# AppleScript equivalent:
#   tell app "Finder" to get every item of home whose name begins with "d"
#   and name is not "Documents"
# 'coregetd' is the Apple Event code for a "get data" event; '----' is the
# direct-parameter key. Python 2 syntax (print statement).
print Application(findapp.byname('Finder')).event('coregetd', {'----':
		app.property('home').elements('cobj').byfilter(
				its.property('pnam').beginswith('d') .AND (its.property('pnam').ne('Documents'))
		)
}).send()
# Result should be list of folders of home whose name begins with 'd' except for 'Documents', e.g.:
#
# [
# app.property('sdsk').elements('cfol').byname(u'Users').elements('cfol').byname(u'has').elements('cfol').byname(u'Desktop'),
# app.property('sdsk').elements('cfol').byname(u'Users').elements('cfol').byname(u'has').elements('cfol').byname(u'Downloads')
# ] | 35.142857 | 126 | 0.668022 |
23bf1aea9b5de90d6c3aad9001a6eef7899e4e71 | 2,526 | py | Python | config/settings/local.py | MattyPy/boxy | 6082dbc45512a5fbc2a2d7664613b1e04ba40ddb | [
"MIT"
] | null | null | null | config/settings/local.py | MattyPy/boxy | 6082dbc45512a5fbc2a2d7664613b1e04ba40ddb | [
"MIT"
] | 1 | 2020-04-30T12:50:58.000Z | 2020-04-30T12:50:58.000Z | config/settings/local.py | MattyPy/boxy | 6082dbc45512a5fbc2a2d7664613b1e04ba40ddb | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="lZ4s7adwNXVxSODOUc4XnoG4fLG0MtTON5J3OqlMR6PDVPLkTKGg9R25udw6zEAE",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Celery
# ------------------------------------------------------------------------------
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-always-eager
CELERY_TASK_ALWAYS_EAGER = True
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = True
# Your stuff...
# ------------------------------------------------------------------------------
| 42.1 | 97 | 0.58947 |
2612169e6141ba846183658957c09c8c58f7c923 | 12,060 | py | Python | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | pablodecm/mmdetection | 4587688b66cbd9c4d13ec9447da2d68b93ba07e5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | pablodecm/mmdetection | 4587688b66cbd9c4d13ec9447da2d68b93ba07e5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | pablodecm/mmdetection | 4587688b66cbd9c4d13ec9447da2d68b93ba07e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_upsample_layer
from mmcv.ops import Conv2d
from mmcv.ops.carafe import CARAFEPack
from torch.nn.modules.utils import _pair
from mmdet.core import auto_fp16, force_fp32, mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3//4 # 0.25 GB memory limit
@HEADS.register_module()
class FCNMaskHead(nn.Module):
    """Mask prediction head: ``num_convs`` convs over RoI features, an
    optional upsampling layer (deconv / nearest / bilinear / carafe), and a
    1x1 conv producing one mask logit map per class (a single map when
    ``class_agnostic`` is True)."""
    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 loss_mask=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
        super(FCNMaskHead, self).__init__()
        # Copy before mutating: scale_factor is popped from the dict below.
        self.upsample_cfg = upsample_cfg.copy()
        if self.upsample_cfg['type'] not in [
                None, 'deconv', 'nearest', 'bilinear', 'carafe'
        ]:
            raise ValueError(
                f'Invalid upsample method {self.upsample_cfg["type"]}, '
                'accepted methods are "deconv", "nearest", "bilinear", '
                '"carafe"')
        self.num_convs = num_convs
        # WARN: roi_feat_size is reserved and not used
        self.roi_feat_size = _pair(roi_feat_size)
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.upsample_method = self.upsample_cfg.get('type')
        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self.loss_mask = build_loss(loss_mask)
        # Stack of conv layers applied to the RoI feature map.
        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    padding=padding,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        upsample_in_channels = (
            self.conv_out_channels if self.num_convs > 0 else in_channels)
        upsample_cfg_ = self.upsample_cfg.copy()
        if self.upsample_method is None:
            self.upsample = None
        elif self.upsample_method == 'deconv':
            upsample_cfg_.update(
                in_channels=upsample_in_channels,
                out_channels=self.conv_out_channels,
                kernel_size=self.scale_factor,
                stride=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        elif self.upsample_method == 'carafe':
            upsample_cfg_.update(
                channels=upsample_in_channels, scale_factor=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        else:
            # suppress warnings: align_corners is invalid for 'nearest' mode
            align_corners = (None
                             if self.upsample_method == 'nearest' else False)
            upsample_cfg_.update(
                scale_factor=self.scale_factor,
                mode=self.upsample_method,
                align_corners=align_corners)
            self.upsample = build_upsample_layer(upsample_cfg_)
        out_channels = 1 if self.class_agnostic else self.num_classes
        logits_in_channel = (
            self.conv_out_channels
            if self.upsample_method == 'deconv' else upsample_in_channels)
        self.conv_logits = Conv2d(logits_in_channel, out_channels, 1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None
    def init_weights(self):
        """Initialize the upsample layer and the logits conv; the ConvModules
        in ``self.convs`` rely on their own default initialization."""
        for m in [self.upsample, self.conv_logits]:
            if m is None:
                continue
            elif isinstance(m, CARAFEPack):
                # CARAFE has its own initialization routine.
                m.init_weights()
            else:
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(m.bias, 0)
    @auto_fp16()
    def forward(self, x):
        """Predict mask logits for RoI features ``x``; returns a
        (n, #class or 1, h', w') logit map (no sigmoid applied here)."""
        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                # deconv has no built-in activation; the other methods are
                # pure interpolation and need none.
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred
    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Build mask training targets for the positive proposals of each
        image via :func:`mask_target`."""
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets
    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """Compute the mask loss; returns ``dict(loss_mask=...)``. With no
        positives, a zero loss that keeps the graph connected is returned."""
        loss = dict()
        if mask_pred.size(0) == 0:
            # Keep a grad-connected zero so DDP sees the parameters used.
            loss_mask = mask_pred.sum() * 0
        else:
            if self.class_agnostic:
                loss_mask = self.loss_mask(mask_pred, mask_targets,
                                           torch.zeros_like(labels))
            else:
                loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
        loss['loss_mask'] = loss_mask
        return loss
    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config; its ``mask_thr_binary``
                binarizes the pasted masks (a negative value keeps scaled
                uint8 values for visualization/debugging instead).
            ori_shape: original image size
            scale_factor: scale between the detection coordinates and
                ``ori_shape`` (used only when ``rescale`` is False).
            rescale (bool): if True, masks are pasted at ``ori_shape``
                resolution; otherwise at the scaled resolution.

        Returns:
            list[list]: encoded masks, one inner list per class.
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid()
        else:
            # ndarray input (multi-scale testing): wrap on the bbox device.
            mask_pred = det_bboxes.new_tensor(mask_pred)
        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        if rescale:
            img_h, img_w = ori_shape[:2]
        else:
            img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
            img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
            scale_factor = 1.0
        if not isinstance(scale_factor, (float, torch.Tensor)):
            scale_factor = bboxes.new_tensor(scale_factor)
        bboxes = bboxes / scale_factor
        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue; chunk count is sized so each chunk's
            # float buffer stays under GPU_MEM_LIMIT.
            num_chunks = int(
                np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
        threshold = rcnn_test_cfg.mask_thr_binary
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)
        if not self.class_agnostic:
            # Keep only each detection's own class channel.
            mask_pred = mask_pred[range(N), labels][:, None]
        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')
            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
            im_mask[(inds, ) + spatial_inds] = masks_chunk
        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].cpu().numpy())
        return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
    """Paste instance masks according to boxes.

    This implementation is modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        masks (Tensor): N, 1, H, W
        boxes (Tensor): N, 4
        img_h (int): Height of the image to be pasted.
        img_w (int): Width of the image to be pasted.
        skip_empty (bool): Only paste masks within the region that
            tightly bound all boxes, and returns the results this region only.
            An important optimization for CPU.

    Returns:
        tuple: (Tensor, tuple). The first item is mask tensor, the second one
            is the slice object.
            If skip_empty == False, the whole image will be pasted. It will
            return a mask of shape (N, img_h, img_w) and an empty tuple.
            If skip_empty == True, only area around the mask will be pasted.
            A mask of shape (N, h', w') and its start and end coordinates
            in the original image will be returned.
    """
    # On GPU, paste all masks together (up to chunk size)
    # by using the entire image to sample the masks
    # Compared to pasting them one by one,
    # this has more operations but is faster on COCO-scale dataset.
    device = masks.device
    if skip_empty:
        # Clip the union bounding region of all boxes to the image, with a
        # 1-pixel margin on each side.
        x0_int, y0_int = torch.clamp(
            boxes.min(dim=0).values.floor()[:2] - 1,
            min=0).to(dtype=torch.int32)
        x1_int = torch.clamp(
            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
        y1_int = torch.clamp(
            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
    else:
        x0_int, y0_int = 0, 0
        x1_int, y1_int = img_w, img_h
    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1
    N = masks.shape[0]
    # Pixel-center coordinates, then normalized to [-1, 1] for grid_sample.
    img_y = torch.arange(
        y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
    img_x = torch.arange(
        x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
    img_y = (img_y - y0) / (y1 - y0) * 2 - 1
    img_x = (img_x - x0) / (x1 - x0) * 2 - 1
    # img_x, img_y have shapes (N, w), (N, h)
    # Degenerate boxes (zero width/height) divide by zero above and produce
    # inf; zero those coordinates out before sampling.
    if torch.isinf(img_x).any():
        inds = torch.where(torch.isinf(img_x))
        img_x[inds] = 0
    if torch.isinf(img_y).any():
        inds = torch.where(torch.isinf(img_y))
        img_y[inds] = 0
    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
    grid = torch.stack([gx, gy], dim=3)
    img_masks = F.grid_sample(
        masks.to(dtype=torch.float32), grid, align_corners=False)
    if skip_empty:
        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
    else:
        return img_masks[:, 0], ()
| 39.029126 | 79 | 0.585821 |
fde6bdb051849464d789a51214da84ad251250d9 | 175 | py | Python | tests/fixtures/formatter/correct.py | cdhiraj40/wemake-python-styleguide | 7cef9be081d594c30045b7a98cae77a9be46e1aa | [
"MIT"
] | 1,931 | 2018-03-17T13:52:45.000Z | 2022-03-27T09:39:17.000Z | tests/fixtures/formatter/correct.py | amansr02/wemake-python-styleguide | 681035ed21fbe28ebfb32b8807b98e8de76b64aa | [
"MIT"
] | 2,231 | 2018-03-09T21:19:05.000Z | 2022-03-31T08:35:37.000Z | tests/fixtures/formatter/correct.py | amansr02/wemake-python-styleguide | 681035ed21fbe28ebfb32b8807b98e8de76b64aa | [
"MIT"
] | 492 | 2018-05-18T21:20:28.000Z | 2022-03-20T14:11:50.000Z | """
Module level docstring.
They are required.
"""
def clear_name(good_name: int) -> int:
    """All functions should be like this one."""
    doubled_value = good_name + good_name
    return doubled_value
| 15.909091 | 48 | 0.674286 |
f56cd9ab013deebc7decb2841f4d5cc1f13e4013 | 1,619 | py | Python | devilry/devilry_gradingsystem/views/admin/summary.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_gradingsystem/views/admin/summary.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_gradingsystem/views/admin/summary.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | from django.views.generic import DetailView
from django.urls import reverse
from django.shortcuts import redirect
from django import forms
from devilry.apps.core.models import Assignment
from devilry.apps.core.models import StaticFeedback
from devilry.devilry_gradingsystem.pluginregistry import GradingSystemPluginNotInRegistryError
from devilry.devilry_gradingsystem.models import FeedbackDraft
from .base import AssignmentSingleObjectMixin
class SummaryView(AssignmentSingleObjectMixin, DetailView):
    """Admin summary page for an assignment's grading-system configuration."""
    template_name = 'devilry_gradingsystem/admin/summary.django.html'

    def get_context_data(self, **kwargs):
        context = super(SummaryView, self).get_context_data(**kwargs)
        assignment = self.object

        # Grading-plugin status for the template.
        if not assignment.grading_system_plugin_id:
            context['no_grading_system_plugin_id'] = True
        else:
            context['has_valid_grading_setup'] = assignment.has_valid_grading_setup()
            try:
                context['pluginapi'] = assignment.get_gradingsystem_plugin_api()
            except GradingSystemPluginNotInRegistryError:
                # Plugin was removed/unregistered; template handles absence.
                pass

        # Whether any feedback (published or draft) exists on the assignment.
        feedback_filter = dict(
            delivery__deadline__assignment_group__parentnode=assignment)
        context['has_staticfeedbacks'] = StaticFeedback.objects.filter(
            **feedback_filter).exists()
        context['has_feedbackdrafts'] = FeedbackDraft.objects.filter(
            **feedback_filter).exists()

        # Admin role of the requesting user (subject admin wins over period).
        requesting_user = self.request.user
        if assignment.subject.is_admin(requesting_user):
            context['is_subjectadmin'] = True
        elif assignment.period.is_admin(requesting_user):
            context['is_periodadmin'] = True
        return context
| 46.257143 | 140 | 0.75664 |
dd43c20ac5703abc6e43c5af7408dc30d0d41aa3 | 5,416 | py | Python | tests/wallet/did_wallet/test_did_rpc.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 19 | 2021-08-09T21:21:09.000Z | 2022-03-18T02:27:13.000Z | tests/wallet/did_wallet/test_did_rpc.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 29 | 2021-08-13T12:05:09.000Z | 2022-03-20T19:30:36.000Z | tests/wallet/did_wallet/test_did_rpc.py | BTChia-Network/btchia-blockchain | 2ab991f6b207872b17ce237ebe409defb96cd524 | [
"Apache-2.0"
] | 4 | 2021-08-18T16:42:30.000Z | 2022-03-15T08:24:58.000Z | import asyncio
import logging
import pytest
from btcgreen.rpc.rpc_server import start_rpc_server
from btcgreen.rpc.wallet_rpc_api import WalletRpcApi
from btcgreen.rpc.wallet_rpc_client import WalletRpcClient
from btcgreen.simulator.simulator_protocol import FarmNewBlockProtocol
from btcgreen.types.peer_info import PeerInfo
from btcgreen.util.ints import uint16, uint64
from btcgreen.wallet.util.wallet_types import WalletType
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets, bt
from tests.time_out_assert import time_out_assert
from btcgreen.wallet.did_wallet.did_wallet import DIDWallet
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_loop():
    # Module-scoped override of pytest-asyncio's event_loop fixture: every
    # async test/fixture in this module shares one loop.
    loop = asyncio.get_event_loop()
    yield loop
class TestDIDWallet:
    """End-to-end test of the DID wallet RPC flow: create a DID wallet over
    RPC, create a second DID wallet directly, back it up, recover it through
    a third wallet via RPC attest/recovery-spend, and verify the balance."""
    @pytest.fixture(scope="function")
    async def three_wallet_nodes(self):
        # One simulator full node plus three wallet nodes.
        async for _ in setup_simulators_and_wallets(1, 3, {}):
            yield _
    @pytest.mark.asyncio
    async def test_create_did(self, three_wallet_nodes):
        num_blocks = 4
        full_nodes, wallets = three_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, wallet_server_0 = wallets[0]
        wallet_node_1, wallet_server_1 = wallets[1]
        wallet_node_2, wallet_server_2 = wallets[2]
        MAX_WAIT_SECS = 30
        wallet = wallet_node_0.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        # Connect all three wallet nodes to the simulator full node.
        await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        # Farm one block to wallet 0, then several to a burn address so the
        # farming reward matures.
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        for i in range(0, num_blocks + 1):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        log.info("Waiting for initial money in Wallet 0 ...")
        # Stand up an RPC server wrapping wallet node 0 and a client for it.
        api_one = WalletRpcApi(wallet_node_0)
        config = bt.config
        daemon_port = config["daemon_port"]
        test_rpc_port = uint16(21529)
        await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        client = await WalletRpcClient.create(self_hostname, test_rpc_port, bt.root_path, bt.config)
        rpc_server_cleanup = await start_rpc_server(
            api_one,
            self_hostname,
            daemon_port,
            test_rpc_port,
            lambda x: None,
            bt.root_path,
            config,
            connect_to_daemon=False,
        )
        async def got_initial_money():
            balances = await client.get_wallet_balance("1")
            return balances["confirmed_wallet_balance"] > 0
        await time_out_assert(timeout=MAX_WAIT_SECS, function=got_initial_money)
        # Create a DID wallet (amount 201) via RPC on wallet node 0.
        val = await client.create_new_did_wallet(201)
        assert isinstance(val, dict)
        if "success" in val:
            assert val["success"]
        assert val["type"] == WalletType.DISTRIBUTED_ID.value
        assert val["wallet_id"] > 1
        assert len(val["my_did"]) == 64
        assert bytes.fromhex(val["my_did"])
        main_wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
        ph2 = await main_wallet_2.get_new_puzzlehash()
        for i in range(0, num_blocks + 1):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        # Second DID wallet on node 2, listing the first DID for recovery.
        recovery_list = [bytes.fromhex(val["my_did"])]
        async with wallet_node_2.wallet_state_manager.lock:
            did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
                wallet_node_2.wallet_state_manager, main_wallet_2, uint64(101), recovery_list
            )
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        # Back up wallet 2 and recover it into a new wallet via RPC.
        filename = "test.backup"
        did_wallet_2.create_backup(filename)
        val = await client.create_new_did_wallet_from_recovery(filename)
        if "success" in val:
            assert val["success"]
        assert val["type"] == WalletType.DISTRIBUTED_ID.value
        assert val["wallet_id"] > 1
        did_wallet_id_3 = val["wallet_id"]
        assert len(val["my_did"]) == 64
        assert bytes.fromhex(val["my_did"]) == did_wallet_2.did_info.origin_coin.name()
        assert bytes.fromhex(val["coin_name"])
        assert bytes.fromhex(val["newpuzhash"])
        assert bytes.fromhex(val["pubkey"])
        # Attestation from wallet 2, then the recovery spend for wallet 3.
        filename = "test.attest"
        val = await client.did_create_attest(
            did_wallet_2.wallet_id, val["coin_name"], val["pubkey"], val["newpuzhash"], filename
        )
        if "success" in val:
            assert val["success"]
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        val = await client.did_recovery_spend(did_wallet_id_3, [filename])
        if "success" in val:
            assert val["success"]
        for i in range(0, num_blocks * 2):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
        # The recovered wallet should now hold the 101 mojos from wallet 2.
        val = await client.get_wallet_balance(did_wallet_id_3)
        assert val["confirmed_wallet_balance"] == 101
        await rpc_server_cleanup()
| 40.41791 | 105 | 0.6887 |
9d4a13a5f05ff9d7f0ff87e0fcdd79eaee234a7b | 19,323 | py | Python | ThirdParty/Twisted/twisted/conch/client/knownhosts.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | ThirdParty/Twisted/twisted/conch/client/knownhosts.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | null | null | null | ThirdParty/Twisted/twisted/conch/client/knownhosts.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 1 | 2021-12-02T07:29:15.000Z | 2021-12-02T07:29:15.000Z | # -*- test-case-name: twisted.conch.test.test_knownhosts -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An implementation of the OpenSSH known_hosts database.
@since: 8.2
"""
import hmac
from binascii import Error as DecodeError, b2a_base64
from hashlib import sha1
from zope.interface import implements
from twisted.python.randbytes import secureRandom
from twisted.internet import defer
from twisted.python import log
from twisted.python.util import FancyEqMixin
from twisted.conch.interfaces import IKnownHostEntry
from twisted.conch.error import HostKeyChanged, UserRejectedKey, InvalidEntry
from twisted.conch.ssh.keys import Key, BadKeyError
def _b64encode(s):
"""
Encode a binary string as base64 with no trailing newline.
@param s: The string to encode.
@type s: L{bytes}
@return: The base64-encoded string.
@rtype: L{bytes}
"""
return b2a_base64(s).strip()
def _extractCommon(string):
    """
    Split one known_hosts line into the fields shared by hashed and plain
    entries.

    @param string: A known hosts file entry (a single line).
    @type string: L{bytes}

    @return: a 4-tuple of hostname data (L{bytes}), ssh key type (L{bytes}),
        key (L{Key}), and comment (L{bytes} or L{None}).  The hostname data is
        simply the beginning of the line up to the first occurrence of
        whitespace.
    @rtype: L{tuple}
    """
    fields = string.split(None, 2)
    if len(fields) != 3:
        raise InvalidEntry()
    hostnames, keyType, keyAndComment = fields
    keyFields = keyAndComment.split(None, 1)
    if len(keyFields) == 1:
        keyString = keyFields[0]
        comment = None
    else:
        keyString, comment = keyFields
        comment = comment.rstrip("\n")
    publicKey = Key.fromString(keyString.decode('base64'))
    return hostnames, keyType, publicKey, comment
class _BaseEntry(object):
"""
Abstract base of both hashed and non-hashed entry objects, since they
represent keys and key types the same way.
@ivar keyType: The type of the key; either ssh-dss or ssh-rsa.
@type keyType: L{str}
@ivar publicKey: The server public key indicated by this line.
@type publicKey: L{twisted.conch.ssh.keys.Key}
@ivar comment: Trailing garbage after the key line.
@type comment: L{str}
"""
def __init__(self, keyType, publicKey, comment):
self.keyType = keyType
self.publicKey = publicKey
self.comment = comment
def matchesKey(self, keyObject):
"""
Check to see if this entry matches a given key object.
@param keyObject: A public key object to check.
@type keyObject: L{Key}
@return: C{True} if this entry's key matches C{keyObject}, C{False}
otherwise.
@rtype: L{bool}
"""
return self.publicKey == keyObject
class PlainEntry(_BaseEntry):
    """
    A L{PlainEntry} is a representation of a plain-text entry in a
    known_hosts file.

    @ivar _hostnames: the list of all host-names associated with this entry.
    @type _hostnames: L{list} of L{str}
    """

    implements(IKnownHostEntry)

    def __init__(self, hostnames, keyType, publicKey, comment):
        super(PlainEntry, self).__init__(keyType, publicKey, comment)
        self._hostnames = hostnames

    @classmethod
    def fromString(cls, string):
        """
        Parse a plain-text entry in a known_hosts file, and return a
        corresponding L{PlainEntry}.

        @param string: a space-separated string formatted like "hostname
            key-type base64-key-data comment".
        @type string: L{str}

        @raise DecodeError: if the key is not valid encoded as valid base64.

        @raise InvalidEntry: if the entry does not have the right number of
            elements and is therefore invalid.

        @raise BadKeyError: if the key, once decoded from base64, is not
            actually an SSH key.

        @return: an IKnownHostEntry representing the hostname and key in the
            input line.
        @rtype: L{PlainEntry}
        """
        hostnames, keyType, key, comment = _extractCommon(string)
        return cls(hostnames.split(","), keyType, key, comment)

    def matchesHost(self, hostname):
        """
        Check to see if this entry matches a given hostname.

        @param hostname: A hostname or IP address literal to check against
            this entry.
        @type hostname: L{str}

        @return: C{True} if this entry is for the given hostname or IP
            address, C{False} otherwise.
        @rtype: L{bool}
        """
        return hostname in self._hostnames

    def toString(self):
        """
        Implement L{IKnownHostEntry.toString} by recording the
        comma-separated hostnames, key type, and base-64 encoded key.

        @return: The string representation of this entry, with unhashed
            hostname information.
        @rtype: L{bytes}
        """
        hostPart = ','.join(self._hostnames)
        keyPart = _b64encode(self.publicKey.blob())
        fields = [hostPart, self.keyType, keyPart]
        if self.comment is not None:
            fields.append(self.comment)
        return ' '.join(fields)
class UnparsedEntry(object):
    """
    An entry in a L{KnownHostsFile} which could not be parsed; it therefore
    matches no keys and no hosts, but preserves its original text.
    """

    implements(IKnownHostEntry)

    def __init__(self, string):
        """
        Remember the unparseable line verbatim so it can be written back out.
        """
        self._string = string

    def matchesHost(self, hostname):
        """
        An unparseable entry matches no host; always returns C{False}.
        """
        return False

    def matchesKey(self, key):
        """
        An unparseable entry matches no key; always returns C{False}.
        """
        return False

    def toString(self):
        """
        Return the original input line, without its trailing newline if one
        was given.

        @return: The string representation of this entry, almost exactly as
            was used to initialize this entry but without a trailing newline.
        @rtype: L{bytes}
        """
        return self._string.rstrip("\n")
def _hmacedString(key, string):
"""
Return the SHA-1 HMAC hash of the given key and string.
@param key: The HMAC key.
@type key: L{bytes}
@param string: The string to be hashed.
@type string: L{bytes}
@return: The keyed hash value.
@rtype: L{bytes}
"""
hash = hmac.HMAC(key, digestmod=sha1)
hash.update(string)
return hash.digest()
class HashedEntry(_BaseEntry, FancyEqMixin):
    """
    A L{HashedEntry} is a representation of an entry in a known_hosts file
    where the hostname has been hashed and salted.

    @ivar _hostSalt: the salt to combine with a hostname for hashing.

    @ivar _hostHash: the hashed representation of the hostname.

    @cvar MAGIC: the 'hash magic' string used to identify a hashed line in a
        known_hosts file as opposed to a plaintext one.
    """

    implements(IKnownHostEntry)

    MAGIC = '|1|'

    compareAttributes = (
        "_hostSalt", "_hostHash", "keyType", "publicKey", "comment")

    def __init__(self, hostSalt, hostHash, keyType, publicKey, comment):
        super(HashedEntry, self).__init__(keyType, publicKey, comment)
        self._hostSalt = hostSalt
        self._hostHash = hostHash

    @classmethod
    def fromString(cls, string):
        """
        Load a hashed entry from a string representing a line in a
        known_hosts file.

        @param string: A complete single line from a I{known_hosts} file,
            formatted as defined by OpenSSH.
        @type string: L{bytes}

        @raise DecodeError: if the key, the hostname, or the is not valid
            encoded as valid base64

        @raise InvalidEntry: if the entry does not have the right number of
            elements and is therefore invalid, or the host/hash portion
            contains more items than just the host and hash.

        @raise BadKeyError: if the key, once decoded from base64, is not
            actually an SSH key.

        @return: The newly created L{HashedEntry} instance, initialized with
            the information from C{string}.
        """
        hostPart, keyType, key, comment = _extractCommon(string)
        saltAndHash = hostPart[len(cls.MAGIC):].split("|")
        if len(saltAndHash) != 2:
            raise InvalidEntry()
        hostSalt, hostHash = saltAndHash
        return cls(hostSalt.decode("base64"), hostHash.decode("base64"),
                   keyType, key, comment)

    def matchesHost(self, hostname):
        """
        Implement L{IKnownHostEntry.matchesHost} by hashing the input with
        this entry's salt and comparing against the stored hash.

        @param hostname: A hostname or IP address literal to check against
            this entry.
        @type hostname: L{bytes}

        @return: C{True} if this entry is for the given hostname or IP
            address, C{False} otherwise.
        @rtype: L{bool}
        """
        return self._hostHash == _hmacedString(self._hostSalt, hostname)

    def toString(self):
        """
        Implement L{IKnownHostEntry.toString} by base64-encoding the salt,
        host hash, and key.

        @return: The string representation of this entry, with the hostname
            part hashed.
        @rtype: L{bytes}
        """
        saltAndHash = '|'.join([_b64encode(self._hostSalt),
                                _b64encode(self._hostHash)])
        fields = [self.MAGIC + saltAndHash,
                  self.keyType,
                  _b64encode(self.publicKey.blob())]
        if self.comment is not None:
            fields.append(self.comment)
        return ' '.join(fields)
class KnownHostsFile(object):
    """
    A structured representation of an OpenSSH-format ~/.ssh/known_hosts file.
    @ivar _added: A list of L{IKnownHostEntry} providers which have been added
    to this instance in memory but not yet saved.
    @ivar _clobber: A flag indicating whether the current contents of the save
    path will be disregarded and potentially overwritten or not. If
    C{True}, this will be done. If C{False}, entries in the save path will
    be read and new entries will be saved by appending rather than
    overwriting.
    @type _clobber: L{bool}
    @ivar _savePath: See C{savePath} parameter of L{__init__}.
    """
    def __init__(self, savePath):
        """
        Create a new, empty KnownHostsFile.
        Unless you want to erase the current contents of C{savePath}, you want
        to use L{KnownHostsFile.fromPath} instead.
        @param savePath: The L{FilePath} to which to save new entries.
        @type savePath: L{FilePath}
        """
        self._added = []
        self._savePath = savePath
        # A directly-constructed instance replaces any existing file on
        # save(); fromPath() flips this to False to preserve existing entries.
        self._clobber = True
    @property
    def savePath(self):
        """
        @see: C{savePath} parameter of L{__init__}
        """
        return self._savePath
    def iterentries(self):
        """
        Iterate over the host entries in this file.
        @return: An iterable the elements of which provide L{IKnownHostEntry}.
        There is an element for each entry in the file as well as an element
        for each added but not yet saved entry.
        @rtype: iterable of L{IKnownHostEntry} providers
        """
        # Unsaved in-memory entries are yielded first.
        for entry in self._added:
            yield entry
        # In clobber mode the on-disk contents will be discarded on save(),
        # so they are not considered part of this file's entries.
        if self._clobber:
            return
        try:
            fp = self._savePath.open()
        except IOError:
            # A missing or unreadable file simply means no saved entries.
            return
        try:
            for line in fp:
                try:
                    if line.startswith(HashedEntry.MAGIC):
                        entry = HashedEntry.fromString(line)
                    else:
                        entry = PlainEntry.fromString(line)
                except (DecodeError, InvalidEntry, BadKeyError):
                    # Keep unparseable lines verbatim so save() never
                    # destroys data it does not understand.
                    entry = UnparsedEntry(line)
                yield entry
        finally:
            fp.close()
    def hasHostKey(self, hostname, key):
        """
        Check for an entry with matching hostname and key.
        @param hostname: A hostname or IP address literal to check for.
        @type hostname: L{bytes}
        @param key: The public key to check for.
        @type key: L{Key}
        @return: C{True} if the given hostname and key are present in this file,
        C{False} if they are not.
        @rtype: L{bool}
        @raise HostKeyChanged: if the host key found for the given hostname
        does not match the given key.
        """
        # Start enumeration at -len(self._added) so in-memory entries get
        # negative indices and saved lines get 0-based file positions.
        for lineidx, entry in enumerate(self.iterentries(), -len(self._added)):
            if entry.matchesHost(hostname):
                if entry.matchesKey(key):
                    return True
                else:
                    # Notice that lineidx is 0-based but HostKeyChanged.lineno
                    # is 1-based.
                    if lineidx < 0:
                        # A mismatching in-memory entry has no file location.
                        line = None
                        path = None
                    else:
                        line = lineidx + 1
                        path = self._savePath
                    raise HostKeyChanged(entry, path, line)
        return False
    def verifyHostKey(self, ui, hostname, ip, key):
        """
        Verify the given host key for the given IP and host, asking for
        confirmation from, and notifying, the given UI about changes to this
        file.
        @param ui: The user interface to request an IP address from.
        @param hostname: The hostname that the user requested to connect to.
        @param ip: The string representation of the IP address that is actually
        being connected to.
        @param key: The public key of the server.
        @return: a L{Deferred} that fires with True when the key has been
        verified, or fires with an errback when the key either cannot be
        verified or has changed.
        @rtype: L{Deferred}
        """
        hhk = defer.maybeDeferred(self.hasHostKey, hostname, key)
        def gotHasKey(result):
            if result:
                # Hostname already known: also pin the IP address so future
                # connections by address verify without prompting.
                if not self.hasHostKey(ip, key):
                    ui.warn("Warning: Permanently added the %s host key for "
                            "IP address '%s' to the list of known hosts." %
                            (key.type(), ip))
                    self.addHostKey(ip, key)
                    self.save()
                return result
            else:
                def promptResponse(response):
                    if response:
                        # User accepted: remember both hostname and IP.
                        self.addHostKey(hostname, key)
                        self.addHostKey(ip, key)
                        self.save()
                        return response
                    else:
                        raise UserRejectedKey()
                proceed = ui.prompt(
                    "The authenticity of host '%s (%s)' "
                    "can't be established.\n"
                    "RSA key fingerprint is %s.\n"
                    "Are you sure you want to continue connecting (yes/no)? " %
                    (hostname, ip, key.fingerprint()))
                return proceed.addCallback(promptResponse)
        return hhk.addCallback(gotHasKey)
    def addHostKey(self, hostname, key):
        """
        Add a new L{HashedEntry} to the key database.
        Note that you still need to call L{KnownHostsFile.save} if you wish
        these changes to be persisted.
        @param hostname: A hostname or IP address literal to associate with the
        new entry.
        @type hostname: L{bytes}
        @param key: The public key to associate with the new entry.
        @type key: L{Key}
        @return: The L{HashedEntry} that was added.
        @rtype: L{HashedEntry}
        """
        # 20 random bytes of salt for the hashed-hostname format.
        salt = secureRandom(20)
        keyType = "ssh-" + key.type().lower()
        entry = HashedEntry(salt, _hmacedString(salt, hostname),
                            keyType, key, None)
        self._added.append(entry)
        return entry
    def save(self):
        """
        Save this L{KnownHostsFile} to the path it was loaded from.
        """
        p = self._savePath.parent()
        if not p.isdir():
            p.makedirs()
        # Clobber mode rewrites the file wholesale; otherwise new entries
        # are appended after the existing ones.
        if self._clobber:
            mode = "w"
        else:
            mode = "a"
        with self._savePath.open(mode) as hostsFileObj:
            if self._added:
                hostsFileObj.write(
                    "\n".join([entry.toString() for entry in self._added]) +
                    "\n")
                self._added = []
        # After one save the on-disk file is authoritative; later saves append.
        self._clobber = False
    def fromPath(cls, path):
        """
        Create a new L{KnownHostsFile}, potentially reading existing known
        hosts information from the given file.
        @param path: A path object to use for both reading contents from and
        later saving to. If no file exists at this path, it is not an
        error; a L{KnownHostsFile} with no entries is returned.
        @type path: L{FilePath}
        @return: A L{KnownHostsFile} initialized with entries from C{path}.
        @rtype: L{KnownHostsFile}
        """
        knownHosts = cls(path)
        # Preserve any entries already at path instead of overwriting them.
        knownHosts._clobber = False
        return knownHosts
    fromPath = classmethod(fromPath)
class ConsoleUI(object):
    """
    A UI object that can ask true/false questions and post notifications on the
    console, to be used during key verification.
    """
    def __init__(self, opener):
        """
        @param opener: A no-argument callable which should open a console
        binary-mode file-like object to be used for reading and writing.
        This initializes the C{opener} attribute.
        @type opener: callable taking no arguments and returning a read/write
        file-like object
        """
        self.opener = opener
    def prompt(self, text):
        """
        Write the given text as a prompt to the console output, then read a
        result from the console input.
        @param text: Something to present to a user to solicit a yes or no
        response.
        @type text: L{bytes}
        @return: a L{Deferred} which fires with L{True} when the user answers
        'yes' and L{False} when the user answers 'no'.  It may errback if
        there were any I/O errors.
        """
        d = defer.succeed(None)
        def body(ignored):
            # Loop until the user types an unambiguous 'yes' or 'no';
            # anything else re-prompts.
            f = self.opener()
            f.write(text)
            while True:
                answer = f.readline().strip().lower()
                if answer == 'yes':
                    f.close()
                    return True
                elif answer == 'no':
                    f.close()
                    return False
                else:
                    f.write("Please type 'yes' or 'no': ")
        return d.addCallback(body)
    def warn(self, text):
        """
        Notify the user (non-interactively) of the provided text, by writing it
        to the console.
        @param text: Some information the user is to be made aware of.
        @type text: L{bytes}
        """
        try:
            f = self.opener()
            f.write(text)
            f.close()
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate; console I/O failures are merely logged.
            log.err()
| 30.9168 | 80 | 0.588884 |
fac7f02c30ebe7ebe1c8a819863b12f781b67f1a | 1,143 | py | Python | velocity_controller.py | krishanrana/robot_learning_algorithms | 3e66c9bf44e81ff281195130c71bcc6ebdf5ccda | [
"MIT"
] | null | null | null | velocity_controller.py | krishanrana/robot_learning_algorithms | 3e66c9bf44e81ff281195130c71bcc6ebdf5ccda | [
"MIT"
] | null | null | null | velocity_controller.py | krishanrana/robot_learning_algorithms | 3e66c9bf44e81ff281195130c71bcc6ebdf5ccda | [
"MIT"
] | null | null | null | from pyrep.errors import IKError
import numpy as np
class joint_velocity_controller():
    """Proportional joint-space velocity controller for a PyRep Panda arm.

    Targets are stored as a 7-element joint configuration; actions are
    8-vectors: seven proportional joint velocities plus a trailing 1.0
    (presumably a gripper command -- TODO confirm against the consumer).
    """

    def __init__(self, panda):
        # Robot handle providing IK solvers and joint-state queries.
        self.panda = panda
        # Desired joint configuration (7 DoF); updated by set_target().
        self.target_q = np.zeros(7)

    def set_target(self, target):
        """Resolve target's pose to joint angles via IK and store them.

        Forces the target upright (roll/pitch/yaw [0, 3.14, 0]) before
        reading its quaternion, then tries Jacobian IK and falls back to
        sampling-based IK on failure.
        """
        pos = target.get_position()
        target.set_orientation([0, 3.14, 0])  # Set orientation to be upright
        quat = target.get_quaternion()
        try:
            self.target_q = np.array(
                self.panda.solve_ik_via_jacobian(pos, quaternion=quat))
        except IKError:
            # So let's swap to an alternative IK method...
            # This returns 'max_configs' number of joint positions; take the
            # first candidate configuration.
            print("Trying sampling...")
            self.target_q = np.array(
                self.panda.solve_ik_via_sampling(pos, quaternion=quat)[0])

    def compute_action(self, gain=0.03):
        """Proportional velocity command from the current joint state toward
        the stored target (plus the trailing 1.0)."""
        current_q = self.panda.get_joint_positions()
        # Delegate so the control law lives in exactly one place.
        return self.recompute_action(current_q, self.target_q, gain=gain)

    def recompute_action(self, current_q, target_q, gain=0.03):
        """Proportional velocity toward target_q, with a trailing 1.0.

        Accepts any 7-element array-likes; returns an 8-element ndarray.
        """
        err = np.asarray(current_q) - np.asarray(target_q)
        v = -err * gain
        return np.append(v, 1.0)
| 31.75 | 95 | 0.613298 |
969a426da43a34df4a22381db193613469d87b89 | 3,121 | py | Python | Week 4/CIFAR.py | thanhhff/AIVN-Course-AI-For-Everyone | e8e582dea304341f0c03cedb920bcd1d450e5a9c | [
"MIT"
] | 25 | 2019-11-24T03:15:22.000Z | 2021-12-29T07:23:19.000Z | Week 4/CIFAR.py | thanhhff/AIVN-Course-AI-For-Everyone | e8e582dea304341f0c03cedb920bcd1d450e5a9c | [
"MIT"
] | 1 | 2019-12-03T10:44:48.000Z | 2019-12-03T10:44:48.000Z | Week 4/CIFAR.py | thanhhff/AIVN-Course-AI-For-Everyone | e8e582dea304341f0c03cedb920bcd1d450e5a9c | [
"MIT"
] | 13 | 2019-11-24T04:33:42.000Z | 2022-03-02T10:58:14.000Z | # The original code is from http://cs231n.github.io/assignment1/
import _pickle as pickle
from matplotlib import pyplot as plt
import numpy as np
import os
def load_CIFAR_batch(filename):
    """Load one pickled CIFAR-10 batch file.

    Returns (images, labels): images as a float array of shape
    [10000, 32, 32, 3] (NHWC), labels as a numpy array of 10000 ints.
    """
    with open(filename, 'rb') as handle:
        batch = pickle.load(handle, encoding='bytes')
    raw_images = batch[b'data']
    labels = batch[b'labels']
    # Stored as [N, 3072] in (channel, row, col) order; convert to NHWC.
    images = raw_images.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype('float')
    return images, np.array(labels)
def load_CIFAR10(ROOT):
    """ load all of cifar """
    # Concatenate the five training batches, then load the test batch.
    train_images, train_labels = [], []
    for batch_idx in range(1, 6):
        batch_path = os.path.join(ROOT, 'data_batch_%d' % (batch_idx,))
        images, labels = load_CIFAR_batch(batch_path)
        train_images.append(images)
        train_labels.append(labels)
    Xtr = np.concatenate(train_images)
    Ytr = np.concatenate(train_labels)
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_val=1000, num_test=10000, show_sample=True):
    """
    Load the CIFAR-10 dataset, and divide the sample into training set, validation set and test set.

    The validation split is taken from the training images immediately after
    the first num_training samples.

    NOTE(review): show_sample is accepted but never used in this function --
    confirm whether a visualize_sample() call was intended here.
    """
    cifar10_dir = 'datasets/datasets-cifar-10/cifar-10-batches-py/'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # subsample the data for validation set
    mask = range(num_training, num_training + num_val)
    X_val = X_train[mask]
    y_val = y_train[mask]
    # Keep only the first num_training samples for training.
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]
    return X_train, y_train, X_val, y_val, X_test, y_test
def visualize_sample(X_train, y_train, classes, samples_per_class=7):
    """visualize some samples in the training datasets.

    Draws a grid with one column per class and samples_per_class random
    images per column.

    NOTE(review): plt.show() sits inside the per-image loop, so a figure is
    displayed after every image; it was likely intended once after both
    loops -- confirm before changing.
    """
    num_classes = len(classes)
    for y, cls in enumerate(classes):
        idxs = np.flatnonzero(y_train == y) # get all the indexes of cls
        idxs = np.random.choice(idxs, samples_per_class, replace=False)
        for i, idx in enumerate(idxs): # plot the image one by one
            plt_idx = i * num_classes + y + 1 # i*num_classes and y+1 determine the row and column respectively
            plt.subplot(samples_per_class, num_classes, plt_idx)
            plt.imshow(X_train[idx].astype('uint8'))
            plt.axis('off')
            if i == 0:
                plt.title(cls)
            plt.show()
def preprocessing_CIFAR10_data(X_train, y_train, X_val, y_val, X_test, y_test):
    """Flatten images to rows, zero-center with the training-set mean, and
    append a bias feature, returning (D+1, N) design matrices.

    Labels are passed through unchanged. Mean subtraction is done in place
    on the reshaped views, matching the original implementation.
    """
    # Flatten each image into one row per sample (views where possible).
    X_train = X_train.reshape(X_train.shape[0], -1)   # [49000, 3072]
    X_val = X_val.reshape(X_val.shape[0], -1)         # [1000, 3072]
    X_test = X_test.reshape(X_test.shape[0], -1)      # [10000, 3072]
    # Zero-center every split using the training mean image; the in-place
    # subtraction mutates the underlying buffers, as before.
    mean_image = np.mean(X_train, axis=0)
    for split in (X_train, X_val, X_test):
        split -= mean_image
    # Append a constant bias column, then transpose to (features, samples).
    def _with_bias(mat):
        return np.hstack([mat, np.ones((mat.shape[0], 1))]).T
    return (_with_bias(X_train), y_train, _with_bias(X_val), y_val,
            _with_bias(X_test), y_test)
79446ac1e5e04c50de25db8fb4c96ef4c81d994b | 245 | py | Python | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T20:00:40.000Z | 2017-04-27T20:00:40.000Z | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | null | null | null | cms/templatetags/js.py | eduncan911/django_cms | 66c27f059ca0779157a7c3cc2e007d8090f10351 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.utils import simplejson
from django.core.serializers.json import DjangoJSONEncoder
register = template.Library()


@register.filter
def js(value):
    """Template filter: serialize ``value`` to a JSON string (dates,
    Decimals, etc. handled by Django's JSON encoder) for embedding in
    inline JavaScript."""
    # django.utils.simplejson was deprecated in Django 1.3 and removed in
    # 1.5; the stdlib json module is API-compatible for dumps(cls=...).
    import json
    return json.dumps(value, cls=DjangoJSONEncoder)
| 24.5 | 58 | 0.816327 |
4179f7aa9f6707635124c101052a96a64f8b4e22 | 1,393 | py | Python | 0501-0600/0567-Permutation in String/0567-Permutation in String.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 0501-0600/0567-Permutation in String/0567-Permutation in String.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 0501-0600/0567-Permutation in String/0567-Permutation in String.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | import collections
class Solution:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Return True if some permutation of s1 appears in s2 as a substring.

        Sliding window over s2: `need` counts how many of each character the
        window still owes; `missing_kinds` counts distinct characters whose
        requirement is not yet satisfied.
        """
        if len(s2) < len(s1):
            return False
        need = collections.Counter(s1)
        missing_kinds = len(need)
        left = 0
        for right, ch in enumerate(s2):
            need[ch] -= 1
            if need[ch] == 0:
                missing_kinds -= 1
            if missing_kinds == 0 and right - left + 1 == len(s1):
                return True
            # Shrink from the left until some requirement is unmet again,
            # checking for an exact-length window along the way.
            while missing_kinds == 0:
                need[s2[left]] += 1
                if need[s2[left]] == 1:
                    missing_kinds += 1
                elif right - left == len(s1):
                    return True
                left += 1
        return False
class Solution2:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Permutation-in-string via a fixed-size sliding window.

        `remaining` may go negative for surplus characters; `outstanding`
        tracks the total number of s1 characters the window still lacks, so
        outstanding == 0 means the current window is a permutation of s1.
        """
        if len(s2) < len(s1):
            return False
        remaining = collections.Counter(s1)
        outstanding = len(s1)
        window_start = 0
        for idx, ch in enumerate(s2):
            remaining[ch] -= 1
            if remaining[ch] >= 0:
                outstanding -= 1
                if outstanding == 0:
                    return True
            # Once the window reaches len(s1), slide it forward by one.
            if idx - window_start + 1 == len(s1):
                remaining[s2[window_start]] += 1
                if remaining[s2[window_start]] > 0:
                    outstanding += 1
                window_start += 1
        return False
| 29.020833 | 59 | 0.38191 |
9829dd72d2961476aedaa70f84f834deb7b36281 | 3,656 | py | Python | vitrage/tests/functional/datasources/aodh/test_aodh.py | mail2nsrajesh/vitrage | 41f863bbb7568f70d347feeab8eaca13085f81ba | [
"Apache-2.0"
] | null | null | null | vitrage/tests/functional/datasources/aodh/test_aodh.py | mail2nsrajesh/vitrage | 41f863bbb7568f70d347feeab8eaca13085f81ba | [
"Apache-2.0"
] | null | null | null | vitrage/tests/functional/datasources/aodh/test_aodh.py | mail2nsrajesh/vitrage | 41f863bbb7568f70d347feeab8eaca13085f81ba | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 - ZTE, Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.aodh import AODH_DATASOURCE
from vitrage.datasources.aodh.properties import AodhProperties as AodhProps
from vitrage.datasources import NOVA_HOST_DATASOURCE
from vitrage.datasources import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources import NOVA_ZONE_DATASOURCE
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests.functional.datasources.base import \
TestDataSourcesBase
from vitrage.tests.mocks import mock_transformer
class TestAodhAlarms(TestDataSourcesBase):
    # oslo.config options registered under the 'datasources' group: which
    # driver datasources to load and the base package path to find them in.
    DATASOURCES_OPTS = [
        cfg.ListOpt('types',
                    default=[AODH_DATASOURCE,
                             NOVA_HOST_DATASOURCE,
                             NOVA_INSTANCE_DATASOURCE,
                             NOVA_ZONE_DATASOURCE],
                    help='Names of supported driver data sources'),
        cfg.ListOpt('path',
                    default=['vitrage.datasources'],
                    help='base path for data sources')
    ]
    # noinspection PyPep8Naming
    @classmethod
    def setUpClass(cls):
        # Register processor and datasource options once per class, then
        # load the datasource drivers listed in DATASOURCES_OPTS.
        super(TestAodhAlarms, cls).setUpClass()
        cls.conf = cfg.ConfigOpts()
        cls.conf.register_opts(cls.PROCESSOR_OPTS, group='entity_graph')
        cls.conf.register_opts(cls.DATASOURCES_OPTS, group='datasources')
        cls.load_datasources(cls.conf)
    def test_aodh_alarms_validity(self):
        """Processing one mocked Aodh alarm event should add exactly one
        ALARM vertex of type aodh, connected to the targeted nova host."""
        # Setup
        processor = self._create_processor_with_graph(self.conf, uuid=True)
        self.assertEqual(self._num_total_expected_vertices(),
                         len(processor.entity_graph))
        detail = {TransformerBase.QUERY_RESULT: ''}
        spec_list = \
            mock_transformer.simple_aodh_alarm_generators(alarm_num=1,
                                                          snapshot_events=1,
                                                          snap_vals=detail)
        static_events = mock_transformer.generate_random_events_list(spec_list)
        aodh_event = static_events[0]
        # Point the mocked alarm at an existing nova host vertex so the
        # transformer can create the neighbor edge.
        aodh_event[AodhProps.RESOURCE_ID] = \
            self._find_entity_id_by_type(processor.entity_graph,
                                         NOVA_HOST_DATASOURCE)
        # Action
        processor.process_event(aodh_event)
        # Test assertions
        self.assertEqual(self._num_total_expected_vertices() + 1,
                         len(processor.entity_graph))
        aodh_vertices = processor.entity_graph.get_vertices(
            vertex_attr_filter={
                VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
                VProps.VITRAGE_TYPE: AODH_DATASOURCE
            })
        self.assertEqual(1, len(aodh_vertices))
        aodh_neighbors = processor.entity_graph.neighbors(
            aodh_vertices[0].vertex_id)
        self.assertEqual(1, len(aodh_neighbors))
        self.assertEqual(NOVA_HOST_DATASOURCE,
                         aodh_neighbors[0][VProps.VITRAGE_TYPE])
eeeddb84495e2b1ec622624a595a4133911afebd | 4,476 | py | Python | webpage/lib/python3.5/site-packages/dask/tests/test_rewrite.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | [
"MIT"
] | 2 | 2017-03-30T11:22:11.000Z | 2019-03-03T05:18:01.000Z | webpage/lib/python3.5/site-packages/dask/tests/test_rewrite.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | [
"MIT"
] | null | null | null | webpage/lib/python3.5/site-packages/dask/tests/test_rewrite.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | [
"MIT"
] | null | null | null | from dask.rewrite import RewriteRule, RuleSet, head, args, VAR, Traverser
from dask.utils_test import inc, add
def double(x):
    """Return *x* multiplied by two; used as a head in rewrite-rule targets."""
    return x * 2
def test_head():
    """head() returns a task tuple's callable, or the type of a non-task."""
    assert head((inc, 1)) == inc
    assert head((add, 1, 2)) == add
    assert head((add, (inc, 1), (inc, 1))) == add
    assert head([1, 2, 3]) == list
def test_args():
    """args() returns a task's argument tuple; non-tasks yield () or, for
    lists, the list itself."""
    assert args((inc, 1)) == (1,)
    assert args((add, 1, 2)) == (1, 2)
    assert args(1) == ()
    assert args([1, 2, 3]) == [1, 2, 3]
def test_traverser():
    """Traverser walks a term depth-first; copy() is independent; skip()
    jumps over the current subterm."""
    term = (add, (inc, 1), (double, (inc, 1), 2))
    t = Traverser(term)
    t2 = t.copy()
    assert t.current == add
    t.next()
    assert t.current == inc
    # Ensure copies aren't advanced when the original advances
    assert t2.current == add
    t.skip()
    assert t.current == double
    t.next()
    assert t.current == inc
    # Iterating a Traverser yields the full preorder token stream.
    assert list(t2) == [add, inc, 1, double, inc, 1, 2]
# Pattern variables shared by all the rules below.
vars = ("a", "b", "c")
# add(a, 1) -> inc(a)
rule1 = RewriteRule((add, "a", 1), (inc, "a"), vars)
# add(a, a) -> double(a)
rule2 = RewriteRule((add, "a", "a"), (double, "a"), vars)
# add(inc(a), inc(a)) -> add(double(a), 2)
rule3 = RewriteRule((add, (inc, "a"), (inc, "a")), (add, (double, "a"), 2), vars)
# add(inc(b), inc(a)) -> add(add(a, b), 2)
rule4 = RewriteRule((add, (inc, "b"), (inc, "a")), (add, (add, "a", "b"), 2), vars)
# sum([c, b, a]) -> add(add(a, b), c)
rule5 = RewriteRule((sum, ["c", "b", "a"]), (add, (add, "a", "b"), "c"), vars)
# list(x) -> x if x is a list
def repl_list(sd):
    # Callable right-hand side: receives the substitution dict and returns
    # the replacement term.
    x = sd['x']
    if isinstance(x, list):
        return x
    else:
        return (list, x)
rule6 = RewriteRule((list, 'x'), repl_list, ('x',))
def test_RewriteRule():
    """vars keeps only variables used in the pattern (sorted, deduped);
    _varlist records them in traversal order, with repeats."""
    # Test extraneous vars are removed, varlist is correct
    assert rule1.vars == ("a",)
    assert rule1._varlist == ["a"]
    assert rule2.vars == ("a",)
    assert rule2._varlist == ["a", "a"]
    assert rule3.vars == ("a",)
    assert rule3._varlist == ["a", "a"]
    assert rule4.vars == ("a", "b")
    assert rule4._varlist == ["b", "a"]
    assert rule5.vars == ("a", "b", "c")
    assert rule5._varlist == ["c", "b", "a"]
def test_RewriteRuleSubs():
    """subs() substitutes into a template rhs, or invokes a callable rhs."""
    # Test both rhs substitution and callable rhs
    assert rule1.subs({'a': 1}) == (inc, 1)
    assert rule6.subs({'x': [1, 2, 3]}) == [1, 2, 3]
# A RuleSet compiles all six rules into one discrimination net, shared by
# the remaining tests.
rules = [rule1, rule2, rule3, rule4, rule5, rule6]
rs = RuleSet(*rules)
def test_RuleSet():
    """The compiled discrimination net and rule ordering match expectations.

    Each net node is a (transition-dict, [rule-indices]) pair keyed by term
    heads (or VAR for pattern variables).
    """
    net = ({add: ({VAR: ({VAR: ({}, [1]), 1: ({}, [0])}, []),
                   inc: ({VAR: ({inc: ({VAR: ({}, [2, 3])}, [])}, [])}, [])}, []),
           list: ({VAR: ({}, [5])}, []),
           sum: ({list: ({VAR: ({VAR: ({VAR: ({}, [4])}, [])}, [])}, [])}, [])}, [])
    assert rs._net == net
    assert rs.rules == rules
def test_matches():
    """iter_matches yields (rule, substitution) pairs, most specific first,
    and rejects non-linear patterns whose variables bind inconsistently."""
    term = (add, 2, 1)
    matches = list(rs.iter_matches(term))
    assert len(matches) == 1
    assert matches[0] == (rule1, {'a': 2})
    # Test matches specific before general
    term = (add, 1, 1)
    matches = list(rs.iter_matches(term))
    assert len(matches) == 2
    assert matches[0] == (rule1, {'a': 1})
    assert matches[1] == (rule2, {'a': 1})
    # Test matches unhashable. What it's getting rewritten to doesn't make
    # sense, this is just to test that it works. :)
    term = (add, [1], [1])
    matches = list(rs.iter_matches(term))
    assert len(matches) == 1
    assert matches[0] == (rule2, {'a': [1]})
    # Test match at depth
    term = (add, (inc, 1), (inc, 1))
    matches = list(rs.iter_matches(term))
    assert len(matches) == 3
    assert matches[0] == (rule3, {'a': 1})
    assert matches[1] == (rule4, {'a': 1, 'b': 1})
    assert matches[2] == (rule2, {'a': (inc, 1)})
    # Test non-linear pattern checking: rule2 requires both args equal.
    term = (add, 2, 3)
    matches = list(rs.iter_matches(term))
    assert len(matches) == 0
def test_rewrite():
    """rewrite() applies rules bottom-up in a single pass, including inside
    list arguments and via callable right-hand sides."""
    # Rewrite inside list
    term = (sum, [(add, 1, 1), (add, 1, 1), (add, 1, 1)])
    new_term = rs.rewrite(term)
    assert new_term == (add, (add, (inc, 1), (inc, 1)), (inc, 1))
    # Rules aren't applied to exhaustion, this can be further simplified
    new_term = rs.rewrite(new_term)
    assert new_term == (add, (add, (double, 1), 2), (inc, 1))
    term = (add, (add, (add, (add, 1, 2), (add, 1, 2)), (add, (add, 1, 2), (add, 1, 2))), 1)
    assert rs.rewrite(term) == (inc, (double, (double, (add, 1, 2))))
    # Callable RewriteRule rhs
    term = (list, [1, 2, 3])
    assert rs.rewrite(term) == [1, 2, 3]
    # A (list, task) term is left alone by repl_list since the arg is a task.
    term = (list, (map, inc, [1, 2, 3]))
    assert rs.rewrite(term) == term
| 31.744681 | 92 | 0.532172 |
4265a0e894cff6513bfe9c165a2c6a1e305f4036 | 45 | py | Python | test/python/LIM2Metrics/py3/base/common/Python007/Python007.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 20 | 2015-06-16T17:39:10.000Z | 2022-03-20T22:39:40.000Z | test/python/LIM2Metrics/py3/base/common/Python007/Python007.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 29 | 2015-12-29T19:07:22.000Z | 2022-03-22T10:39:02.000Z | test/python/LIM2Metrics/py3/base/common/Python007/Python007.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | [
"BSD-4-Clause"
] | 12 | 2015-08-28T01:22:18.000Z | 2021-09-25T08:17:31.000Z | class MyClass:
"""A simple example class"""
| 15 | 29 | 0.688889 |
0dde5a893079fb5e39feccf24d5133c624953985 | 179 | py | Python | sns_mobile_push_notification/apps.py | yoongjian98/django-sns-mobile-push-notification | 9864e1e395d421aafe6b8b3cf6f546ee38142da3 | [
"MIT"
] | 12 | 2018-04-29T23:47:32.000Z | 2022-02-22T07:41:27.000Z | sns_mobile_push_notification/apps.py | yoongjian98/django-sns-mobile-push-notification | 9864e1e395d421aafe6b8b3cf6f546ee38142da3 | [
"MIT"
] | null | null | null | sns_mobile_push_notification/apps.py | yoongjian98/django-sns-mobile-push-notification | 9864e1e395d421aafe6b8b3cf6f546ee38142da3 | [
"MIT"
] | 3 | 2019-03-21T04:17:37.000Z | 2021-07-27T22:25:16.000Z | from django.apps import AppConfig
class SnsNotificationConfig(AppConfig):
    """Django AppConfig for the SNS mobile push notification app."""
    name = 'sns_mobile_push_notification'
    # Fixed user-visible typo: "fro Django" -> "for Django".
    verbose_name = 'SNS Mobile Push Notification for Django'
| 25.571429 | 60 | 0.793296 |
2fbfae498e54b933daf85bcd3ec4b6c7d12f70e3 | 4,094 | py | Python | code/client/munkilib/munkilog.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/munkilog.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | code/client/munkilib/munkilog.py | grahamgilbert/munki | 9ddc5a063a92b7f7671bddd679db3fbe7cb860b6 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
#
# Copyright 2009-2019 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
munkilog.py
Created by Greg Neagle on 2016-12-14.
Logging functions for Munki
"""
from __future__ import absolute_import, print_function
import codecs
import logging
import logging.handlers
import os
import time
from . import prefs
def log(msg, logname=''):
    """Generic logging function.

    Sends msg to the Python logging system (a no-op unless
    configure_syslog() has installed a handler) and appends a timestamped
    line, best-effort, to the Munki log file: the configured LogFile pref,
    or a file named logname in the same directory.
    """
    if len(msg) > 1000:
        # See http://bugs.python.org/issue11907 and RFC-3164
        # break up huge msg into chunks and send 1000 characters at a time
        msg_buffer = msg
        while msg_buffer:
            logging.info(msg_buffer[:1000])
            msg_buffer = msg_buffer[1000:]
    else:
        logging.info(msg)  # noop unless configure_syslog() is called first.
    # date/time format string
    formatstr = '%b %d %Y %H:%M:%S %z'
    if not logname:
        # use our regular logfile
        logpath = prefs.pref('LogFile')
    else:
        # Named logs live alongside the main LogFile.
        logpath = os.path.join(os.path.dirname(prefs.pref('LogFile')), logname)
    try:
        # Line-buffered (buffering=1) so each entry reaches disk promptly.
        fileobj = codecs.open(logpath, mode='a', buffering=1, encoding='UTF-8')
        try:
            fileobj.write("%s %s\n" % (time.strftime(formatstr), msg))
        except (OSError, IOError):
            # Logging must never crash the caller; drop the entry.
            pass
        fileobj.close()
    except (OSError, IOError):
        pass
def configure_syslog():
    """Configures logging to system.log, when pref('LogToSyslog') == True."""
    logger = logging.getLogger()
    # Remove existing handlers to avoid sending unexpected messages.
    for handler in logger.handlers:
        logger.removeHandler(handler)
    logger.setLevel(logging.DEBUG)
    # If /System/Library/LaunchDaemons/com.apple.syslogd.plist is restarted
    # then /var/run/syslog stops listening. If we fail to catch this then
    # Munki completely errors.
    try:
        syslog = logging.handlers.SysLogHandler('/var/run/syslog')
    except BaseException:
        log('LogToSyslog is enabled but socket connection failed.')
        return
    # Prefix each entry with "munki:" and forward INFO and above only.
    syslog.setFormatter(logging.Formatter('munki: %(message)s'))
    syslog.setLevel(logging.INFO)
    logger.addHandler(syslog)
def rotatelog(logname=''):
    """Rotate a log.

    Shifts logpath.3 -> logpath.4, ..., logpath.0 -> logpath.1, then moves
    the live log to logpath.0, keeping at most five old generations. Every
    unlink/rename failure is deliberately ignored (best-effort rotation).
    """
    if not logname:
        # use our regular logfile
        logpath = prefs.pref('LogFile')
    else:
        logpath = os.path.join(os.path.dirname(prefs.pref('LogFile')), logname)
    if os.path.exists(logpath):
        for i in range(3, -1, -1):
            try:
                # Drop the generation occupying the target slot first.
                os.unlink(logpath + '.' + str(i + 1))
            except (OSError, IOError):
                pass
            try:
                os.rename(logpath + '.' + str(i), logpath + '.' + str(i + 1))
            except (OSError, IOError):
                pass
        try:
            os.rename(logpath, logpath + '.0')
        except (OSError, IOError):
            pass
def rotate_main_log():
    """Rotate the main Munki log once it grows past ~1MB."""
    main_log = prefs.pref('LogFile')
    if os.path.exists(main_log) and os.path.getsize(main_log) > 1000000:
        # Call rotatelog() with no logname so it resolves the configured
        # LogFile path itself, matching reset_warnings()/reset_errors().
        # Passing the absolute path only worked because os.path.join()
        # discards its first argument when the second is absolute; a
        # relative LogFile pref would have produced a bogus path.
        rotatelog()
def reset_warnings():
    """Rotate the warnings.log that lives beside the main log file."""
    log_dir = os.path.dirname(prefs.pref('LogFile'))
    warningsfile = os.path.join(log_dir, 'warnings.log')
    if os.path.exists(warningsfile):
        rotatelog(warningsfile)
def reset_errors():
    """Rotate the errors.log that lives beside the main log file."""
    log_dir = os.path.dirname(prefs.pref('LogFile'))
    errorsfile = os.path.join(log_dir, 'errors.log')
    if os.path.exists(errorsfile):
        rotatelog(errorsfile)
# Not a standalone tool; print a hint when executed directly.
if __name__ == '__main__':
    print('This is a library of support tools for the Munki Suite.')
| 30.102941 | 79 | 0.636297 |
38cc31c0bea802ce6a1dc24457ac8b51cee245c8 | 21,058 | py | Python | pxr/usdImaging/lib/usdviewq/viewSettingsDataModel.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | 3 | 2019-02-20T07:34:17.000Z | 2019-08-13T08:17:04.000Z | pxr/usdImaging/lib/usdviewq/viewSettingsDataModel.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | null | null | null | pxr/usdImaging/lib/usdviewq/viewSettingsDataModel.py | octarrow/USD | 1845291a9701ab0a3a7d591bc243a1a80fdcba8a | [
"Unlicense"
] | null | null | null | #
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from qt import QtCore
from pxr import UsdGeom, Sdf
from common import (RenderModes, PickModes, SelectionHighlightModes,
CameraMaskModes, Complexities, PrintWarning)
import settings2
from settings2 import StateSource
from constantGroup import ConstantGroup
from freeCamera import FreeCamera
from common import ClearColors, HighlightColors
# Map of clear color names to rgba color tuples.
_CLEAR_COLORS_DICT = {
    ClearColors.BLACK: (0.0, 0.0, 0.0, 0.0),
    ClearColors.DARK_GREY: (0.3, 0.3, 0.3, 0.0),
    ClearColors.LIGHT_GREY: (0.7, 0.7, 0.7, 0.0),
    ClearColors.WHITE: (1.0, 1.0, 1.0, 0.0)}
# Map of highlight color names to rgba color tuples.
# All highlight colors share 0.5 alpha so selections render translucently.
_HIGHLIGHT_COLORS_DICT = {
    HighlightColors.WHITE: (1.0, 1.0, 1.0, 0.5),
    HighlightColors.YELLOW: (1.0, 1.0, 0.0, 0.5),
    HighlightColors.CYAN: (0.0, 1.0, 1.0, 0.5)}
# Default values for default material components.
# (Fallbacks for the corresponding stateProperty defaults below.)
DEFAULT_AMBIENT = 0.2
DEFAULT_SPECULAR = 0.1
def visibleViewSetting(f):
    """Decorator for settings that may affect the rendered image: after f
    runs, emit signalVisibleSettingChanged then signalSettingChanged.

    Signals are emitted only if f does not raise; f's return value (usually
    None for setters) is now propagated to the caller.
    """
    import functools

    @functools.wraps(f)  # preserve f's name/docstring for introspection
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        # If f raises an exception, the signals are not emitted.
        self.signalVisibleSettingChanged.emit()
        self.signalSettingChanged.emit()
        return result

    return wrapper
def invisibleViewSetting(f):
    """Decorator for settings that do NOT affect the rendered image: after f
    runs, emit only signalSettingChanged.

    The signal is emitted only if f does not raise; f's return value is now
    propagated to the caller.
    """
    import functools

    @functools.wraps(f)  # preserve f's name/docstring for introspection
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        # If f raises an exception, the signal is not emitted.
        self.signalSettingChanged.emit()
        return result

    return wrapper
class ViewSettingsDataModel(QtCore.QObject, StateSource):
    """Data model containing settings related to the rendered view of a USD
    file.
    """
    # emitted when any view setting changes
    signalSettingChanged = QtCore.Signal()
    # emitted when any view setting which may affect the rendered image changes
    signalVisibleSettingChanged = QtCore.Signal()
    # emitted when any aspect of the defaultMaterial changes
    signalDefaultMaterialChanged = QtCore.Signal()
    def __init__(self, rootDataModel, parent):
        """Load all view settings from persisted state (falling back to the
        defaults given below).

        rootDataModel: provides access to the current stage; only used by
            the cameraPrim property.
        parent: the StateSource that owns this object's persisted "model"
            state group.
        """
        QtCore.QObject.__init__(self)
        StateSource.__init__(self, parent, "model")
        self._rootDataModel = rootDataModel
        self._cameraMaskColor = tuple(self.stateProperty("cameraMaskColor", default=[0.1, 0.1, 0.1, 1.0]))
        self._cameraReticlesColor = tuple(self.stateProperty("cameraReticlesColor", default=[0.0, 0.7, 1.0, 1.0]))
        self._defaultMaterialAmbient = self.stateProperty("defaultMaterialAmbient", default=DEFAULT_AMBIENT)
        self._defaultMaterialSpecular = self.stateProperty("defaultMaterialSpecular", default=DEFAULT_SPECULAR)
        self._redrawOnScrub = self.stateProperty("redrawOnScrub", default=True)
        self._renderMode = self.stateProperty("renderMode", default=RenderModes.SMOOTH_SHADED)
        self._pickMode = self.stateProperty("pickMode", default=PickModes.PRIMS)
        # We need to store the trinary selHighlightMode state here,
        # because the stageView only deals in True/False (because it
        # cannot know anything about playback state).
        self._selHighlightMode = self.stateProperty("selectionHighlightMode", default=SelectionHighlightModes.ONLY_WHEN_PAUSED)
        # We store the highlightColorName so that we can compare state during
        # initialization without inverting the name->value logic
        self._highlightColorName = self.stateProperty("highlightColor", default="Yellow")
        self._ambientLightOnly = self.stateProperty("cameraLightEnabled", default=True)
        self._keyLightEnabled = self.stateProperty("keyLightEnabled", default=True)
        self._fillLightEnabled = self.stateProperty("fillLightEnabled", default=True)
        self._backLightEnabled = self.stateProperty("backLightEnabled", default=True)
        self._clearColorText = self.stateProperty("backgroundColor", default="Grey (Dark)")
        self._showBBoxPlayback = self.stateProperty("showBBoxesDuringPlayback", default=False)
        self._showBBoxes = self.stateProperty("showBBoxes", default=True)
        self._showAABBox = self.stateProperty("showAABBox", default=True)
        self._showOBBox = self.stateProperty("showOBBox", default=True)
        self._displayGuide = self.stateProperty("displayGuide", default=False)
        self._displayProxy = self.stateProperty("displayProxy", default=True)
        self._displayRender = self.stateProperty("displayRender", default=False)
        self._displayPrimId = self.stateProperty("displayPrimId", default=False)
        self._enableSceneMaterials = self.stateProperty("enableSceneMaterials", default=True)
        self._cullBackfaces = self.stateProperty("cullBackfaces", default=False)
        self._showInactivePrims = self.stateProperty("showInactivePrims", default=True)
        self._showAllMasterPrims = self.stateProperty("showAllMasterPrims", default=False)
        self._showUndefinedPrims = self.stateProperty("showUndefinedPrims", default=False)
        self._showAbstractPrims = self.stateProperty("showAbstractPrims", default=False)
        self._rolloverPrimInfo = self.stateProperty("rolloverPrimInfo", default=False)
        self._displayCameraOracles = self.stateProperty("cameraOracles", default=False)
        self._cameraMaskMode = self.stateProperty("cameraMaskMode", default=CameraMaskModes.NONE)
        self._showMask_Outline = self.stateProperty("cameraMaskOutline", default=False)
        self._showReticles_Inside = self.stateProperty("cameraReticlesInside", default=False)
        self._showReticles_Outside = self.stateProperty("cameraReticlesOutside", default=False)
        self._showHUD = self.stateProperty("showHUD", default=True)
        self._showHUD_Info = self.stateProperty("showHUDInfo", default=False)
        # XXX Until we can make the "Subtree Info" stats-gathering faster
        # we do not want the setting to persist from session to session.
        self._showHUD_Info = False
        self._showHUD_Complexity = self.stateProperty("showHUDComplexity", default=True)
        self._showHUD_Performance = self.stateProperty("showHUDPerformance", default=True)
        self._showHUD_GPUstats = self.stateProperty("showHUDGPUStats", default=False)
        # Not persisted: every session starts at LOW complexity with no
        # free camera and no camera prim bound.
        self._complexity = Complexities.LOW
        self._freeCamera = None
        self._cameraPath = None
    def onSaveState(self, state):
        """StateSource hook: write every persisted setting into *state*."""
        state["cameraMaskColor"] = list(self._cameraMaskColor)
        state["cameraReticlesColor"] = list(self._cameraReticlesColor)
        state["defaultMaterialAmbient"] = self._defaultMaterialAmbient
        state["defaultMaterialSpecular"] = self._defaultMaterialSpecular
        state["redrawOnScrub"] = self._redrawOnScrub
        state["renderMode"] = self._renderMode
        state["pickMode"] = self._pickMode
        state["selectionHighlightMode"] = self._selHighlightMode
        state["highlightColor"] = self._highlightColorName
        state["cameraLightEnabled"] = self._ambientLightOnly
        state["keyLightEnabled"] = self._keyLightEnabled
        state["fillLightEnabled"] = self._fillLightEnabled
        state["backLightEnabled"] = self._backLightEnabled
        state["backgroundColor"] = self._clearColorText
        state["showBBoxesDuringPlayback"] = self._showBBoxPlayback
        state["showBBoxes"] = self._showBBoxes
        state["showAABBox"] = self._showAABBox
        state["showOBBox"] = self._showOBBox
        state["displayGuide"] = self._displayGuide
        state["displayProxy"] = self._displayProxy
        state["displayRender"] = self._displayRender
        state["displayPrimId"] = self._displayPrimId
        state["enableSceneMaterials"] = self._enableSceneMaterials
        state["cullBackfaces"] = self._cullBackfaces
        state["showInactivePrims"] = self._showInactivePrims
        state["showAllMasterPrims"] = self._showAllMasterPrims
        state["showUndefinedPrims"] = self._showUndefinedPrims
        state["showAbstractPrims"] = self._showAbstractPrims
        state["rolloverPrimInfo"] = self._rolloverPrimInfo
        state["cameraOracles"] = self._displayCameraOracles
        state["cameraMaskMode"] = self._cameraMaskMode
        state["cameraMaskOutline"] = self._showMask_Outline
        state["cameraReticlesInside"] = self._showReticles_Inside
        state["cameraReticlesOutside"] = self._showReticles_Outside
        state["showHUD"] = self._showHUD
        state["showHUDInfo"] = self._showHUD_Info
        state["showHUDComplexity"] = self._showHUD_Complexity
        state["showHUDPerformance"] = self._showHUD_Performance
        state["showHUDGPUStats"] = self._showHUD_GPUstats
    # Pass-through properties: each getter returns the backing field; each
    # setter stores the value and relies on its decorator to emit the
    # appropriate change signal(s) afterwards.
    @property
    def cameraMaskColor(self):
        return self._cameraMaskColor
    @cameraMaskColor.setter
    @visibleViewSetting
    def cameraMaskColor(self, color):
        self._cameraMaskColor = color
    @property
    def cameraReticlesColor(self):
        return self._cameraReticlesColor
    @cameraReticlesColor.setter
    @visibleViewSetting
    def cameraReticlesColor(self, color):
        self._cameraReticlesColor = color
    @property
    def defaultMaterialAmbient(self):
        return self._defaultMaterialAmbient
    @defaultMaterialAmbient.setter
    @visibleViewSetting
    def defaultMaterialAmbient(self, value):
        # Only emit signalDefaultMaterialChanged on an actual change.
        if value != self._defaultMaterialAmbient:
            self._defaultMaterialAmbient = value
            self.signalDefaultMaterialChanged.emit()
    @property
    def defaultMaterialSpecular(self):
        return self._defaultMaterialSpecular
    @defaultMaterialSpecular.setter
    @visibleViewSetting
    def defaultMaterialSpecular(self, value):
        if value != self._defaultMaterialSpecular:
            self._defaultMaterialSpecular = value
            self.signalDefaultMaterialChanged.emit()
    @visibleViewSetting
    def setDefaultMaterial(self, ambient, specular):
        """Set both material components at once, emitting a single
        signalDefaultMaterialChanged if either actually changed."""
        if (ambient != self._defaultMaterialAmbient
                or specular != self._defaultMaterialSpecular):
            self._defaultMaterialAmbient = ambient
            self._defaultMaterialSpecular = specular
            self.signalDefaultMaterialChanged.emit()
    def resetDefaultMaterial(self):
        """Restore the default material to its factory values."""
        self.setDefaultMaterial(DEFAULT_AMBIENT, DEFAULT_SPECULAR)
    @property
    def complexity(self):
        return self._complexity
    @complexity.setter
    @visibleViewSetting
    def complexity(self, value):
        # Validating setters below raise rather than silently ignore bad input.
        if value not in Complexities:
            raise ValueError("Expected Complexity, got: '{}'.".format(value))
        self._complexity = value
    @property
    def renderMode(self):
        return self._renderMode
    @renderMode.setter
    @visibleViewSetting
    def renderMode(self, value):
        self._renderMode = value
    @property
    def pickMode(self):
        return self._pickMode
    @pickMode.setter
    @invisibleViewSetting
    def pickMode(self, value):
        self._pickMode = value
    @property
    def showAABBox(self):
        return self._showAABBox
    @showAABBox.setter
    @visibleViewSetting
    def showAABBox(self, value):
        self._showAABBox = value
    @property
    def showOBBox(self):
        return self._showOBBox
    @showOBBox.setter
    @visibleViewSetting
    def showOBBox(self, value):
        self._showOBBox = value
    @property
    def showBBoxes(self):
        return self._showBBoxes
    @showBBoxes.setter
    @visibleViewSetting
    def showBBoxes(self, value):
        self._showBBoxes = value
    @property
    def showBBoxPlayback(self):
        return self._showBBoxPlayback
    @showBBoxPlayback.setter
    @visibleViewSetting
    def showBBoxPlayback(self, value):
        self._showBBoxPlayback = value
    @property
    def displayGuide(self):
        return self._displayGuide
    @displayGuide.setter
    @visibleViewSetting
    def displayGuide(self, value):
        self._displayGuide = value
    @property
    def displayProxy(self):
        return self._displayProxy
    @displayProxy.setter
    @visibleViewSetting
    def displayProxy(self, value):
        self._displayProxy = value
    @property
    def displayRender(self):
        return self._displayRender
    @displayRender.setter
    @visibleViewSetting
    def displayRender(self, value):
        self._displayRender = value
    @property
    def displayCameraOracles(self):
        return self._displayCameraOracles
    @displayCameraOracles.setter
    @visibleViewSetting
    def displayCameraOracles(self, value):
        self._displayCameraOracles = value
    @property
    def displayPrimId(self):
        return self._displayPrimId
    @displayPrimId.setter
    @visibleViewSetting
    def displayPrimId(self, value):
        self._displayPrimId = value
    @property
    def enableSceneMaterials(self):
        return self._enableSceneMaterials
    @enableSceneMaterials.setter
    @visibleViewSetting
    def enableSceneMaterials(self, value):
        self._enableSceneMaterials = value
    @property
    def cullBackfaces(self):
        return self._cullBackfaces
    @cullBackfaces.setter
    @visibleViewSetting
    def cullBackfaces(self, value):
        self._cullBackfaces = value
    @property
    def showInactivePrims(self):
        return self._showInactivePrims
    @showInactivePrims.setter
    @invisibleViewSetting
    def showInactivePrims(self, value):
        self._showInactivePrims = value
    @property
    def showAllMasterPrims(self):
        return self._showAllMasterPrims
    @showAllMasterPrims.setter
    @invisibleViewSetting
    def showAllMasterPrims(self, value):
        self._showAllMasterPrims = value
    @property
    def showUndefinedPrims(self):
        return self._showUndefinedPrims
    @showUndefinedPrims.setter
    @invisibleViewSetting
    def showUndefinedPrims(self, value):
        self._showUndefinedPrims = value
    @property
    def showAbstractPrims(self):
        return self._showAbstractPrims
    @showAbstractPrims.setter
    @invisibleViewSetting
    def showAbstractPrims(self, value):
        self._showAbstractPrims = value
    @property
    def rolloverPrimInfo(self):
        return self._rolloverPrimInfo
    @rolloverPrimInfo.setter
    @invisibleViewSetting
    def rolloverPrimInfo(self, value):
        self._rolloverPrimInfo = value
    @property
    def cameraMaskMode(self):
        return self._cameraMaskMode
    @cameraMaskMode.setter
    @visibleViewSetting
    def cameraMaskMode(self, value):
        self._cameraMaskMode = value
    @property
    def showMask(self):
        # Derived: True for both FULL and PARTIAL mask modes.
        return self._cameraMaskMode in (CameraMaskModes.FULL, CameraMaskModes.PARTIAL)
    @property
    def showMask_Opaque(self):
        # Derived: True only for the FULL mask mode.
        return self._cameraMaskMode == CameraMaskModes.FULL
    @property
    def showMask_Outline(self):
        return self._showMask_Outline
    @showMask_Outline.setter
    @visibleViewSetting
    def showMask_Outline(self, value):
        self._showMask_Outline = value
    @property
    def showReticles_Inside(self):
        return self._showReticles_Inside
    @showReticles_Inside.setter
    @visibleViewSetting
    def showReticles_Inside(self, value):
        self._showReticles_Inside = value
    @property
    def showReticles_Outside(self):
        return self._showReticles_Outside
    @showReticles_Outside.setter
    @visibleViewSetting
    def showReticles_Outside(self, value):
        self._showReticles_Outside = value
    @property
    def showHUD(self):
        return self._showHUD
    @showHUD.setter
    @visibleViewSetting
    def showHUD(self, value):
        self._showHUD = value
    @property
    def showHUD_Info(self):
        return self._showHUD_Info
    @showHUD_Info.setter
    @visibleViewSetting
    def showHUD_Info(self, value):
        self._showHUD_Info = value
    @property
    def showHUD_Complexity(self):
        return self._showHUD_Complexity
    @showHUD_Complexity.setter
    @visibleViewSetting
    def showHUD_Complexity(self, value):
        self._showHUD_Complexity = value
    @property
    def showHUD_Performance(self):
        return self._showHUD_Performance
    @showHUD_Performance.setter
    @visibleViewSetting
    def showHUD_Performance(self, value):
        self._showHUD_Performance = value
    @property
    def showHUD_GPUstats(self):
        return self._showHUD_GPUstats
    @showHUD_GPUstats.setter
    @visibleViewSetting
    def showHUD_GPUstats(self, value):
        self._showHUD_GPUstats = value
    @property
    def ambientLightOnly(self):
        return self._ambientLightOnly
    @ambientLightOnly.setter
    @visibleViewSetting
    def ambientLightOnly(self, value):
        self._ambientLightOnly = value
    @property
    def keyLightEnabled(self):
        return self._keyLightEnabled
    @keyLightEnabled.setter
    @visibleViewSetting
    def keyLightEnabled(self, value):
        self._keyLightEnabled = value
    @property
    def fillLightEnabled(self):
        return self._fillLightEnabled
    @fillLightEnabled.setter
    @visibleViewSetting
    def fillLightEnabled(self, value):
        self._fillLightEnabled = value
    @property
    def backLightEnabled(self):
        return self._backLightEnabled
    @backLightEnabled.setter
    @visibleViewSetting
    def backLightEnabled(self, value):
        self._backLightEnabled = value
    @property
    def clearColorText(self):
        return self._clearColorText
    @clearColorText.setter
    @visibleViewSetting
    def clearColorText(self, value):
        if value not in ClearColors:
            raise ValueError("Unknown clear color: '{}'".format(value))
        self._clearColorText = value
    @property
    def clearColor(self):
        # Derived: rgba tuple for the currently selected background name.
        return _CLEAR_COLORS_DICT[self._clearColorText]
    @property
    def highlightColorName(self):
        return self._highlightColorName
    @highlightColorName.setter
    @visibleViewSetting
    def highlightColorName(self, value):
        if value not in HighlightColors:
            raise ValueError("Unknown highlight color: '{}'".format(value))
        self._highlightColorName = value
    @property
    def highlightColor(self):
        # Derived: rgba tuple for the currently selected highlight name.
        return _HIGHLIGHT_COLORS_DICT[self._highlightColorName]
    @property
    def selHighlightMode(self):
        return self._selHighlightMode
    @selHighlightMode.setter
    @visibleViewSetting
    def selHighlightMode(self, value):
        if value not in SelectionHighlightModes:
            raise ValueError("Unknown highlight mode: '{}'".format(value))
        self._selHighlightMode = value
    @property
    def redrawOnScrub(self):
        return self._redrawOnScrub
    @redrawOnScrub.setter
    @visibleViewSetting
    def redrawOnScrub(self, value):
        self._redrawOnScrub = value
    @property
    def freeCamera(self):
        return self._freeCamera
    @freeCamera.setter
    @visibleViewSetting
    def freeCamera(self, value):
        # NOTE(review): unlike cameraPath, None is rejected here -- confirm
        # callers never try to clear the free camera through this setter.
        if not isinstance(value, FreeCamera):
            raise TypeError("Free camera must be a FreeCamera object.")
        self._freeCamera = value
    @property
    def cameraPath(self):
        return self._cameraPath
    @cameraPath.setter
    @visibleViewSetting
    def cameraPath(self, value):
        # Accepts an Sdf.Path prim path, or None to unbind the camera.
        if ((not isinstance(value, Sdf.Path) or not value.IsPrimPath())
                and value is not None):
            raise TypeError("Expected prim path, got: {}".format(value))
        self._cameraPath = value
    @property
    def cameraPrim(self):
        # Derived: resolve cameraPath against the current stage, or None
        # when no path is set or no stage is loaded.
        if self.cameraPath is not None and self._rootDataModel.stage is not None:
            return self._rootDataModel.stage.GetPrimAtPath(self.cameraPath)
        else:
            return None
    @cameraPrim.setter
    def cameraPrim(self, value):
        # Stores only the prim's path; warns and leaves the current camera
        # unchanged if the prim is not a UsdGeom.Camera.
        if value is not None:
            if value.IsA(UsdGeom.Camera):
                self.cameraPath = value.GetPrimPath()
            else:
                PrintWarning("Incorrect Prim Type",
                    "Attempted to view the scene using the prim '%s', but "
                    "the prim is not a UsdGeom.Camera." % (value.GetName()))
        else:
            self.cameraPath = None
| 33.31962 | 127 | 0.701634 |
578846313d2a494fbae78aaf5ce361348ca92624 | 1,639 | py | Python | autoindex.py | langsci/157 | dd0f52128321748dc53171ac2c8220fff525221b | [
"CC-BY-4.0"
] | null | null | null | autoindex.py | langsci/157 | dd0f52128321748dc53171ac2c8220fff525221b | [
"CC-BY-4.0"
] | null | null | null | autoindex.py | langsci/157 | dd0f52128321748dc53171ac2c8220fff525221b | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python3
import glob
import re


def index_line(line, lgs, terms):
    r"""Return *line* with known language names wrapped in \ili{} and known
    subject terms wrapped in \isi{}.

    Empty/whitespace-only entries are skipped. Entries are NOT re.escape()d
    (preserved from the original), so names containing regex metacharacters
    would misbehave.
    """
    for lg in lgs:
        lg = lg.strip()
        if lg == '':
            continue
        # Negative lookbehind/lookahead avoid re-wrapping an already indexed
        # name and avoid matching inside a longer word.
        # bug fix: the replacement '\ili{...}' is a "bad escape" error in
        # modern re; r'\\ili{...}' yields the same literal backslash legally.
        line = re.sub(r'(?<!ili{)%s(?![\w}])' % lg, r'\\ili{%s}' % lg, line)
    for term in terms:
        term = term.strip()
        if term == '':
            continue
        line = re.sub(r'(?<!isi{|...[A-Za-z])%s(?![-_\w}])' % term, r'\\isi{%s}' % term, line)
    return line


def main():
    """Auto-index LangSci chapter .tex files into the 'indexed/' folder."""
    lgs = open("locallanguages.txt").read().split('\n')
    # reverse to avoid double indexing
    terms = open("localsubjectterms.txt").read().split('\n')[::-1]
    print("found %i language names for autoindexing" % len(lgs))
    print("found %i subject terms for autoindexing" % len(terms))
    files = glob.glob('chapters/*tex')
    # Lines containing these LaTeX commands must not be indexed.
    excluders = ("section", "caption", "chapter")
    for f in files:
        print("indexing %s" % f)
        # strip preamble of edited volume chapters to avoid indexing there
        parts = open(f).read().split(r"\begin{document}")
        content = parts[-1]
        preamble = ''
        joiner = ''
        if len(parts) == 2:
            preamble = parts[0]
            joiner = r"\begin{document}"
        newlines = []
        for line in content.split('\n'):
            included = True
            for excluder in excluders:
                if "%s{" % excluder in line:
                    included = False
                    print(line)  # bug fix: was a Python-2 `print line` statement
            if included:
                line = index_line(line, lgs, terms)
            newlines.append(line)
        content = "\n".join(newlines)
        # bug fix: re.findall('\\ili{', ...) is a "bad escape" error in
        # modern re; a literal substring count is what was intended.
        nlg = content.count(r'\ili{')
        nt = content.count(r'\isi{')
        with open(f.replace('chapters', 'indexed'), 'w') as outfile:
            outfile.write(preamble)
            outfile.write(joiner)
            outfile.write(content)
        print(" %s now contains %i indexed languages and %i indexed subject terms" % (f.split('/')[-1], nlg, nt))
    print("indexed files are in the folder 'indexed'")


if __name__ == "__main__":
    main()
| 27.779661 | 103 | 0.621721 |
60281c591b5d97bab8ad22fcad757fa0ea10a6dc | 452 | py | Python | Python/5. Math/5.1 Polar Coordinates.py | clago7/HackerRank-Python-Practice | 048bbdad2aef090eda39eb02c1e0287284000785 | [
"MIT"
] | null | null | null | Python/5. Math/5.1 Polar Coordinates.py | clago7/HackerRank-Python-Practice | 048bbdad2aef090eda39eb02c1e0287284000785 | [
"MIT"
] | null | null | null | Python/5. Math/5.1 Polar Coordinates.py | clago7/HackerRank-Python-Practice | 048bbdad2aef090eda39eb02c1e0287284000785 | [
"MIT"
] | null | null | null | # Problem: https://www.hackerrank.com/challenges/polar-coordinates/problem
import cmath


def polar_coordinates(z_str):
    """Parse a complex-number literal and return its polar form (r, phi).

    Generalizes the original hand-rolled sign/'j' parsing: complex() accepts
    any Python complex literal (float parts, pure-imaginary like '5j',
    missing sign, etc.), while producing identical results for the
    'x+yj' / 'x-yj' integer inputs the original handled.
    """
    z = complex(z_str.strip())
    return abs(z), cmath.phase(z)


def main():
    # HackerRank "Polar Coordinates": read one complex number from stdin,
    # print its modulus r, then its phase phi.
    r, phi = polar_coordinates(input())
    print(r)
    print(phi)


if __name__ == "__main__":
    main()
497e80c0572568e6166335c5ea205577fc984d96 | 20,039 | py | Python | scripts_coco/test_retrieval.py | nganltp/admicro-LaSO | 857d67a40af437ab57068fb0de35e4ada56c6209 | [
"BSD-3-Clause"
] | 83 | 2019-04-14T06:58:15.000Z | 2022-03-01T01:34:03.000Z | scripts_coco/test_retrieval.py | leokarlin/LaSO | 8941bdc9316361ad03dbc2bcabd4bf9922c0ecc7 | [
"BSD-3-Clause"
] | 17 | 2019-04-28T04:26:24.000Z | 2022-01-19T15:37:42.000Z | scripts_coco/test_retrieval.py | nganltp/admicro-LaSO | 857d67a40af437ab57068fb0de35e4ada56c6209 | [
"BSD-3-Clause"
] | 15 | 2019-09-05T04:22:10.000Z | 2022-01-13T15:31:25.000Z | """Calculate retrieval on the seen classes of COCO."""
import logging
from more_itertools import chunked
import numpy as np
from pathlib import Path
import pickle
from tqdm import tqdm
from joblib import Parallel, delayed
import torch
# Let cuDNN benchmark and cache the fastest convolution algorithms;
# beneficial here since inputs share a fixed crop size (see crop_size below).
torch.backends.cudnn.benchmark = True
from torch.utils.data import DataLoader
from torchvision import transforms
from sklearn.neighbors import BallTree
from scipy.spatial import KDTree
from traitlets import Bool, Enum, Float, Int, Unicode
from oneshot import setops_models
from oneshot.setops_models import Inception3
from oneshot import alfassy
from oneshot.coco import copy_coco_data
from experiment import Experiment
from CCC import setupCUDAdevice
from ignite._utils import convert_tensor
# Reserve/configure the CUDA device (project helper) before building models.
setupCUDAdevice()
cuda = True if torch.cuda.is_available() else False
# NOTE(review): device is hard-coded to 'cuda' even when torch reports no
# GPU -- confirm this script is only ever run on GPU machines.
device = 'cuda'
#
# Seed the random states
#
# Fixed seeds keep pair sampling / retrieval results reproducible across runs.
np.random.seed(0)
random_state = np.random.RandomState(0)
def _prepare_batch(batch, device=None):
    """Return a list with every element of *batch* moved to *device*
    via ignite's convert_tensor."""
    prepared = []
    for element in batch:
        prepared.append(convert_tensor(element, device=device))
    return prepared
def calc_IOU(y, y_pred):
    """Calculate Intersection Over Union between two multi labels vectors."""
    truth = y.astype(np.uint8)
    pred = y_pred.astype(np.uint8)
    # Union: positions where at least one vector is hot.
    union_mask = (truth + pred) > 0.5
    # Intersection: agreements restricted to the union support.
    matches = np.equal(pred, truth)[union_mask]
    # Epsilon guards against division by zero when both vectors are all-zero.
    return matches.sum() / (union_mask.sum() + 1e-6)
def label2hash(label):
    """Encode a binary label vector as a string key: one chr() per hot index."""
    hot_indices = np.where(label == 1)[0]
    return "".join(chr(int(idx)) for idx in hot_indices)
class Main(Experiment):
description = Unicode(u"Calculate retrieval of trained coco model.")
#
# Run setup
#
batch_size = Int(256, config=True, help="Batch size. default: 256")
num_workers = Int(8, config=True, help="Number of workers to use for data loading. default: 8")
n_jobs = Int(-1, config=True, help="Number of workers to use for data loading. default: -1")
device = Unicode("cuda", config=True, help="Use `cuda` backend. default: cuda")
#
# Hyper parameters.
#
unseen = Bool(False, config=True, help="Test on unseen classes.")
skip_tests = Int(1, config=True, help="How many test pairs to skip? for better runtime. default: 1")
debug_size = Int(-1, config=True, help="Reduce dataset sizes. This is useful when developing the script. default -1")
#
# Resume previous run parameters.
#
resume_path = Unicode(u"/dccstor/alfassy/finalLaSO/code_release/paperModels", config=True,
help="Resume from checkpoint file (requires using also '--resume_epoch'.")
resume_epoch = Int(0, config=True, help="Epoch to resume (requires using also '--resume_path'.")
coco_path = Unicode(u"/tmp/aa/coco", config=True, help="path to local coco dataset path")
init_inception = Bool(True, config=True, help="Initialize the inception networks using paper's network.")
#
# Network hyper parameters
#
base_network_name = Unicode("Inception3", config=True, help="Name of base network to use.")
avgpool_kernel = Int(10, config=True,
help="Size of the last avgpool layer in the Resnet. Should match the cropsize.")
classifier_name = Unicode("Inception3Classifier", config=True, help="Name of classifier to use.")
sets_network_name = Unicode("SetOpsResModule", config=True, help="Name of setops module to use.")
sets_block_name = Unicode("SetopResBlock_v1", config=True, help="Name of setops network to use.")
sets_basic_block_name = Unicode("SetopResBasicBlock", config=True,
help="Name of the basic setops block to use (where applicable).")
ops_layer_num = Int(1, config=True, help="Ops Module layer num.")
ops_latent_dim = Int(1024, config=True, help="Ops Module inner latent dim.")
setops_dropout = Float(0, config=True, help="Dropout ratio of setops module.")
crop_size = Int(299, config=True, help="Size of input crop (Resnet 224, inception 299).")
scale_size = Int(350, config=True, help="Size of input scale for data augmentation. default: 350")
paper_reproduce = Bool(False, config=True, help="Use paper reproduction settings. default: False")
#
# Metric
#
tree_type = Enum(("BallTree", "KDTree"), config=True, default_value="BallTree",
help="The Nearest-Neighbour algorithm to use. Default='BallTree'.")
metric = Enum(("manhattan", "minkowski"), config=True, default_value="minkowski",
help="The distance metric to use for the BallTree. Default='minkowski'.")
def run(self):
#
# Setup the model
#
base_model, classifier, setops_model = self.setup_model()
base_model.to(self.device)
classifier.to(self.device)
setops_model.to(self.device)
base_model.eval()
classifier.eval()
setops_model.eval()
#
# Load the dataset
#
val_loader, pair_loader, pair_loader_sub = self.setup_datasets()
val_labels, val_outputs = self.embed_dataset(base_model, val_loader)
self.val_labels_set = set([label2hash(label) for label in val_labels])
logging.info("Calculate the embedding NN {}.".format(self.tree_type))
if self.tree_type == "BallTree":
tree = BallTree(val_outputs, metric=self.metric)
else:
tree = KDTree(val_outputs)
#
# Run the testing
#
logging.info("Calculate test set embedding.")
a_S_b_list, b_S_a_list, a_U_b_list, b_U_a_list, a_I_b_list, b_I_a_list = [], [], [], [], [], []
target_a_I_b_list, target_a_U_b_list, target_a_S_b_list, target_b_S_a_list = [], [], [], []
embed_a_list, embed_b_list, target_a_list, target_b_list = [], [], [], []
ids_a_list, ids_b_list = [], []
with torch.no_grad():
for batch in tqdm(pair_loader):
input_a, input_b, target_a, target_b, id_a, id_b = _prepare_batch(batch, device=self.device)
ids_a_list.append(id_a.cpu().numpy())
ids_b_list.append(id_b.cpu().numpy())
#
# Apply the classification model
#
embed_a = base_model(input_a).view(input_a.size(0), -1)
embed_b = base_model(input_b).view(input_b.size(0), -1)
#
# Apply the setops model.
#
outputs_setopt = setops_model(embed_a, embed_b)
a_S_b, b_S_a, a_U_b, b_U_a, a_I_b, b_I_a = \
outputs_setopt[2:8]
embed_a_list.append(embed_a.cpu().numpy())
embed_b_list.append(embed_b.cpu().numpy())
a_S_b_list.append(a_S_b.cpu().numpy())
b_S_a_list.append(b_S_a.cpu().numpy())
a_U_b_list.append(a_U_b.cpu().numpy())
b_U_a_list.append(b_U_a.cpu().numpy())
a_I_b_list.append(a_I_b.cpu().numpy())
b_I_a_list.append(b_I_a.cpu().numpy())
#
# Calculate the target setops operations
#
target_a_list.append(target_a.cpu().numpy())
target_b_list.append(target_b.cpu().numpy())
target_a = target_a.type(torch.cuda.ByteTensor)
target_b = target_b.type(torch.cuda.ByteTensor)
target_a_I_b = target_a & target_b
target_a_U_b = target_a | target_b
target_a_S_b = target_a & ~target_a_I_b
target_b_S_a = target_b & ~target_a_I_b
target_a_I_b_list.append(target_a_I_b.type(torch.cuda.FloatTensor).cpu().numpy())
target_a_U_b_list.append(target_a_U_b.type(torch.cuda.FloatTensor).cpu().numpy())
target_a_S_b_list.append(target_a_S_b.type(torch.cuda.FloatTensor).cpu().numpy())
target_b_S_a_list.append(target_b_S_a.type(torch.cuda.FloatTensor).cpu().numpy())
ids_a_all = np.concatenate(ids_a_list, axis=0)
ids_b_all = np.concatenate(ids_b_list, axis=0)
del ids_a_list, ids_b_list
a_S_b_list, b_S_a_list = [], []
target_a_S_b_list, target_b_S_a_list = [], []
ids_a_list, ids_b_list = [], []
with torch.no_grad():
for batch in tqdm(pair_loader_sub):
input_a, input_b, target_a, target_b, id_a, id_b = _prepare_batch(batch, device=self.device)
ids_a_list.append(id_a.cpu().numpy())
ids_b_list.append(id_b.cpu().numpy())
#
# Apply the classification model
#
embed_a = base_model(input_a).view(input_a.size(0), -1)
embed_b = base_model(input_b).view(input_b.size(0), -1)
#
# Apply the setops model.
#
outputs_setopt = setops_model(embed_a, embed_b)
a_S_b, b_S_a, a_U_b, b_U_a, a_I_b, b_I_a = \
outputs_setopt[2:8]
a_S_b_list.append(a_S_b.cpu().numpy())
b_S_a_list.append(b_S_a.cpu().numpy())
#
# Calculate the target setops operations
#
target_a = target_a.type(torch.cuda.ByteTensor)
target_b = target_b.type(torch.cuda.ByteTensor)
target_a_I_b = target_a & target_b
target_a_S_b = target_a & ~target_a_I_b
target_b_S_a = target_b & ~target_a_I_b
target_a_S_b_list.append(target_a_S_b.type(torch.cuda.FloatTensor).cpu().numpy())
target_b_S_a_list.append(target_b_S_a.type(torch.cuda.FloatTensor).cpu().numpy())
ids_a_sub = np.concatenate(ids_a_list, axis=0)
ids_b_sub = np.concatenate(ids_b_list, axis=0)
def score_outputs(output_chunk, target_chunk, ids_a_chunk, ids_b_chunk, val_labels, K=5):
_, inds_chunk = tree.query(np.array(output_chunk), k=K+2)
ious = []
inds_list = []
input_ids_list = []
targets_list = []
for target, inds, id_a, id_b in zip(target_chunk, inds_chunk, ids_a_chunk, ids_b_chunk):
#
# Verify that the target label exists in the validation dataset.
#
if label2hash(target) not in self.val_labels_set:
continue
#
# Verify that didn't return one of the original vectors.
#
inds = inds.flatten()
ids = [val_loader.dataset.image_ids[i] for i in inds]
banned_ids = {id_a, id_b}
inds_ok = []
for i, id_ in enumerate(ids):
if id_ in banned_ids:
continue
inds_ok.append(inds[i])
#
# Calculate the IOU for different k
#
ious_k = []
for k in (1, 3, 5):
inds_k = list(inds_ok[:k])
ious_k.append(np.max([calc_IOU(target, val_labels[i]) for i in inds_k]))
ious.append(ious_k)
inds_list.append(inds_ok[:K])
input_ids_list.append([id_a, id_b])
targets_list.append(target)
return ious, inds_list, input_ids_list, targets_list
#
# Output results
#
logging.info("Calculate scores.")
results_path = Path(self.results_path)
for outputs, targets, ids_a, ids_b, name in zip(
(a_S_b_list, b_S_a_list, a_U_b_list, b_U_a_list, a_I_b_list, b_I_a_list, embed_a_list, embed_b_list),
(target_a_S_b_list, target_b_S_a_list, target_a_U_b_list, target_a_U_b_list, target_a_I_b_list,
target_a_I_b_list, target_a_list, target_b_list),
(ids_a_sub, ids_a_sub, ids_a_all, ids_a_all, ids_a_all, ids_a_all, ids_a_all, ids_a_all),
(ids_b_sub, ids_b_sub, ids_b_all, ids_b_all, ids_b_all, ids_b_all, ids_b_all, ids_b_all),
("a_S_b", "b_S_a", "a_U_b", "b_U_a", "a_I_b", "b_I_a", "a", "b")):
outputs = np.concatenate(outputs, axis=0)
targets = np.concatenate(targets, axis=0)
# res = Parallel(n_jobs=-1)(
res = Parallel(n_jobs=1)(
delayed(score_outputs)(output_chunk, target_chunk, ids_a_chunk, ids_b_chunk, val_labels) \
for output_chunk, target_chunk, ids_a_chunk, ids_b_chunk in \
zip(chunked(outputs[::self.skip_tests], 200), chunked(targets[::self.skip_tests], 200),
chunked(ids_a[::self.skip_tests], 200), chunked(ids_b[::self.skip_tests], 200))
)
ious, inds_list, input_ids_list, targets_list = list(zip(*res))
ious = np.concatenate(ious, axis=0)
selected_inds = np.concatenate(inds_list, axis=0)
input_ids = np.concatenate(input_ids_list, axis=0)
targets = np.concatenate(targets_list, axis=0)
del inds_list, input_ids_list, targets_list
with (results_path / "results_{}.pkl".format(name)).open("wb") as f:
pickle.dump(dict(ious=ious, selected_inds=selected_inds, input_ids=input_ids, targets=targets), f)
logging.info(
'Test {} average recall (k=1, 3, 5): {}'.format(
name, np.mean(ious, axis=0)
)
)
def embed_dataset(self, base_model, val_loader):
"""Calculate the validation embedding.
Args:
base_model:
val_loader:
Returns:
"""
logging.info("Calculate the validation embeddings.")
val_outputs = []
val_labels = []
with torch.no_grad():
for batch in tqdm(val_loader):
input_, labels = convert_tensor(batch, device=self.device)
if self.paper_reproduce:
embed = torch.tanh(base_model(input_))
else:
embed = base_model(input_)
val_outputs.append(embed.cpu().numpy())
val_labels.append(labels.cpu().numpy())
val_outputs = np.concatenate(val_outputs, axis=0)
val_labels = np.concatenate(val_labels, axis=0)
return val_labels, val_outputs
def setup_datasets(self):
"""Load the training datasets."""
logging.info("Setting up the datasets.")
# TODO: comment out if you don't want to copy coco to /tmp/aa
# copy_coco_data()
CocoDatasetPairs = getattr(alfassy, "CocoDatasetPairs")
CocoDatasetPairsSub = getattr(alfassy, "CocoDatasetPairsSub")
if self.paper_reproduce:
scaler = transforms.Scale((350, 350))
else:
scaler = transforms.Resize(self.crop_size)
val_transform = transforms.Compose(
[
scaler,
transforms.CenterCrop(self.crop_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
]
)
CocoDataset = getattr(alfassy, "CocoDataset")
val_dataset = CocoDataset(
root_dir=self.coco_path,
set_name='val2014',
unseen_set=self.unseen,
transform=val_transform,
debug_size=self.debug_size
)
val_loader = DataLoader(
val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers
)
pair_dataset = CocoDatasetPairs(
root_dir=self.coco_path,
set_name='val2014',
unseen_set=self.unseen,
transform=val_transform,
return_ids=True,
debug_size=self.debug_size
)
pair_loader = DataLoader(
pair_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers
)
pair_dataset_sub = CocoDatasetPairsSub(
root_dir=self.coco_path,
set_name='val2014',
unseen_set=self.unseen,
transform=val_transform,
return_ids=True,
debug_size=self.debug_size
)
pair_loader_sub = DataLoader(
pair_dataset_sub,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers
)
return val_loader, pair_loader, pair_loader_sub
    def setup_model(self):
        """Create or resume the models.

        Builds the base feature extractor, the classifier head and the set
        operations module, then (optionally) loads weights from
        ``self.resume_path``.  Returns ``(base_model, classifier, setops_model)``.
        """
        logging.info("Setup the models.")
        logging.info("{} model".format(self.base_network_name))
        models_path = Path(self.resume_path)
        # ResNet factories return both the backbone and the classifier head;
        # everything else falls back to an Inception3 backbone.
        if self.base_network_name.lower().startswith("resnet"):
            base_model, classifier = getattr(setops_models, self.base_network_name)(
                num_classes=80,
                avgpool_kernel=self.avgpool_kernel
            )
        else:
            base_model = Inception3(aux_logits=False, transform_input=True)
            classifier = getattr(setops_models, self.classifier_name)(num_classes=80)
        # Optionally seed the Inception backbone/classifier from the paper's
        # published checkpoint (only matching keys are loaded).
        if self.init_inception:
            logging.info("Initialize inception model using paper's networks.")
            checkpoint = torch.load(models_path / 'paperBaseModel')
            base_model = Inception3(aux_logits=False, transform_input=True)
            base_model.load_state_dict(
                {k: v for k, v in checkpoint["state_dict"].items() if k in base_model.state_dict()}
            )
            classifier.load_state_dict(
                {k: v for k, v in checkpoint["state_dict"].items() if k in classifier.state_dict()}
            )
        # Set-operations module (intersection/union/subtraction heads).
        setops_model_cls = getattr(setops_models, self.sets_network_name)
        setops_model = setops_model_cls(
            input_dim=2048,
            S_latent_dim=self.ops_latent_dim, S_layers_num=self.ops_layer_num,
            I_latent_dim=self.ops_latent_dim, I_layers_num=self.ops_layer_num,
            U_latent_dim=self.ops_latent_dim, U_layers_num=self.ops_layer_num,
            block_cls_name=self.sets_block_name, basic_block_cls_name=self.sets_basic_block_name,
            dropout_ratio=self.setops_dropout,
        )
        # Resume: load the latest checkpoint matching the requested epoch for
        # each sub-model (glob results are sorted; [-1] picks the newest).
        if self.resume_path:
            logging.info("Resuming the models.")
            if not self.init_inception:
                base_model.load_state_dict(
                    torch.load(sorted(models_path.glob("networks_base_model_{}*.pth".format(self.resume_epoch)))[-1])
                )
            classifier.load_state_dict(
                torch.load(sorted(models_path.glob("networks_classifier_{}*.pth".format(self.resume_epoch)))[-1])
            )
            if self.paper_reproduce:
                logging.info("using paper models")
                setops_model_cls = getattr(setops_models, "SetOpsModulePaper")
                setops_model = setops_model_cls(models_path)
            else:
                setops_model.load_state_dict(
                    torch.load(
                        sorted(
                            models_path.glob("networks_setops_model_{}*.pth".format(self.resume_epoch))
                        )[-1]
                    )
                )
        return base_model, classifier, setops_model
if __name__ == "__main__":
    # Script entry point: build the experiment object and run it.
    main = Main()
    main.initialize()
    main.start()
| 39.759921 | 122 | 0.579221 |
9d5ff762cb7d91e6dedf07a289adcb3743fc455c | 4,623 | py | Python | milpool/MIL_distributions.py | knutdrand/milpool | 6d33c6eb4d3bbdd8d95fce6a7006c43d9d939026 | [
"MIT"
] | null | null | null | milpool/MIL_distributions.py | knutdrand/milpool | 6d33c6eb4d3bbdd8d95fce6a7006c43d9d939026 | [
"MIT"
] | null | null | null | milpool/MIL_distributions.py | knutdrand/milpool | 6d33c6eb4d3bbdd8d95fce6a7006c43d9d939026 | [
"MIT"
] | null | null | null | from .reparametrization import Reparametrization, reparametrize
from .distributions import MixtureXY
import numpy as np
from scipy.special import logsumexp
from numpy import logaddexp
import torch
class MILXY(MixtureXY):
    """Joint distribution over MIL bags x and bag labels y.

    A bag of ``group_size`` instances is positive with probability ``q``;
    within a positive bag each instance is positive with probability ``w``
    (inherited from MixtureXY, together with mu_1, mu_2 and sigma).
    """
    # Prior probability that a bag is positive (Bernoulli parameter).
    q: torch.Tensor = torch.tensor(0.5)
    # Number of instances per bag.
    group_size: int = 10
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Append q to the parameter tuple inherited from MixtureXY so that
        # log_likelihood receives it as its last positional parameter.
        self.params = tuple(list(self.params) + [self.q])
    def sample(self, n=1):
        """Draw n bags; returns (x, y) with x of shape (n, group_size)."""
        y = torch.bernoulli(self.q*torch.ones(n)[:, None])
        # Instance labels z are all zero in negative bags (y broadcasts).
        z = torch.bernoulli(y*torch.ones(self.group_size)*self.w)
        mu = self.mu_1*z+self.mu_2*(1-z)
        return torch.normal(mu, self.sigma), y
    def log_likelihood(self, x, y, mu_1, mu_2, sigma, w, q):
        """Log p(x, y): Gaussian mixture inside positive bags, pure negative
        component inside negative bags; instances summed over the last axis."""
        L1 = w*(1/np.sqrt(2*np.pi)/sigma*torch.exp(-(x-mu_1)**2/(2*sigma**2)))
        L2 = (1-w)*(1/np.sqrt(2*np.pi)/sigma*torch.exp(-(x-mu_2)**2/(2*sigma**2)))
        l_posY = torch.log(L1+L2).sum(axis=-1)
        # Negative-bag term computed directly in log space for stability.
        l_negY = (np.log(1/np.sqrt(2*np.pi))-torch.log(sigma) - (x-mu_2)**2/(2*sigma**2)).sum(axis=-1)
        y = y.ravel()
        return y*(torch.log(q) + l_posY) + (1-y)*(l_negY+torch.log(1-q))
class MILX(MILXY):
    """Marginal distribution of bags x (label y marginalised out)."""
    def sample(self, n=1):
        # Drop the labels: returns a 1-tuple (x,) to mirror the parent API.
        return super().sample(n)[:1]
    def log_likelihood(self, x, mu_1, mu_2, sigma, w, q):
        """Log p(x) = logsumexp over y of log p(x, y)."""
        L1 = w*(1/np.sqrt(2*np.pi)/sigma*torch.exp(-(x-mu_1)**2/(2*sigma**2)))
        L2 = (1-w)*(1/np.sqrt(2*np.pi)/sigma*torch.exp(-(x-mu_2)**2/(2*sigma**2)))
        l_posY = torch.log(L1+L2).sum(axis=-1)
        l_negY = (np.log(1/np.sqrt(2*np.pi))-torch.log(sigma) - (x-mu_2)**2/(2*sigma**2)).sum(axis=-1)
        # Marginalise over y = {0, 1} in log space.
        return torch.logaddexp(torch.log(q) + l_posY, l_negY+torch.log(1-q))
class MILConditional(MILXY):
    """Conditional log-likelihood log p(y | x) = log p(x, y) - log p(x)."""
    def log_likelihood(self, x, y, mu_1, mu_2, sigma, w, q):
        # NOTE(review): constructs throwaway MILXY()/MILX() instances purely to
        # reach their log_likelihood methods; the parameters are passed
        # explicitly so the instances' own state is unused.
        return MILXY().log_likelihood(x, y, mu_1, mu_2, sigma, w, q)-MILX().log_likelihood(x, mu_1, mu_2, sigma, w, q)
class PureConditional(MILConditional):
    """Conditional model reparametrised as a logistic regression (alpha, beta).

    NOTE(review): this class looks experimental — it contains leftover debug
    prints and several competing formulas (see the commented-out lines and the
    immediately-overwritten first ``inside_the_sum``). Confirm the intended
    likelihood before relying on it.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Convert (mu_1, mu_2, sigma, w, q) into logistic-regression
        # parameters: intercept alpha and slope beta of log p(z=1|x).
        mu_1, mu_2, sigma, w, q = self.params
        self.params = ((mu_2**2-mu_1**2)/(2*sigma**2)+torch.log(w/(1-w)),
                       (mu_1-mu_2)/sigma**2, w, q)
    def log_likelihood(self, x, y, alpha, beta, w, q):
        eta = alpha+beta*x
        p = torch.sigmoid(eta)
        # l_pos = torch.log(q) + torch.sum(
        #     torch.log(torch.sigmoid(eta)*w/(q*w)+torch.sigmoid(-eta)*(1-w)/(1-q*w)), axis=-1)
        # l_pos = torch.log(q) + torch.sum(
        #     torch.log(torch.sigmoid(eta)*w/(q*w)+torch.sigmoid(-eta)*(1-w)/(1-q*w)), axis=-1)
        # NOTE(review): the next assignment is dead code — it is overwritten
        # by the line that follows it.
        inside_the_sum = torch.logaddexp(torch.log(1-w), torch.log(1/w*q-2-w) + torch.log(p))
        inside_the_sum = torch.logaddexp(torch.log(1-p)+torch.log(1-w), torch.log(p)+torch.log(1/q-w)-1)
        l_pos = torch.log(q) + torch.sum(inside_the_sum, axis=-1)
        l_neg = torch.log(1-q) + torch.sum(torch.log(1-p), axis=-1)
        # Debug print left in by the author — consider removing.
        print(l_pos.mean(axis=-1), l_neg.mean(axis=-1))
        #l_neg = torch.log(1-q) + torch.sum(torch.log(torch.sigmoid(-eta)/(1-q*w)), axis=-1)
        l = torch.logaddexp(l_pos, l_neg)
        print(l.mean(axis=-1))
        y = y.ravel()
        # Normalised conditional: log p(y|x) = log p(x, y) - log p(x).
        total = y*l_pos+(1-y)*l_neg-l
        print(total.mean())
        return total
# Distributions that share the (mu_1, mu_2, sigma, w, q) parametrisation.
dists = (MILX, MILXY, MILConditional)
# Reparametrisation of (mu_0, mu_1, sigma, w, q) into "midpoint" coordinates
# (eta_0..eta_3, q); old_to_new/new_to_old are elementwise inverse maps.
MidPointReparam = Reparametrization(
    old_to_new=(lambda mu_0, mu_1, sigma, w, q: (mu_0+mu_1)/2-sigma**2*torch.log(w/(1-w))/(2*(mu_0-mu_1)),
                lambda mu_0, mu_1, _, __, q: (mu_0-mu_1),
                lambda _, __, sigma, w, q: sigma**2*torch.log(w/(1-w)),
                lambda _, __, sigma, w, q: sigma**2,
                lambda _, __, sigma, w, q: q),
    new_to_old=(lambda eta_0, eta_1, eta_2, eta_3, q: eta_0 + eta_2/(2*eta_1) + eta_1/2,
                lambda eta_0, eta_1, eta_2, eta_3, q: eta_0 + eta_2/(2*eta_1) -eta_1/2,
                lambda eta_0, eta_1, eta_2, eta_3, q: torch.sqrt(eta_3),
                lambda eta_0, eta_1, eta_2, eta_3, q: torch.sigmoid(eta_2/eta_3),
                lambda eta_0, eta_1, eta_2, eta_3, q: q))
# Same idea for the pure logistic parametrisation (alpha, beta, w, q):
# midpoint eta_0 = -alpha/beta, slope eta_1 = beta.
PureMidPointReparam = Reparametrization(
    old_to_new=(lambda alpha, beta, w, q: -alpha/beta,
                lambda alpha, beta, w, q: beta,
                lambda alpha, beta, w, q: w,
                lambda alpha, beta, w, q: q),
    new_to_old=(lambda eta_0, eta_1, w, q: -eta_0*eta_1,
                lambda eta_0, eta_1, w, q: eta_1,
                lambda eta_0, eta_1, w, q: w,
                lambda eta_0, eta_1, w, q: q))
# Reparametrised variants of every distribution above.
reparam_dists = tuple(reparametrize(cls, MidPointReparam) for cls in dists)
RPPureConditional = reparametrize(PureConditional, PureMidPointReparam)
ce844fdfcd9af08293173cb6bf24cbe933285e64 | 2,208 | py | Python | venv/lib/python2.7/site-packages/ebcli/controllers/codesource.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2018-04-19T19:56:53.000Z | 2021-06-28T19:53:41.000Z | venv/lib/python2.7/site-packages/ebcli/controllers/codesource.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 1 | 2017-04-27T12:06:05.000Z | 2017-04-27T12:06:05.000Z | venv/lib/python2.7/site-packages/ebcli/controllers/codesource.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2016-10-12T23:54:55.000Z | 2020-07-25T23:28:25.000Z | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..lib import utils
from ..core import io
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text, prompts
from ..operations import gitops
class CodeSourceController(AbstractBaseController):
    """`eb codesource`: choose between CodeCommit and local code deployment."""

    class Meta(AbstractBaseController.Meta):
        label = 'codesource'
        description = strings['codesource.info']
        arguments = [
            (['sourcename'], dict(action='store', nargs='?',
                                  help=flag_text['codesource.sourcename'],
                                  choices=['codecommit', 'local'], type=str.lower)),
        ]
        usage = 'eb codesource <sourcename> [options ...]'

    def do_command(self):
        """Dispatch on the optional positional source name."""
        sourcename = self.app.pargs.sourcename
        if sourcename is None:
            # No argument on the command line: ask the user interactively.
            self.prompt_for_codesource()
            return
        if sourcename == 'local':
            gitops.print_current_codecommit_settings()
            self.set_local()
        if sourcename == 'codecommit':
            self.set_codecommit()

    def prompt_for_codesource(self):
        """Show the current settings and prompt for a code source."""
        gitops.print_current_codecommit_settings()
        io.echo(prompts['codesource.codesourceprompt'])
        setup_choices = ['CodeCommit', 'Local']
        # Default selection is the second item ("Local").
        choice = utils.prompt_for_item_in_list(setup_choices, 2)
        if choice == 'CodeCommit':
            self.set_codecommit()
        elif choice == 'Local':
            self.set_local()

    def set_local(self):
        """Disable CodeCommit and confirm local deployments to the user."""
        gitops.disable_codecommit()
        io.echo(strings['codesource.localmsg'])

    def set_codecommit(self):
        """Run the interactive CodeCommit setup."""
        gitops.initialize_codecommit()
| 37.423729 | 84 | 0.653533 |
06529b39cccb1af62c6ff9b8121ab72236bba2f1 | 6,269 | py | Python | client_mining_p/blockchain.py | LeTanque/Blockchain | fcb370107040b12b36afe807c3579d979087f71a | [
"MIT"
] | null | null | null | client_mining_p/blockchain.py | LeTanque/Blockchain | fcb370107040b12b36afe807c3579d979087f71a | [
"MIT"
] | null | null | null | client_mining_p/blockchain.py | LeTanque/Blockchain | fcb370107040b12b36afe807c3579d979087f71a | [
"MIT"
] | null | null | null | import hashlib
import json
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request, render_template
class Blockchain(object):
    """A minimal proof-of-work blockchain.

    Blocks are dicts carrying an index, timestamp, pending transactions,
    the proof that mined them, and the hash of the previous block.
    """

    def __init__(self):
        self.chain = []
        self.current_transactions = []
        # Forge the genesis block with a fixed proof and placeholder hash.
        self.new_block(previous_hash="Californication", proof=100)

    def new_block(self, proof, previous_hash=None):
        """Append a new block to the chain and return it.

        :param proof: <int> proof found by the Proof of Work algorithm
        :param previous_hash: (optional) <str> hash of the previous block;
            computed from the current chain tip when omitted
        :return: <dict> the freshly forged block
        """
        forged = {
            "index": len(self.chain) + 1,
            "timestamp": time(),
            "transactions": self.current_transactions,
            "proof": proof,
            "previous_hash": previous_hash or self.hash(self.chain[-1]),
        }
        # Pending transactions are now part of a block; start a fresh list.
        self.current_transactions = []
        self.chain.append(forged)
        return forged

    def hash(self, block):
        """Return the SHA-256 hex digest of the block's canonical JSON.

        ``sort_keys=True`` guarantees a stable serialization, so equal
        blocks always hash to the same value.
        """
        canonical = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(canonical).hexdigest()

    @property
    def last_block(self):
        """The block at the tip of the chain."""
        return self.chain[-1]

    def proof_of_work(self, last_block):
        """Search for the smallest integer proof valid for *last_block*."""
        block_string = json.dumps(last_block, sort_keys=True)
        candidate = 0
        while not self.valid_proof(block_string, candidate):
            candidate += 1
        return candidate

    @staticmethod
    def valid_proof(block_string, proof):
        """True when sha256(block_string + proof) starts with four zero hex digits.

        :param block_string: <str> stringified previous block
        :param proof: <int> candidate proof value
        """
        guess = f"{block_string}{proof}".encode()
        # Increase the number of required leading zeroes to raise difficulty.
        return hashlib.sha256(guess).hexdigest().startswith("0000")
# Instantiate our Node (Flask application serving the mining endpoints)
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain shared by all request handlers
blockchain = Blockchain()
@app.route('/', methods=['GET'])
def server_hello():
    """Landing page: return a small HTML greeting with status 200."""
    response = "Hello! Welcome to blockchain server!!!"
    # NOTE: the backslashes are line continuations *inside* the literal, so
    # this is a single-line string (the indentation becomes literal spaces).
    return f" \
    <body> \
    <h2>Hi</h2> \
    <p>{response}</p> \
    </body>", 200
@app.route("/thingaroo", methods=["POST"])
def.thing():
last_block = blockchain.last_block
last_block_string = json.dumps(last_block, sort_keys=True)
if blockchain.valid_proof(last_block_string, proof):
# Forge new
previous_hash = blockchain.hash(blockchain.last_block)
new_block = blockchain.new_block(proof, previous_hash)
response = {
"block": new_block
}
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
    """Mine server-side: find a proof, forge the block, return it as JSON."""
    # Solve proof-of-work for the current chain tip.
    new_proof = blockchain.proof_of_work(blockchain.last_block)
    # Link the forged block to the hash of the previous one.
    prev_hash = blockchain.hash(blockchain.last_block)
    forged = blockchain.new_block(new_proof, prev_hash)
    # Respond with a shallow copy of the freshly forged block.
    return jsonify(dict(forged)), 200
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return the entire chain together with its current length."""
    payload = {
        "chain": blockchain.chain,
        "length": len(blockchain.chain),
    }
    return jsonify(payload), 200
@app.route('/last', methods=['GET'])
def last_block():
    """Return only the most recent block in the chain."""
    return jsonify({"last_block": blockchain.last_block}), 200
@app.route('/minechain', methods=['GET'])
def mine_chain():
    """Mine one block, then render the whole chain with an HTML template."""
    # Run the proof of work algorithm to get the next proof
    proof = blockchain.proof_of_work(blockchain.last_block)
    # Forge the new Block by adding it to the chain with the proof
    previous_hash = blockchain.hash(blockchain.last_block)
    nkotb = blockchain.new_block(proof, previous_hash)
    # Shallow copy of the chain for the template context.
    previous_hashes = [x for x in blockchain.chain]
    length = len(blockchain.chain)
    return render_template("show_view.html", previous_hashes=previous_hashes, nkotb=nkotb, length=length), 200
# Run the program on port 5000, listening on all interfaces
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
| 32.148718 | 110 | 0.644441 |
85277af1637c7d674423f44d3980a93a6321a524 | 1,905 | py | Python | tests/data/test_Function.py | IavTavares/Practicing-Unit-Testing-and-CI | d1b1d25d8b25fda4f5713211ab2264bcd7d2db92 | [
"MIT"
] | null | null | null | tests/data/test_Function.py | IavTavares/Practicing-Unit-Testing-and-CI | d1b1d25d8b25fda4f5713211ab2264bcd7d2db92 | [
"MIT"
] | null | null | null | tests/data/test_Function.py | IavTavares/Practicing-Unit-Testing-and-CI | d1b1d25d8b25fda4f5713211ab2264bcd7d2db92 | [
"MIT"
] | null | null | null | import sys
# adding data folder to the system path
pathname=r"src/data"
# do not break the string above with \ it will not work...
if not pathname in sys.path:
sys.path.insert(0,pathname)
# append or insert will always add a path
import pytest
from Function import string_to_int,string_to_int_2
class TestStringToInt:
    """Tests for string_to_int, which prints diagnostics and returns None on failure."""
    def test_on_string_to_int(self):
        # A numeric string is converted and truncated to an int.
        assert string_to_int("20.1")==20
    def test_on_first_print(self,capsys):
        # A non-numeric string: returns None and prints a diagnostic.
        assert string_to_int("asdol") is None
        captured = capsys.readouterr()
        assert captured.out ==("Variable is not convertible to a number\n")
    def test_on_second_print(self,capsys):
        # A non-string argument: returns None and reports the wrong type.
        assert string_to_int({"asdol"}) is None
        captured = capsys.readouterr()
        assert captured.out == "Wrong Type\n"
class TestStringToInt2:
    """Tests for string_to_int_2, which raises instead of printing."""

    def test_on_string_to_int(self):
        # A numeric string is converted and truncated to an int.
        assert string_to_int_2("20.1") == 20

    def test_on_first_error_raised(self):
        # A non-numeric string raises ValueError with a descriptive message.
        with pytest.raises(ValueError) as value_info:
            string_to_int_2("asdol")
        assert value_info.match("Variable is not convertible to a number")

    def test_on_second_error_raised(self):
        # A non-string argument raises TypeError.
        with pytest.raises(TypeError) as type_info:
            string_to_int_2({"asdol"})
        assert type_info.match("Wrong Type")

    @pytest.mark.xfail(reason="Test-driven Dev")  # we expect the next test to fail
    def test_expected_to_fail(self):
        assert string_to_int_2({"asdol"}) is None

    # Typo fixed in the user-visible skip reason: "Pytho" -> "Python".
    @pytest.mark.skipif(sys.version_info >= (3, 9, 6),
                        reason="requires Python 3.9.5 or lower")
    # we expect the next test to not be run on Python 3.9.6 and higher
    def test_expected_to_skip(self):
        assert string_to_int_2({"asdol"}) is None
# !cd C:\path
# we always need ! to run shell commands in IPython console
| 34.017857 | 83 | 0.649344 |
0d024dc57d2a5a5f9978c48222e9b5e18e7a4efc | 11,416 | py | Python | tests/membership.py | Perfumiste777/CCF | d3ef3e88b8997d7e1b033f687e45f0de17f26ce6 | [
"Apache-2.0"
] | null | null | null | tests/membership.py | Perfumiste777/CCF | d3ef3e88b8997d7e1b033f687e45f0de17f26ce6 | [
"Apache-2.0"
] | null | null | null | tests/membership.py | Perfumiste777/CCF | d3ef3e88b8997d7e1b033f687e45f0de17f26ce6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import http
import infra.e2e_args
import infra.network
import infra.consortium
import random
import suite.test_requirements as reqs
from loguru import logger as LOG
@reqs.description("Add and activate a new member to the consortium")
def test_add_member(network, args, recovery_member=True):
primary, _ = network.find_primary()
member_data = {
"example": "of",
"structured": ["and", {"nested": "arbitrary data"}],
}
new_member = network.consortium.generate_and_add_new_member(
primary,
curve=infra.network.ParticipantsCurve(args.participants_curve).next(),
member_data=member_data,
recovery_member=recovery_member,
)
r = new_member.ack(primary)
with primary.client() as nc:
nc.wait_for_commit(r)
return network
@reqs.description("Retire existing member")
@reqs.sufficient_recovery_member_count()
def test_remove_member(network, args, member_to_remove=None, recovery_member=True):
primary, _ = network.find_primary()
if member_to_remove is None:
member_to_remove = network.consortium.get_any_active_member(recovery_member)
network.consortium.remove_member(primary, member_to_remove)
# Check that remove member cannot be authenticated by the service
try:
member_to_remove.ack(primary)
except infra.member.UnauthenticatedMember:
pass
else:
assert False, "Member should have been removed"
return network
@reqs.description("Issue new recovery shares (without re-key)")
def test_update_recovery_shares(network, args):
primary, _ = network.find_primary()
network.consortium.trigger_recovery_shares_refresh(primary)
return network
@reqs.description("Set recovery threshold")
def test_set_recovery_threshold(network, args, recovery_threshold=None):
if recovery_threshold is None:
# If the recovery threshold is not specified, a new threshold is
# randomly selected based on the number of active recovery members.
# The new recovery threshold is guaranteed to be different from the
# previous one.
list_recovery_threshold = list(
range(1, len(network.consortium.get_active_recovery_members()) + 1)
)
list_recovery_threshold.remove(network.consortium.recovery_threshold)
recovery_threshold = random.choice(list_recovery_threshold)
primary, _ = network.find_primary()
network.consortium.set_recovery_threshold(primary, recovery_threshold)
return network
def assert_recovery_shares_update(are_shared_updated, func, network, args, **kwargs):
    """Run *func* and assert whether every saved recovery share changed.

    Args:
        are_shared_updated: expected outcome — True if shares must differ
            after *func*, False if they must be unchanged.
        func: one of the test_* helpers above; dispatched on identity below.
    """
    primary, _ = network.find_primary()
    # Snapshot each active recovery member's decrypted share before the change.
    saved_recovery_shares = {}
    for m in network.consortium.get_active_recovery_members():
        saved_recovery_shares[m] = m.get_and_decrypt_recovery_share(primary)
    if func is test_remove_member:
        recovery_member = kwargs.pop("recovery_member")
        member_to_remove = network.consortium.get_any_active_member(
            recovery_member=recovery_member
        )
        if recovery_member:
            # The removed member's share cannot be compared afterwards.
            saved_recovery_shares.pop(member_to_remove)
        func(network, args, member_to_remove)
    elif func is test_set_recovery_threshold and "recovery_threshold" in kwargs:
        func(network, args, recovery_threshold=kwargs["recovery_threshold"])
    else:
        func(network, args, **kwargs)
    # Compare each surviving member's share against the snapshot.
    for m, share_before in saved_recovery_shares.items():
        if are_shared_updated:
            assert share_before != m.get_and_decrypt_recovery_share(primary)
        else:
            assert share_before == m.get_and_decrypt_recovery_share(primary)
def service_startups(args):
    """Exercise service startup with various member/recovery-member mixes.

    Verifies that a service cannot be opened without recovery members, and
    that it starts normally with at least one recovery member present.
    Fixes the typo "mmebers" in the success log message.
    """
    LOG.info("Starting service with insufficient number of recovery members")
    args.initial_member_count = 2
    args.initial_recovery_member_count = 0
    args.initial_operator_count = 1
    with infra.network.network(args.nodes, args.binary_dir, pdb=args.pdb) as network:
        try:
            network.start_and_join(args)
            assert False, "Service cannot be opened with no recovery members"
        except AssertionError:
            # Startup must have stalled with the service still OPENING.
            primary, _ = network.find_primary()
            network.consortium.check_for_service(
                primary, infra.network.ServiceStatus.OPENING
            )
            LOG.success(
                "Service could not be opened with insufficient number of recovery members"
            )
    LOG.info(
        "Starting service with a recovery operator member, a non-recovery operator member and a non-recovery non-operator member"
    )
    args.initial_member_count = 3
    args.initial_recovery_member_count = 1
    args.initial_operator_count = 2
    with infra.network.network(args.nodes, args.binary_dir, pdb=args.pdb) as network:
        network.start_and_join(args)
    LOG.info(
        "Starting service with a recovery operator member, a recovery non-operator member and a non-recovery non-operator member"
    )
    args.initial_member_count = 3
    args.initial_recovery_member_count = 2
    args.initial_operator_count = 1
    with infra.network.network(args.nodes, args.binary_dir, pdb=args.pdb) as network:
        network.start_and_join(args)
def recovery_shares_scenario(args):
    """End-to-end scenario covering recovery-share updates and threshold rules.

    Starts a 3-member service (2 recovery members), then checks that share
    refresh, member add/remove and threshold changes update (or preserve)
    the recovery shares as expected, and that invalid thresholds are rejected.
    """
    # Members 0 and 1 are recovery members, member 2 isn't
    args.initial_member_count = 3
    args.initial_recovery_member_count = 2
    non_recovery_member_id = "member2"
    # Recovery threshold is initially set to number of recovery members (2)
    with infra.network.network(
        args.nodes, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
    ) as network:
        network.start_and_join(args)
        # Membership changes trigger re-sharing and re-keying and are
        # only supported with CFT
        if args.consensus != "cft":
            LOG.warning("Skipping test recovery threshold as consensus is not CFT")
            return
        LOG.info("Update recovery shares")
        assert_recovery_shares_update(True, test_update_recovery_shares, network, args)
        LOG.info("Non-recovery member does not have a recovery share")
        primary, _ = network.find_primary()
        with primary.client(non_recovery_member_id) as mc:
            r = mc.get("/gov/recovery_share")
            assert r.status_code == http.HTTPStatus.NOT_FOUND.value
            assert (
                f"Recovery share not found for member {network.consortium.get_member_by_local_id(non_recovery_member_id).service_id}"
                in r.body.json()["error"]["message"]
            )
        # Removing a recovery number is not possible as the number of recovery
        # members would be under recovery threshold (2)
        LOG.info("Removing a recovery member should not be possible")
        try:
            test_remove_member(network, args, recovery_member=True)
            assert False, "Removing a recovery member should not be possible"
        except infra.proposal.ProposalNotAccepted as e:
            assert e.proposal.state == infra.proposal.ProposalState.FAILED
        # However, removing a non-recovery member is allowed
        LOG.info("Removing a non-recovery member is still possible")
        member_to_remove = network.consortium.get_member_by_local_id(
            non_recovery_member_id
        )
        test_remove_member(network, args, member_to_remove=member_to_remove)
        LOG.info("Removing an already-removed member succeeds with no effect")
        test_remove_member(network, args, member_to_remove=member_to_remove)
        LOG.info("Adding one non-recovery member")
        assert_recovery_shares_update(
            False, test_add_member, network, args, recovery_member=False
        )
        LOG.info("Adding one recovery member")
        assert_recovery_shares_update(
            True, test_add_member, network, args, recovery_member=True
        )
        LOG.info("Removing one non-recovery member")
        assert_recovery_shares_update(
            False, test_remove_member, network, args, recovery_member=False
        )
        LOG.info("Removing one recovery member")
        assert_recovery_shares_update(
            True, test_remove_member, network, args, recovery_member=True
        )
        LOG.info("Reduce recovery threshold")
        assert_recovery_shares_update(
            True,
            test_set_recovery_threshold,
            network,
            args,
            recovery_threshold=network.consortium.recovery_threshold - 1,
        )
        # Removing a recovery member now succeeds
        LOG.info("Removing one recovery member")
        assert_recovery_shares_update(
            True, test_remove_member, network, args, recovery_member=True
        )
        LOG.info("Set recovery threshold to 0 is impossible")
        exception = infra.proposal.ProposalNotCreated
        try:
            test_set_recovery_threshold(network, args, recovery_threshold=0)
            assert False, "Setting recovery threshold to 0 should not be possible"
        except exception as e:
            assert (
                e.response.status_code == 400
                and e.response.body.json()["error"]["code"]
                == "ProposalFailedToValidate"
            ), e.response.body.text()
        LOG.info(
            "Set recovery threshold to more that number of active recovery members is impossible"
        )
        try:
            test_set_recovery_threshold(
                network,
                args,
                recovery_threshold=len(network.consortium.get_active_recovery_members())
                + 1,
            )
            assert (
                False
            ), "Setting recovery threshold to more than number of active recovery members should not be possible"
        except infra.proposal.ProposalNotAccepted as e:
            assert e.proposal.state == infra.proposal.ProposalState.FAILED
        # Recovery thresholds are stored in a single byte: > 255 must fail.
        try:
            test_set_recovery_threshold(network, args, recovery_threshold=256)
            assert False, "Recovery threshold cannot be set to > 255"
        except exception as e:
            assert (
                e.response.status_code == 400
                and e.response.body.json()["error"]["code"]
                == "ProposalFailedToValidate"
            ), e.response.body.text()
        # A missing threshold argument must be rejected at proposal creation.
        try:
            network.consortium.set_recovery_threshold(primary, recovery_threshold=None)
            assert False, "Recovery threshold value must be passed as proposal argument"
        except exception as e:
            assert (
                e.response.status_code == 400
                and e.response.body.json()["error"]["code"]
                == "ProposalFailedToValidate"
            ), e.response.body.text()
        LOG.info(
            "Setting recovery threshold to current threshold does not update shares"
        )
        assert_recovery_shares_update(
            False,
            test_set_recovery_threshold,
            network,
            args,
            recovery_threshold=network.consortium.recovery_threshold,
        )
def run(args):
    """Run both membership scenarios with the given CLI arguments."""
    service_startups(args)
    recovery_shares_scenario(args)
if __name__ == "__main__":
args = infra.e2e_args.cli_args()
args.package = "liblogging"
# Fast test
args.nodes = infra.e2e_args.min_nodes(args, f=0)
args.initial_user_count = 0
run(args)
| 37.801325 | 133 | 0.673441 |
fab2c86281b1ffbd59eed7d7f3a4052a98d8c331 | 3,802 | py | Python | aim/sdk/artifacts/artifact_writer.py | jamesj-jiao/aim | 452380368f76ff441b2ff3a51029f4ad43a9d902 | [
"MIT"
] | null | null | null | aim/sdk/artifacts/artifact_writer.py | jamesj-jiao/aim | 452380368f76ff441b2ff3a51029f4ad43a9d902 | [
"MIT"
] | null | null | null | aim/sdk/artifacts/artifact_writer.py | jamesj-jiao/aim | 452380368f76ff441b2ff3a51029f4ad43a9d902 | [
"MIT"
] | null | null | null | import time
import math
from aim.engine.aim_repo import AimRepo
from aim.engine.utils import random_str
from aim.sdk.artifacts.artifact import Artifact
from aim.sdk.artifacts.record import Record, RecordCollection
from aim.sdk.artifacts.record_writer import RecordWriter
Writable = [Record, RecordCollection]
class ArtifactWriter:
    """Persists serialized artifacts (records or record collections) into an .aim repo."""

    def __init__(self):
        ...

    def save(self, repo: AimRepo, artifact: Artifact) -> bool:
        """
        Stores serialized instance into .aim repo

        NOTE(review): despite the ``-> bool`` annotation, this returns the
        store result dict (single Record), a list of results
        (RecordCollection), or None when the serialized item is neither —
        consider fixing the annotation.
        """
        item = artifact.serialize()
        res = None
        if isinstance(item, Record):
            res = ArtifactWriter._save_record(repo, artifact, item)
        elif isinstance(item, RecordCollection):
            # Create the directory for the collection; only the relative
            # path is needed when storing the individual records.
            dir_path, dir_rel_path = repo.store_dir(item.name,
                                                    item.cat,
                                                    item.data)
            res = []
            for record in item.records:
                # Store dir files
                res.append(
                    ArtifactWriter._save_record(
                        repo, artifact, record, dir_rel_path))
            # Save dict
        return res

    @staticmethod
    def _save_record(
            repo: AimRepo,
            artifact: Artifact,
            record: Record,
            dir_path: str = None):
        """Persist a single record, dispatching on its binary type.

        Returns the repo store result for the record (a dict-like object).
        """
        if record.binary_type is Artifact.IMAGE:
            # Get image name and abs path; timestamp + random suffix keeps
            # names unique within the category.
            img_name_time = math.floor(time.time() * 1000)
            img_name_random = random_str(10)
            img_name = '{time}__{random}.jpg'.format(
                time=img_name_time,
                random=img_name_random)
            res = repo.store_image(img_name, record.cat)
            # Save image at specified path
            artifact.save_blobs(res['path'], res['abs_path'])
        elif record.binary_type == Artifact.MODEL:
            # Get model name, directory and zip archive paths
            file_res = repo.store_model_file(record.name,
                                             record.cat)
            # Save model at specified path
            model_save_res = artifact.save_blobs(file_res)
            res = repo.store_model(record.name,
                                   record.data['model_name'],
                                   record.data['epoch'],
                                   record.data['meta'],
                                   model_save_res,
                                   record.cat)
            # Archive model directory
            repo.archive_dir(res['zip_path'], res['dir_path'])
        elif record.binary_type == Artifact.PROTOBUF:
            # Protobuf records go through the aimrecords storage writer;
            # singular records overwrite ('w'), others append ('a').
            writer_type = RecordWriter.AIMRECORDS_WRITER
            write_mode = 'w' if record.is_singular else 'a'
            writer = RecordWriter.get_writer(writer_type,
                                             repo.records_storage)
            writer.write(artifact.get_inst_unique_name(),
                         write_mode,
                         record.content)
            res = repo.store_artifact(record.name,
                                      record.cat,
                                      record.data,
                                      writer_type,
                                      record.binary_type)
        else:
            # Fallback: plain JSON log file (optionally inside dir_path).
            file_name = '{}.log'.format(record.name)
            res = repo.store_file(file_name,
                                  record.name,
                                  record.cat,
                                  record.data,
                                  dir_path)
            write_mode = 'w' if record.is_singular else 'a'
            writer = RecordWriter.get_writer(RecordWriter.JSON_LOG_WRITER)
            writer.write(res['abs_path'], write_mode, record.content)
        return res
| 38.40404 | 74 | 0.507365 |
e958db85397521419f961e25987b9e095d8fa277 | 186 | py | Python | touchstone/buffers/ppo_experience.py | LechuzaAI/touchstone | 6893b199f14f34986b475c79b4a41934fcf8e7a5 | [
"MIT"
] | null | null | null | touchstone/buffers/ppo_experience.py | LechuzaAI/touchstone | 6893b199f14f34986b475c79b4a41934fcf8e7a5 | [
"MIT"
] | null | null | null | touchstone/buffers/ppo_experience.py | LechuzaAI/touchstone | 6893b199f14f34986b475c79b4a41934fcf8e7a5 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Any
from touchstone.buffers import Experience
@dataclass
class PPOExperience(Experience):
    """Experience record extended with the extra fields PPO requires."""
    # NOTE(review): presumably the log-probabilities of the taken actions
    # (needed for the PPO probability ratio) — confirm against the trainer.
    action_log_probs: Any
    # NOTE(review): presumably the critic's value estimate — confirm usage.
    value: Any
| 18.6 | 41 | 0.806452 |
1bdec0ba3ef9acee22318e184b1ab46493f4ec31 | 672 | py | Python | lupin/validators/match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
] | 22 | 2017-10-18T08:27:20.000Z | 2022-03-25T18:53:43.000Z | lupin/validators/match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
] | 5 | 2019-09-16T15:31:55.000Z | 2022-02-10T08:29:14.000Z | lupin/validators/match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
] | null | null | null | from . import Validator
from ..errors import InvalidMatch
class Match(Validator):
    """Validates that a value is a string matching a compiled regex pattern."""

    def __init__(self, regex):
        """
        Args:
            regex (regex): a compiled regexp object
        """
        self._regex = regex

    def __call__(self, value, path):
        """Raise InvalidMatch unless *value* matches the pattern.

        Args:
            value (str): string to validate
            path (list): error path
        """
        try:
            # Non-string values make `match` raise TypeError: treat both a
            # failed match and a type failure as an invalid value.
            if self._regex.match(value) is None:
                raise InvalidMatch(value, self._regex, path)
        except TypeError:
            raise InvalidMatch(value, self._regex, path)
| 24.888889 | 60 | 0.5625 |
dcb1118b74c74546636550c7f8ec35878a27b041 | 806 | py | Python | programme/views/admin_publish_view.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | programme/views/admin_publish_view.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | programme/views/admin_publish_view.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z |
from django.utils.translation import ugettext_lazy as _
from labour.views.admin_startstop_view import generic_publish_unpublish_view
from ..helpers import programme_admin_required
from ..forms import PublishForm
@programme_admin_required
def admin_publish_view(request, vars, event):
    """Admin view for publishing/un-publishing an event's programme schedule.

    Delegates to the generic publish/unpublish view with programme-specific
    template, form class and messages.
    """
    # Fix: the previous revision bound a local `meta` that was never used;
    # pass the attribute straight through instead.
    return generic_publish_unpublish_view(
        request, vars, event,
        meta=event.programme_event_meta,
        template='programme_admin_publish_view.pug',
        FormClass=PublishForm,
        save_success_message=_("The publication time was saved."),
        start_now_success_message=_("The schedule was published."),
        stop_now_success_message=_("The schedule was un-published."),
        already_public_message=_("The schedule was already public."),
    )
| 35.043478 | 76 | 0.759305 |
15b484fa7f29901943d70cd0d78160a472e685d4 | 1,933 | py | Python | lib/surface/notebooks/instances/get_health.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/notebooks/instances/get_health.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/notebooks/instances/get_health.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'notebooks instances get-health' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.notebooks import instances as instance_util
from googlecloudsdk.api_lib.notebooks import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.notebooks import flags
DETAILED_HELP = {
'DESCRIPTION':
"""
Request for checking if a notebook instance is healthy.
""",
'EXAMPLES':
"""
To check if an instance is healthy, run:
$ {command} example-instance --location=us-central1-a
""",
}
@base.ReleaseTracks(base.ReleaseTrack.GA)
class GetHealth(base.DescribeCommand):
  """Request for checking if a notebook instance is healthy."""

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    flags.AddGetHealthInstanceFlags(parser)

  def Run(self, args):
    """Issue the GetInstanceHealth request and return its response."""
    track = self.ReleaseTrack()
    api_client = util.GetClient(track)
    api_messages = util.GetMessages(track)
    request = instance_util.CreateInstanceGetHealthRequest(args, api_messages)
    return api_client.projects_locations_instances.GetInstanceHealth(request)
GetHealth.detailed_help = DETAILED_HELP
| 32.216667 | 74 | 0.747543 |
01024627e7724f9013716345b323dd2e0f9ad54c | 24,002 | py | Python | autogluon/utils/tabular/ml/learner/abstract_learner.py | TEChopra1000/autogluon | 3f2e3b3e46cc10e01017257b46e8f5992cbc6c00 | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/learner/abstract_learner.py | TEChopra1000/autogluon | 3f2e3b3e46cc10e01017257b46e8f5992cbc6c00 | [
"Apache-2.0"
] | null | null | null | autogluon/utils/tabular/ml/learner/abstract_learner.py | TEChopra1000/autogluon | 3f2e3b3e46cc10e01017257b46e8f5992cbc6c00 | [
"Apache-2.0"
] | null | null | null | import datetime, json, warnings, logging
from collections import OrderedDict
import pandas as pd
from pandas import DataFrame, Series
from sklearn.metrics import accuracy_score, balanced_accuracy_score, matthews_corrcoef, f1_score, classification_report # , roc_curve, auc
from sklearn.metrics import mean_absolute_error, explained_variance_score, r2_score, mean_squared_error, median_absolute_error # , max_error
import numpy as np
from numpy import corrcoef
from ..constants import BINARY, MULTICLASS, REGRESSION
from ...data.label_cleaner import LabelCleaner
from ..utils import get_pred_from_proba
from ...utils.loaders import load_pkl, load_pd
from ...utils.savers import save_pkl, save_pd
from ..trainer.abstract_trainer import AbstractTrainer
from ..tuning.ensemble_selection import EnsembleSelection
logger = logging.getLogger(__name__)
# TODO: - Semi-supervised learning
# Learner encompasses full problem, loading initial data, feature generation, model training, model prediction
class AbstractLearner:
    """Abstract learner: ties together feature generation, label cleaning, the
    trainer object, and prediction/evaluation for one tabular prediction problem."""
    # Filename used when pickling the learner itself under path_context.
    save_file_name = 'learner.pkl'
    def __init__(self, path_context: str, label: str, id_columns: list, feature_generator, label_count_threshold=10,
                 problem_type=None, objective_func=None, is_trainer_present=False):
        """
        Args:
            path_context: root directory; all artifact paths are derived from it.
            label: name of the target column.
            id_columns: row-identifier columns (used for submissions and the prediction cache join).
            feature_generator: preprocessing object exposing fit_transform/transform.
            label_count_threshold: stored as self.threshold — presumably a minimum
                class-occurrence cutoff used downstream (TODO confirm).
            problem_type: BINARY/MULTICLASS/REGRESSION, or None to infer at fit time.
            objective_func: metric object; code here relies on its .name, __call__ and _sign.
            is_trainer_present: if True the trainer is pickled inside this learner,
                otherwise it is saved/loaded separately via self.trainer_path.
        """
        # Derive and store all artifact locations in one shot.
        self.path_context, self.model_context, self.latest_model_checkpoint, self.eval_result_path, self.pred_cache_path, self.save_path = self.create_contexts(path_context)
        self.label = label
        self.submission_columns = id_columns
        self.threshold = label_count_threshold
        self.problem_type = problem_type
        # Trainer may internally use a different problem type (e.g. binary under multiclass).
        self.trainer_problem_type = None
        self.objective_func = objective_func
        self.is_trainer_present = is_trainer_present
        self.cleaner = None
        self.label_cleaner: LabelCleaner = None
        self.feature_generator = feature_generator
        self.feature_generators = [self.feature_generator]
        self.trainer: AbstractTrainer = None
        self.trainer_type = None
        self.trainer_path = None
        # Set to True by load(reset_paths=True) when artifacts moved on disk.
        self.reset_paths = False
@property
def class_labels(self):
if self.problem_type == MULTICLASS:
return self.label_cleaner.ordered_class_labels
else:
return None
    def set_contexts(self, path_context):
        """Re-derive and store all artifact paths from a (possibly new) root directory."""
        self.path_context, self.model_context, self.latest_model_checkpoint, self.eval_result_path, self.pred_cache_path, self.save_path = self.create_contexts(path_context)
def create_contexts(self, path_context):
model_context = path_context + 'models/'
latest_model_checkpoint = model_context + 'model_checkpoint_latest.pointer'
eval_result_path = model_context + 'eval_result.pkl'
predictions_path = path_context + 'predictions.csv'
save_path = path_context + self.save_file_name
return path_context, model_context, latest_model_checkpoint, eval_result_path, predictions_path, save_path
    def fit(self, X: DataFrame, X_test: DataFrame = None, scheduler_options=None, hyperparameter_tune=True,
            feature_prune=False, holdout_frac=0.1, hyperparameters={}, verbosity=2):
        """Abstract training entry point; concrete learners must override.

        NOTE(review): `hyperparameters={}` is a mutable default — harmless here
        since this stub only raises, but overrides should avoid mutating it.
        """
        raise NotImplementedError
    # TODO: Add pred_proba_cache functionality as in predict()
    def predict_proba(self, X_test: DataFrame, as_pandas=False, inverse_transform=True, sample=None):
        """Predict class probabilities (or regression outputs) for X_test.

        Args:
            X_test: raw feature frame; features are transformed before prediction.
            as_pandas: wrap the result as a DataFrame (multiclass) or Series.
            inverse_transform: map probabilities back to the original label space.
            sample: if set, only predict on the first `sample` rows (local testing).
        """
        ##########
        # Enable below for local testing # TODO: do we want to keep sample option?
        if sample is not None:
            X_test = X_test.head(sample)
        ##########
        trainer = self.load_trainer()
        X_test = self.transform_features(X_test)
        y_pred_proba = trainer.predict_proba(X_test)
        if inverse_transform:
            y_pred_proba = self.label_cleaner.inverse_transform_proba(y_pred_proba)
        if as_pandas:
            if self.problem_type == MULTICLASS:
                # One column per class, in label-cleaner order.
                y_pred_proba = pd.DataFrame(data=y_pred_proba, columns=self.class_labels)
            else:
                y_pred_proba = pd.Series(data=y_pred_proba, name=self.label)
        return y_pred_proba
    # TODO: Add decorators for cache functionality, return core code to previous state
    # use_pred_cache to check for a cached prediction of rows, can dramatically speedup repeated runs
    # add_to_pred_cache will update pred_cache with new predictions
    def predict(self, X_test: DataFrame, as_pandas=False, sample=None, use_pred_cache=False, add_to_pred_cache=False):
        """Predict labels for X_test, optionally reading from / writing to an
        on-disk prediction cache keyed by self.submission_columns."""
        pred_cache = None
        if use_pred_cache or add_to_pred_cache:
            # Best-effort load; a missing/corrupt cache simply means no cache.
            try:
                pred_cache = load_pd.load(path=self.pred_cache_path, dtype=X_test[self.submission_columns].dtypes.to_dict())
            except Exception:
                pass
        if use_pred_cache and (pred_cache is not None):
            # Split rows into cache hits (with their cached label) and misses.
            X_id = X_test[self.submission_columns]
            X_in_cache_with_pred = pd.merge(left=X_id.reset_index(), right=pred_cache, on=self.submission_columns).set_index('index')  # Will break if 'index' == self.label or 'index' in self.submission_columns
            X_test_cache_miss = X_test[~X_test.index.isin(X_in_cache_with_pred.index)]
            logger.log(20, 'Using cached predictions for '+str(len(X_in_cache_with_pred))+' out of '+str(len(X_test))+' rows, which have already been predicted previously. To make new predictions, set use_pred_cache=False')
        else:
            # No usable cache: everything is a miss, hits frame stays empty.
            X_in_cache_with_pred = pd.DataFrame(data=None, columns=self.submission_columns + [self.label])
            X_test_cache_miss = X_test
        if len(X_test_cache_miss) > 0:
            y_pred_proba = self.predict_proba(X_test=X_test_cache_miss, inverse_transform=False, sample=sample)
            # The trainer may have been fit with a different problem type than the learner.
            if self.trainer_problem_type is not None:
                problem_type = self.trainer_problem_type
            else:
                problem_type = self.problem_type
            y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=problem_type)
            y_pred = self.label_cleaner.inverse_transform(pd.Series(y_pred))
            # Align predictions with the original row index for the merge below.
            y_pred.index = X_test_cache_miss.index
        else:
            # Every row was a cache hit: return cached labels without loading a model.
            logger.debug('All X_test rows found in cache, no need to load model')
            y_pred = X_in_cache_with_pred[self.label].values
            if as_pandas:
                y_pred = pd.Series(data=y_pred, name=self.label)
            return y_pred
        if add_to_pred_cache:
            # Append the fresh predictions to the cache, de-duplicated on the id columns.
            X_id_with_y_pred = X_test_cache_miss[self.submission_columns].copy()
            X_id_with_y_pred[self.label] = y_pred
            if pred_cache is None:
                pred_cache = X_id_with_y_pred.drop_duplicates(subset=self.submission_columns).reset_index(drop=True)
            else:
                pred_cache = pd.concat([X_id_with_y_pred, pred_cache]).drop_duplicates(subset=self.submission_columns).reset_index(drop=True)
            save_pd.save(path=self.pred_cache_path, df=pred_cache)
        if len(X_in_cache_with_pred) > 0:
            # Stitch cached and fresh predictions back into X_test's row order.
            y_pred = pd.concat([y_pred, X_in_cache_with_pred[self.label]]).reindex(X_test.index)
            y_pred = y_pred.values
        if as_pandas:
            y_pred = pd.Series(data=y_pred, name=self.label)
        return y_pred
def fit_transform_features(self, X, y=None):
for feature_generator in self.feature_generators:
X = feature_generator.fit_transform(X, y)
return X
def transform_features(self, X):
for feature_generator in self.feature_generators:
X = feature_generator.transform(X)
return X
    def score(self, X: DataFrame, y=None):
        """Score the trainer on labeled data X (label column is extracted when y is None)."""
        if y is None:
            X, y = self.extract_label(X)
        X = self.transform_features(X)
        y = self.label_cleaner.transform(y)
        trainer = self.load_trainer()
        if self.problem_type == MULTICLASS:
            # NaN after label transform — presumably classes unseen at fit time —
            # are encoded as -1 (TODO confirm against LabelCleaner).
            y = y.fillna(-1)
            if trainer.objective_func_expects_y_pred:
                return trainer.score(X=X, y=y)
            else:
                # Log loss
                # Probability-based metrics cannot score a class with no probability column.
                if -1 in y.unique():
                    raise ValueError('Multiclass scoring with eval_metric=' + self.objective_func.name + ' does not support unknown classes.')
                return trainer.score(X=X, y=y)
        else:
            return trainer.score(X=X, y=y)
    # Scores both learner and all individual models, along with computing the optimal ensemble score + weights (oracle)
    def score_debug(self, X: DataFrame, y=None):
        """Return a dict of {model name: score} for every model at every stack level,
        plus an 'oracle_ensemble_l<level>' entry scoring the best-possible weighted
        ensemble of that level's core models."""
        if y is None:
            X, y = self.extract_label(X)
        X = self.transform_features(X)
        y = self.label_cleaner.transform(y)
        trainer = self.load_trainer()
        if self.problem_type == MULTICLASS:
            y = y.fillna(-1)
            if (not trainer.objective_func_expects_y_pred) and (-1 in y.unique()):
                # Log loss
                raise ValueError('Multiclass scoring with eval_metric=' + self.objective_func.name + ' does not support unknown classes.')
        max_level = trainer.max_level
        max_level_auxiliary = trainer.max_level_auxiliary
        max_level_to_check = max(max_level, max_level_auxiliary)
        scores = {}
        pred_probas = None
        for level in range(max_level_to_check+1):
            model_names_core = trainer.models_level[level]
            # Stacker levels >= 1 consume the previous level's predictions as features.
            if level >= 1:
                X_stack = trainer.get_inputs_to_stacker(X, level_start=0, level_end=level, y_pred_probas=pred_probas)
            else:
                X_stack = X
            if len(model_names_core) > 0:
                # Note: pred_probas is reused as input for the next level's stacker.
                pred_probas = self.get_pred_probas_models(X=X_stack, trainer=trainer, model_names=model_names_core)
                for i, model_name in enumerate(model_names_core):
                    pred_proba = pred_probas[i]
                    # Trainer fit as binary under a multiclass learner: re-expand probabilities.
                    if (trainer.problem_type == BINARY) and (self.problem_type == MULTICLASS):
                        pred_proba = self.label_cleaner.inverse_transform_proba(pred_proba)
                    if trainer.objective_func_expects_y_pred:
                        pred = get_pred_from_proba(y_pred_proba=pred_proba, problem_type=self.problem_type)
                        scores[model_name] = self.objective_func(y, pred)
                    else:
                        scores[model_name] = self.objective_func(y, pred_proba)
                # Oracle: fit an ensemble selection on the *evaluation* data to get an
                # upper bound on what a weighted ensemble of this level could achieve.
                ensemble_selection = EnsembleSelection(ensemble_size=100, problem_type=trainer.problem_type, metric=self.objective_func)
                ensemble_selection.fit(predictions=pred_probas, labels=y, identifiers=None)
                oracle_weights = ensemble_selection.weights_
                oracle_pred_proba_norm = [pred * weight for pred, weight in zip(pred_probas, oracle_weights)]
                oracle_pred_proba_ensemble = np.sum(oracle_pred_proba_norm, axis=0)
                if (trainer.problem_type == BINARY) and (self.problem_type == MULTICLASS):
                    oracle_pred_proba_ensemble = self.label_cleaner.inverse_transform_proba(oracle_pred_proba_ensemble)
                if trainer.objective_func_expects_y_pred:
                    oracle_pred_ensemble = get_pred_from_proba(y_pred_proba=oracle_pred_proba_ensemble, problem_type=self.problem_type)
                    scores['oracle_ensemble_l' + str(level+1)] = self.objective_func(y, oracle_pred_ensemble)
                else:
                    scores['oracle_ensemble_l' + str(level+1)] = self.objective_func(y, oracle_pred_proba_ensemble)
            model_names_aux = trainer.models_level_auxiliary[level]
            if len(model_names_aux) > 0:
                # Auxiliary models are scored but do not feed the next stack level.
                pred_probas_auxiliary = self.get_pred_probas_models(X=X_stack, trainer=trainer, model_names=model_names_aux)
                for i, model_name in enumerate(model_names_aux):
                    pred_proba = pred_probas_auxiliary[i]
                    if (trainer.problem_type == BINARY) and (self.problem_type == MULTICLASS):
                        pred_proba = self.label_cleaner.inverse_transform_proba(pred_proba)
                    if trainer.objective_func_expects_y_pred:
                        pred = get_pred_from_proba(y_pred_proba=pred_proba, problem_type=self.problem_type)
                        scores[model_name] = self.objective_func(y, pred)
                    else:
                        scores[model_name] = self.objective_func(y, pred_proba)
        logger.debug('MODEL SCORES:')
        logger.debug(str(scores))
        return scores
def get_pred_probas_models(self, X, trainer, model_names):
if (self.problem_type == MULTICLASS) and (not trainer.objective_func_expects_y_pred):
# Handles case where we need to add empty columns to represent classes that were not used for training
pred_probas = trainer.pred_proba_predictions(models=model_names, X_test=X)
pred_probas = [self.label_cleaner.inverse_transform_proba(pred_proba) for pred_proba in pred_probas]
else:
pred_probas = trainer.pred_proba_predictions(models=model_names, X_test=X)
return pred_probas
def evaluate(self, y_true, y_pred, silent=False, auxiliary_metrics=False, detailed_report=True, high_always_good=False):
""" Evaluate predictions.
Args:
silent (bool): Should we print which metric is being used as well as performance.
auxiliary_metrics (bool): Should we compute other (problem_type specific) metrics in addition to the default metric?
detailed_report (bool): Should we computed more-detailed versions of the auxiliary_metrics? (requires auxiliary_metrics=True).
high_always_good (bool): If True, this means higher values of returned metric are ALWAYS superior (so metrics like MSE should be returned negated)
Returns single performance-value if auxiliary_metrics=False.
Otherwise returns dict where keys = metrics, values = performance along each metric.
"""
# Remove missing labels and produce warning if any are found:
if self.problem_type == REGRESSION:
missing_indicators = [(y is None or np.isnan(y)) for y in y_true]
else:
missing_indicators = [(y is None or y=='') for y in y_true]
missing_inds = [i for i,j in enumerate(missing_indicators) if j]
if len(missing_inds) > 0:
nonmissing_inds = [i for i,j in enumerate(missing_indicators) if j]
y_true = y_true[nonmissing_inds]
y_pred = y_pred[nonmissing_inds]
warnings.warn("There are %s (out of %s) evaluation datapoints for which the label is missing. "
"AutoGluon removed these points from the evaluation, which thus may not be entirely representative. "
"You should carefully study why there are missing labels in your evaluation data." % (len(missing_inds),len(y_true)))
perf = self.objective_func(y_true, y_pred)
metric = self.objective_func.name
if not high_always_good:
sign = self.objective_func._sign
perf = perf * sign # flip negative once again back to positive (so higher is no longer necessarily better)
if not silent:
logger.log(20, "Evaluation: %s on test data: %f" % (metric, perf))
if not auxiliary_metrics:
return perf
# Otherwise compute auxiliary metrics:
perf_dict = OrderedDict({metric: perf})
if self.problem_type == REGRESSION: # Additional metrics: R^2, Mean-Absolute-Error, Pearson correlation
pearson_corr = lambda x,y: corrcoef(x,y)[0][1]
pearson_corr.__name__ = 'pearson_correlation'
regression_metrics = [mean_absolute_error, explained_variance_score, r2_score, pearson_corr, mean_squared_error, median_absolute_error,
# max_error
]
for reg_metric in regression_metrics:
metric_name = reg_metric.__name__
if metric_name not in perf_dict:
perf_dict[metric_name] = reg_metric(y_true, y_pred)
else: # Compute classification metrics
classif_metrics = [accuracy_score, balanced_accuracy_score, matthews_corrcoef]
if self.problem_type == BINARY: # binary-specific metrics
# def auc_score(y_true, y_pred): # TODO: this requires y_pred to be probability-scores
# fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label)
# return auc(fpr, tpr)
f1micro_score = lambda y_true, y_pred: f1_score(y_true, y_pred, average='micro')
f1micro_score.__name__ = f1_score.__name__
classif_metrics += [f1micro_score] # TODO: add auc?
elif self.problem_type == MULTICLASS: # multiclass metrics
classif_metrics += [] # TODO: No multi-class specific metrics for now. Include, top-1, top-5, top-10 accuracy here.
for cl_metric in classif_metrics:
metric_name = cl_metric.__name__
if metric_name not in perf_dict:
perf_dict[metric_name] = cl_metric(y_true, y_pred)
if not silent:
logger.log(20, "Evaluations on test data:")
logger.log(20, json.dumps(perf_dict, indent=4))
if detailed_report and (self.problem_type != REGRESSION):
# One final set of metrics to report
cl_metric = lambda y_true,y_pred: classification_report(y_true,y_pred, output_dict=True)
metric_name = cl_metric.__name__
if metric_name not in perf_dict:
perf_dict[metric_name] = cl_metric(y_true, y_pred)
if not silent:
logger.log(20, "Detailed (per-class) classification report:")
logger.log(20, json.dumps(perf_dict[metric_name], indent=4))
return perf_dict
def extract_label(self, X):
y = X[self.label].copy()
X = X.drop(self.label, axis=1)
return X, y
    def submit_from_preds(self, X_test: DataFrame, y_pred_proba, save=True, save_proba=False):
        """Build a submission frame (id columns + predicted label) from raw probabilities,
        optionally saving it (and the raw probabilities) under a timestamped path."""
        submission = X_test[self.submission_columns].copy()
        y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=self.problem_type)
        submission[self.label] = y_pred
        # Map internal label encoding back to the original label values.
        submission[self.label] = self.label_cleaner.inverse_transform(submission[self.label])
        if save:
            utcnow = datetime.datetime.utcnow()
            timestamp_str_now = utcnow.strftime("%Y%m%d_%H%M%S")
            path_submission = self.model_context + 'submissions/submission_' + timestamp_str_now + '.csv'
            path_submission_proba = self.model_context + 'submissions/submission_proba_' + timestamp_str_now + '.csv'
            save_pd.save(path=path_submission, df=submission)
            if save_proba:
                submission_proba = pd.DataFrame(y_pred_proba)  # TODO: Fix for multiclass
                save_pd.save(path=path_submission_proba, df=submission_proba)
        return submission
def predict_and_submit(self, X_test: DataFrame, save=True, save_proba=False):
y_pred_proba = self.predict_proba(X_test=X_test, inverse_transform=False)
return self.submit_from_preds(X_test=X_test, y_pred_proba=y_pred_proba, save=save, save_proba=save_proba)
def leaderboard(self):
trainer = self.load_trainer()
return trainer.leaderboard()
def info(self):
trainer = self.load_trainer()
return trainer.info()
@staticmethod
def get_problem_type(y: Series):
""" Identifies which type of prediction problem we are interested in (if user has not specified).
Ie. binary classification, multi-class classification, or regression.
"""
if len(y) == 0:
raise ValueError("provided labels cannot have length = 0")
y = y.dropna() # Remove missing values from y (there should not be any though as they were removed in Learner.general_data_processing())
unique_vals = y.unique()
num_rows = len(y)
# print(unique_vals)
logger.log(20, 'Here are the first 10 unique label values in your data: '+str(unique_vals[:10]))
unique_count = len(unique_vals)
MULTICLASS_LIMIT = 1000 # if numeric and class count would be above this amount, assume it is regression
if num_rows > 1000:
REGRESS_THRESHOLD = 0.05 # if the unique-ratio is less than this, we assume multiclass classification, even when labels are integers
else:
REGRESS_THRESHOLD = 0.1
if len(unique_vals) == 2:
problem_type = BINARY
reason = "only two unique label-values observed"
elif unique_vals.dtype == 'float':
unique_ratio = len(unique_vals) / float(len(y))
if (unique_ratio <= REGRESS_THRESHOLD) and (unique_count <= MULTICLASS_LIMIT):
try:
can_convert_to_int = np.array_equal(y, y.astype(int))
if can_convert_to_int:
problem_type = MULTICLASS
reason = "dtype of label-column == float, but few unique label-values observed and label-values can be converted to int"
else:
problem_type = REGRESSION
reason = "dtype of label-column == float and label-values can't be converted to int"
except:
problem_type = REGRESSION
reason = "dtype of label-column == float and label-values can't be converted to int"
else:
problem_type = REGRESSION
reason = "dtype of label-column == float and many unique label-values observed"
elif unique_vals.dtype == 'object':
problem_type = MULTICLASS
reason = "dtype of label-column == object"
elif unique_vals.dtype == 'int':
unique_ratio = len(unique_vals)/float(len(y))
if (unique_ratio <= REGRESS_THRESHOLD) and (unique_count <= MULTICLASS_LIMIT):
problem_type = MULTICLASS # TODO: Check if integers are from 0 to n-1 for n unique values, if they have a wide spread, it could still be regression
reason = "dtype of label-column == int, but few unique label-values observed"
else:
problem_type = REGRESSION
reason = "dtype of label-column == int and many unique label-values observed"
else:
raise NotImplementedError('label dtype', unique_vals.dtype, 'not supported!')
logger.log(25, "AutoGluon infers your prediction problem is: %s (because %s)" % (problem_type, reason))
logger.log(25, "If this is wrong, please specify `problem_type` argument in fit() instead (You may specify problem_type as one of: ['%s', '%s', '%s'])\n" % (BINARY, MULTICLASS, REGRESSION))
return problem_type
    def save(self):
        """Pickle this learner to self.save_path."""
        save_pkl.save(path=self.save_path, object=self)
    # reset_paths=True if the learner files have changed location since fitting.
    @classmethod
    def load(cls, path_context, reset_paths=False):
        """Unpickle a learner from path_context and re-point its artifact paths.

        Args:
            path_context: directory containing the pickled learner.
            reset_paths: True when artifacts moved since fit; paths are rebuilt
                from path_context instead of the values stored at fit time.
        """
        load_path = path_context + cls.save_file_name
        obj = load_pkl.load(path=load_path)
        if reset_paths:
            obj.set_contexts(path_context)
            obj.trainer_path = obj.model_context
            obj.reset_paths = reset_paths
            # TODO: Still have to change paths of models in trainer + trainer object path variables
            return obj
        else:
            obj.set_contexts(obj.path_context)
            return obj
def save_trainer(self, trainer):
if self.is_trainer_present:
self.trainer = trainer
self.save()
else:
self.trainer_path = trainer.path
trainer.save()
def load_trainer(self) -> AbstractTrainer:
if self.is_trainer_present:
return self.trainer
else:
return self.trainer_type.load(path=self.trainer_path, reset_paths=self.reset_paths)
| 54.303167 | 223 | 0.654612 |
b750201f5401dc7a92ab202c309483b181d4f4b9 | 427 | py | Python | todolist/wsgi.py | carlos-moreno/to-do-list | 5cdf9b38cb3490b2c905d3599b8fe9bc6287df07 | [
"MIT"
] | null | null | null | todolist/wsgi.py | carlos-moreno/to-do-list | 5cdf9b38cb3490b2c905d3599b8fe9bc6287df07 | [
"MIT"
] | 5 | 2020-04-15T19:28:20.000Z | 2021-09-22T18:52:35.000Z | todolist/wsgi.py | carlos-moreno/to-do-list | 5cdf9b38cb3490b2c905d3599b8fe9bc6287df07 | [
"MIT"
] | null | null | null | """
WSGI config for todolist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os

from dj_static import Cling
from django.core.wsgi import get_wsgi_application

# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")

# WSGI entry point, wrapped in Cling (dj-static) — presumably to serve static
# files directly from the WSGI app; verify against deployment setup.
application = Cling(get_wsgi_application())
| 25.117647 | 78 | 0.791569 |
366efd008e16705f0eb797655b79e3864d6f50bf | 1,530 | py | Python | wagtailhoneypot/forms.py | suchermon/wagtailhoneypot | 56ab339b0e6190ac3d36f671c47deae024814ea0 | [
"MIT"
] | 1 | 2021-12-26T14:44:34.000Z | 2021-12-26T14:44:34.000Z | wagtailhoneypot/forms.py | suchermon/wagtailhoneypot | 56ab339b0e6190ac3d36f671c47deae024814ea0 | [
"MIT"
] | 1 | 2021-03-11T19:14:40.000Z | 2021-03-11T19:48:41.000Z | wagtailhoneypot/forms.py | suchermon/wagtailhoneypot | 56ab339b0e6190ac3d36f671c47deae024814ea0 | [
"MIT"
] | null | null | null | from django import forms
from django.conf import settings
from wagtail.contrib.forms.forms import FormBuilder
from .widgets import HoneyPotFieldWidget
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3, ReCaptchaV2Checkbox
CAPTCHA_VERSION = settings.WAGTAIL_HONEYPOT_CAPTCHA_VERSION
class HoneyPotFormField(forms.CharField):
    """CharField variant used as a honeypot: it never rejects input."""

    def validate(self, value):
        """Skip all CharField validation so any submitted value passes through."""
        return value
class WagtailCaptchaFormBuilder(FormBuilder):
    """Form builder that appends a reCAPTCHA field to every generated form.

    Extends https://github.com/springload/wagtail-django-recaptcha/blob/master/wagtailcaptcha/forms.py
    (that project is unmaintained) to also support reCAPTCHA v3.

    Fixes: the explanatory text used to be a stray string statement after the
    class attribute rather than the class docstring.
    """

    # Name under which the captcha field is registered on generated forms.
    CAPTCHA_FIELD_NAME = 'wagtailcaptcha'

    @property
    def formfields(self):
        """Return the parent builder's fields plus the configured reCAPTCHA field."""
        fields = super().formfields
        widget = ReCaptchaV3 if CAPTCHA_VERSION == 3 else ReCaptchaV2Checkbox
        fields[self.CAPTCHA_FIELD_NAME] = ReCaptchaField(label='', widget=widget)
        return fields
class WagtailHoneyPotFormBuilder(WagtailCaptchaFormBuilder):
    """Captcha-enabled form builder that also knows how to build honeypot fields."""

    def create_honeypot_field(self, field, options):
        # Honeypot fields accept anything; bots filling them are caught elsewhere.
        return HoneyPotFormField(widget=HoneyPotFieldWidget, **options)
def remove_captcha_field(form):
    """Strip the captcha field and its cleaned value from a bound form."""
    for container in (form.fields, form.cleaned_data):
        container.pop(WagtailCaptchaFormBuilder.CAPTCHA_FIELD_NAME, None)
| 33.26087 | 154 | 0.766667 |
d7f8ab9cdcae6545037965781fc7b4ab9a7b54b1 | 855 | py | Python | 1001-1100/1094-Second Minimum Node In a Binary Tree/1094-Second Minimum Node In a Binary Tree.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1001-1100/1094-Second Minimum Node In a Binary Tree/1094-Second Minimum Node In a Binary Tree.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1001-1100/1094-Second Minimum Node In a Binary Tree/1094-Second Minimum Node In a Binary Tree.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: the root
    @return: the smallest value strictly greater than the root's value, or -1 if none exists
    """
    def findSecondMinimumValue(self, root):
        # The tree satisfies node.val == min(child values), so the root holds the
        # global minimum; the first differing value on any path is a candidate.
        minimum = root.val
        candidate = -1
        stack = [root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            if node.val != minimum:
                # Children are >= node.val, so no need to descend further here.
                if candidate == -1 or node.val < candidate:
                    candidate = node.val
                continue
            stack.append(node.left)
            stack.append(node.right)
        return candidate
f714df9ac1fd6d0bf4721aeb747d23287a74cfba | 15,684 | py | Python | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 2 | 2021-03-12T10:48:20.000Z | 2021-04-23T09:37:18.000Z | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 27 | 2020-07-22T15:49:25.000Z | 2021-06-18T09:40:48.000Z | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 1 | 2021-06-01T12:49:53.000Z | 2021-06-01T12:49:53.000Z | import numpy as np
import pandas as pd
import pytest
from dku_timeseries.timeseries_helpers import generate_date_range, get_date_offset
from recipe_config_loading import get_resampling_params
@pytest.fixture
def config():
    # Baseline recipe settings: quarterly resampling with a 2-unit step and
    # linear interpolation; no extrapolation, clipping or shift.
    config = {u'clip_end': 0, u'constant_value': 0, u'extrapolation_method': u'none', u'shift': 0, u'time_unit_end_of_week': u'SUN',
              u'datetime_column': u'Date', u'advanced_activated': False, u'time_unit': u'quarters', u'clip_start': 0, u'time_step': 2,
              u'interpolation_method': u'linear'}
    return config
class TestResamplerHelpers:
def test_date_offset(self):
time_unit = "business_days"
offset_value = 0
sunday = pd.Timestamp('2021-01-31 10:00:00')
offset = get_date_offset(time_unit, offset_value)
assert sunday + offset == sunday
sunday = pd.Timestamp('2021-01-31 00:00:00')
offset = get_date_offset(time_unit, 1)
assert sunday + offset == pd.Timestamp('2021-02-01 00:00:00')
assert sunday - offset == pd.Timestamp('2021-01-29 00:00:00')
assert sunday + offset + offset == pd.Timestamp('2021-02-02 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 1)
assert friday + offset == pd.Timestamp('2021-02-01 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 2)
assert friday + offset == pd.Timestamp('2021-02-02 00:00:00')
saturday = pd.Timestamp('2021-01-30 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-01 00:00:00')
saturday = pd.Timestamp('2021-02-04 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-05 00:00:00')
def test_generate_date_range_month(self, config):
config["time_unit"] = "months"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
end_time = pd.Timestamp('2021-06-20 00:00:00')
start_time = pd.Timestamp('2021-01-31 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-23 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00').tz_localize("CET")
end_time = pd.Timestamp('2021-06-20 00:00:00').tz_localize("CET")
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(
['2021-01-31 00:00:00+01:00', '2021-03-31 00:00:00+02:00', '2021-05-31 00:00:00+02:00', '2021-07-31 00:00:00+02:00']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
end_time = pd.Timestamp('2021-06-20 00:00:00')
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-03-31', '2021-05-31', '2021-07-31']))
def test_generate_date_range_week(self, config):
    """Weekly ranges anchor on the configured end-of-week day."""
    config["time_unit"] = "weeks"
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step

    start_time = pd.Timestamp('2020-12-23 00:00:00')
    end_time = pd.Timestamp('2021-01-18 00:00:00')
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(actual, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24']))

    # An end landing exactly on a week anchor extends the range by one period.
    end_time = pd.Timestamp('2021-01-24 00:00:00')
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(actual, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24', '2021-02-07']))

    # clip_start=1 with shift=1 drops the first anchor.
    actual = generate_date_range(start_time, end_time, 1, 0, 1, step, n_units, unit)
    np.testing.assert_array_equal(actual, pd.DatetimeIndex(['2021-01-10', '2021-01-24', '2021-02-07']))

    # Re-parse with Wednesday as the end of the week: all anchors move accordingly.
    config["time_unit"] = "weeks"
    config["time_unit_end_of_week"] = "WED"
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(actual, pd.DatetimeIndex(['2020-12-23', '2021-01-6', '2021-01-20', '2021-02-03']))
def test_generate_date_range_quarters(self, config):
    """Quarterly ranges anchor on calendar quarter ends."""
    config["time_step"] = 1
    config["time_unit"] = "quarters"
    start_time = pd.Timestamp('2020-01-23 00:00:00')
    end_time = pd.Timestamp('2021-01-18 00:00:00')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(
        actual,
        pd.DatetimeIndex(['2020-01-31', '2020-04-30', '2020-07-31', '2020-10-31', '2021-01-31']))
def test_generate_date_range_half_year(self, config):
    """Semi-annual ranges anchor every six months from the first month end."""
    config["time_step"] = 1
    config["time_unit"] = "semi_annual"
    start_time = pd.Timestamp('2020-01-01 00:00:00')
    end_time = pd.Timestamp('2021-06-18 00:00:00')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(
        actual,
        pd.DatetimeIndex(['2020-01-31', '2020-07-31', '2021-01-31', '2021-07-31']))
def test_generate_date_range_b_days(self, config):
    """Business-day ranges skip weekends; clipping trims periods at both ends."""
    config["time_unit"] = "business_days"
    config["time_step"] = 1
    start_time = pd.Timestamp('2021-01-02 00:00:00')
    end_time = pd.Timestamp('2021-01-10 00:00:00')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step

    full_week = pd.DatetimeIndex(
        ['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11'])
    actual = generate_date_range(start_time, end_time, 0, 0, 0, step, n_units, unit)
    np.testing.assert_array_equal(actual, full_week)

    # clip_start=1, clip_end=1: the range is unchanged here (bounds fall on a weekend).
    actual = generate_date_range(start_time, end_time, 1, 1, 0, step, n_units, unit)
    np.testing.assert_array_equal(actual, full_week)

    # clip_start=2, clip_end=2: one business day is removed from each end.
    actual = generate_date_range(start_time, end_time, 2, 2, 0, step, n_units, unit)
    np.testing.assert_array_equal(
        actual, pd.DatetimeIndex(['2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']))
def test_generate_date_range_days(self, config):
    """Daily ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "days"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('20190214 01:59:00').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-02-07 00:00:00+01:00', '2019-02-08 00:00:00+01:00',
                                 '2019-02-09 00:00:00+01:00', '2019-02-10 00:00:00+01:00',
                                 '2019-02-11 00:00:00+01:00', '2019-02-12 00:00:00+01:00',
                                 '2019-02-13 00:00:00+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_hours(self, config):
    """Hourly ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "hours"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('20190131 11:59:00').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-01-31 09:00:00+01:00', '2019-01-31 10:00:00+01:00',
                                 '2019-01-31 11:00:00+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_minutes(self, config):
    """Minute ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "minutes"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('20190131 02:15:00').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-01-31 02:06:00+01:00', '2019-01-31 02:07:00+01:00',
                                 '2019-01-31 02:08:00+01:00', '2019-01-31 02:09:00+01:00',
                                 '2019-01-31 02:10:00+01:00', '2019-01-31 02:11:00+01:00',
                                 '2019-01-31 02:12:00+01:00', '2019-01-31 02:13:00+01:00',
                                 '2019-01-31 02:14:00+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_seconds(self, config):
    """Second ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "seconds"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('20190131 01:59:12').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-01-31 01:59:07+01:00', '2019-01-31 01:59:08+01:00',
                                 '2019-01-31 01:59:09+01:00', '2019-01-31 01:59:10+01:00',
                                 '2019-01-31 01:59:11+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_milliseconds(self, config):
    """Millisecond ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "milliseconds"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('2019-01-31 01:59:00.015000').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-01-31 01:59:00.007000+01:00',
                                 '2019-01-31 01:59:00.008000+01:00',
                                 '2019-01-31 01:59:00.009000+01:00',
                                 '2019-01-31 01:59:00.010000+01:00',
                                 '2019-01-31 01:59:00.011000+01:00',
                                 '2019-01-31 01:59:00.012000+01:00',
                                 '2019-01-31 01:59:00.013000+01:00',
                                 '2019-01-31 01:59:00.014000+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_microseconds(self, config):
    """Microsecond ranges honour clip_start/clip_end/shift with tz-aware bounds."""
    config["time_unit"] = "microseconds"
    config["time_step"] = 1
    start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
    end_time = pd.Timestamp('2019-01-31 01:59:00.000016').tz_localize('CET')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    expected = pd.DatetimeIndex(['2019-01-31 01:59:00.000007+01:00',
                                 '2019-01-31 01:59:00.000008+01:00',
                                 '2019-01-31 01:59:00.000009+01:00',
                                 '2019-01-31 01:59:00.000010+01:00',
                                 '2019-01-31 01:59:00.000011+01:00',
                                 '2019-01-31 01:59:00.000012+01:00',
                                 '2019-01-31 01:59:00.000013+01:00',
                                 '2019-01-31 01:59:00.000014+01:00',
                                 '2019-01-31 01:59:00.000015+01:00'])
    np.testing.assert_array_equal(actual, expected)
def test_generate_date_range_nanoseconds(self, config):
    """Nanosecond ranges honour clip_start/clip_end/shift (naive timestamps)."""
    config["time_unit"] = "nanoseconds"
    config["time_step"] = 1
    start_time = pd.Timestamp('2019-01-31T00:59:00.000000000')
    end_time = pd.Timestamp('2019-01-31T00:59:00.000000009')
    params = get_resampling_params(config)
    step = params.resampling_step
    unit = params.time_unit
    n_units = params.time_step
    # clip_start=5, clip_end=3, shift=2
    actual = generate_date_range(start_time, end_time, 5, 3, 2, step, n_units, unit)
    np.testing.assert_array_equal(actual, pd.DatetimeIndex(['2019-01-31 00:59:00.000000007',
                                                            '2019-01-31 00:59:00.000000008']))
| 49.166144 | 153 | 0.615851 |
4022402da5e16d22ae3c7e740940234e009b4fd9 | 4,161 | py | Python | src/libs/easy_select2/widgets.py | ivanjo39191/ivankao-erp | 04ed4e5128b419c995dfcb57f2eead2af498b156 | [
"Apache-2.0"
] | null | null | null | src/libs/easy_select2/widgets.py | ivanjo39191/ivankao-erp | 04ed4e5128b419c995dfcb57f2eead2af498b156 | [
"Apache-2.0"
] | null | null | null | src/libs/easy_select2/widgets.py | ivanjo39191/ivankao-erp | 04ed4e5128b419c995dfcb57f2eead2af498b156 | [
"Apache-2.0"
] | null | null | null | import json
import django
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
class Select2Mixin(object):
    """Mixin that renders a select-type widget as a Select2 input.

    Mix into widgets whose output is a ``<select>`` element. Rendering
    appends a hidden container ``<div>`` carrying the Select2 options as
    data-attributes, which the bundled JS reads on page load.
    """

    html = """<div class="field-easy-select2"
                   style="display:none"
                   id="{id}"
                   {options}></div>"""

    def __init__(self, select2attrs=None, *args, **kwargs):
        """Store Select2 options and resolve static asset settings.

        Args:
            select2attrs: a dictionary, which then passed to
                Select2 constructor function as options. A width of
                250px is applied when the caller does not provide one.
        """
        self.select2attrs = select2attrs or {}
        assert_msg = "select2attrs attribute must be dict, not {}"
        assert isinstance(self.select2attrs, dict), assert_msg.format(
            self.select2attrs.__class__.__name__
        )
        # Default width, unless the caller configured their own.
        self.select2attrs.setdefault('width', '250px')
        self.static_settings()
        super(Select2Mixin, self).__init__(*args, **kwargs)

    def static_settings(self):
        """Resolve JS/CSS asset paths, honouring project-level overrides."""
        select2_js = getattr(
            settings,
            'SELECT2_JS',
            'easy_select2/vendor/select2/js/select2.min.js',
        )
        select2_css = getattr(
            settings,
            'SELECT2_CSS',
            'easy_select2/vendor/select2/css/select2.min.css',
        )
        use_bundled_jquery = getattr(settings, 'SELECT2_USE_BUNDLED_JQUERY', True)
        self.SELECT2_WIDGET_JS = [
            'easy_select2/js/init.js',
            'easy_select2/js/easy_select2.js',
            select2_js,
        ]
        # jQuery must come first; use either the bundled copy or the one
        # shipped with the Django admin.
        if use_bundled_jquery:
            self.SELECT2_WIDGET_JS.insert(0, 'easy_select2/vendor/jquery/jquery.min.js')
        else:
            self.SELECT2_WIDGET_JS.insert(0, 'admin/js/jquery.init.js')
        self.SELECT2_WIDGET_CSS = {
            'screen': [
                select2_css,
                'easy_select2/css/easy_select2.css',
            ],
        }

    # This function is taken from django-select2
    def get_options(self):
        """Return dictionary of options to be used by Select2."""
        return self.select2attrs

    # This function is taken from django-select2
    def render_select2_options_code(self, options, id_):
        """Serialize *options* into a string of data-attributes."""
        rendered = []
        for name, value in options.items():
            # Nested structures are embedded as JSON.
            if isinstance(value, (dict, list)):
                value = json.dumps(value)
            rendered.append("data-{name}='{value}'".format(
                name=name,
                value=mark_safe(value)))
        return mark_safe(' '.join(rendered))

    def render_js_code(self, id_, *args, **kwargs):
        """Render the hidden html container for the Select2 widget options."""
        if id_:
            options = self.render_select2_options_code(
                dict(self.get_options()), id_)
            return mark_safe(self.html.format(id=id_, options=options))
        return u''

    def render(self, name, value, attrs=None, **kwargs):
        """Render the underlying widget, then append the Select2 container."""
        output = super(Select2Mixin, self).render(
            name, value, attrs=attrs, **kwargs)
        output += self.render_js_code(
            attrs['id'], name, value, attrs=attrs, **kwargs)
        return mark_safe(output)

    @property
    def media(self):
        """Expose the widget's JS/CSS so Django can collect form media."""
        return forms.Media(
            css=self.SELECT2_WIDGET_CSS,
            js=self.SELECT2_WIDGET_JS,
        )
class Select2(Select2Mixin, forms.Select):
    """Single-valued select widget rendered with Select2."""
class Select2Multiple(Select2Mixin, forms.SelectMultiple):
    """Multiple-select widget rendered with Select2."""
| 32.76378 | 72 | 0.592165 |
bec667b2f30ff7f5b21227902aa68a653f07d466 | 17,378 | py | Python | python/fate_client/pipeline/param/logistic_regression_param.py | QuantumA/FATE | 89a3dd593252128c1bf86fb1014b25a629bdb31a | [
"Apache-2.0"
] | 1 | 2022-02-07T06:23:15.000Z | 2022-02-07T06:23:15.000Z | python/fate_client/pipeline/param/logistic_regression_param.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 11 | 2020-10-09T09:53:50.000Z | 2021-12-06T16:14:51.000Z | python/fate_client/pipeline/param/logistic_regression_param.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from pipeline.param.base_param import BaseParam
from pipeline.param.cross_validation_param import CrossValidationParam
from pipeline.param.encrypt_param import EncryptParam
from pipeline.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from pipeline.param.init_model_param import InitParam
from pipeline.param.predict_param import PredictParam
from pipeline.param.stepwise_param import StepwiseParam
from pipeline.param.sqn_param import StochasticQuasiNewtonParam
from pipeline.param.callback_param import CallbackParam
from pipeline.param import consts
class LogisticParam(BaseParam):
    """
    Parameters used for Logistic Regression both for Homo mode or Hetero mode.

    Parameters
    ----------
    penalty : {'L2', 'L1' or None}
        Penalty method used in LR. Please note that, when using encrypted version in HomoLR,
        'L1' is not supported.
    tol : float, default: 1e-4
        The tolerance of convergence
    alpha : float, default: 1.0
        Regularization strength coefficient.
    optimizer : {'rmsprop', 'sgd', 'adam', 'nesterov_momentum_sgd', 'sqn', 'adagrad'}, default: 'rmsprop'
        Optimize method, if 'sqn' has been set, sqn_param will take effect. Currently, 'sqn' support hetero mode only.
    batch_size : int, default: -1
        Batch size when updating model. -1 means use all data in a batch. i.e. Not to use mini-batch strategy.
    learning_rate : float, default: 0.01
        Learning rate
    max_iter : int, default: 100
        The maximum iteration for training.
    early_stop : {'diff', 'weight_diff', 'abs'}, default: 'diff'
        Method used to judge converge or not.
        a)	diff: Use difference of loss between two iterations to judge whether converge.
        b)  weight_diff: Use difference between weights of two consecutive iterations
        c)	abs: Use the absolute value of loss to judge whether converge. i.e. if loss < eps, it is converged.

        Please note that for hetero-lr multi-host situation, this parameter support "weight_diff" only.
    decay: int or float, default: 1
        Decay rate for learning rate. learning rate will follow the following decay schedule.
        lr = lr0/(1+decay*t) if decay_sqrt is False. If decay_sqrt is True, lr = lr0 / sqrt(1+decay*t)
        where t is the iter number.
    decay_sqrt: bool, default: True
        lr = lr0/(1+decay*t) if decay_sqrt is False, otherwise, lr = lr0 / sqrt(1+decay*t)
    encrypt_param: EncryptParam object, default: default EncryptParam object
        encrypt param
    predict_param: PredictParam object, default: default PredictParam object
        predict param
    callback_param: CallbackParam object
        callback param
    cv_param: CrossValidationParam object, default: default CrossValidationParam object
        cv param
    multi_class: {'ovr'}, default: 'ovr'
        If it is a multi_class task, indicate what strategy to use. Currently, support 'ovr' short for one_vs_rest only.
    validation_freqs: int or list or tuple or set, or None, default None
        validation frequency during training.
    early_stopping_rounds: int, default: None
        Will stop training if one metric doesn’t improve in last early_stopping_round rounds
    metrics: list or None, default: None
        Indicate when executing evaluation during train process, which metrics will be used. If set as empty,
        default metrics for specific task type will be used. As for binary classification, default metrics are
        ['auc', 'ks']
    use_first_metric_only: bool, default: False
        Indicate whether use the first metric only for early stopping judgement.
    floating_point_precision: None or integer
        if not None, use floating_point_precision-bit to speed up calculation,
        e.g.: convert an x to round(x * 2**floating_point_precision) during Paillier operation, divide
        the result by 2**floating_point_precision in the end.
    """

    def __init__(self, penalty='L2',
                 tol=1e-4, alpha=1.0, optimizer='rmsprop',
                 batch_size=-1, learning_rate=0.01, init_param=InitParam(),
                 max_iter=100, early_stop='diff', encrypt_param=EncryptParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 decay=1, decay_sqrt=True,
                 multi_class='ovr', validation_freqs=None, early_stopping_rounds=None,
                 stepwise_param=StepwiseParam(), floating_point_precision=23,
                 metrics=None,
                 use_first_metric_only=False,
                 callback_param=CallbackParam()
                 ):
        super(LogisticParam, self).__init__()
        self.penalty = penalty
        self.tol = tol
        self.alpha = alpha
        self.optimizer = optimizer
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        # Sub-param objects are deep-copied so instances never share the
        # mutable default objects declared in the signature.
        self.init_param = copy.deepcopy(init_param)
        self.max_iter = max_iter
        self.early_stop = early_stop
        # NOTE(review): encrypt_param is stored without a deepcopy, unlike the
        # other param objects — instances created with the default share it.
        self.encrypt_param = encrypt_param
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.decay = decay
        self.decay_sqrt = decay_sqrt
        self.multi_class = multi_class
        self.validation_freqs = validation_freqs
        self.stepwise_param = copy.deepcopy(stepwise_param)
        self.early_stopping_rounds = early_stopping_rounds
        self.metrics = metrics or []
        self.use_first_metric_only = use_first_metric_only
        self.floating_point_precision = floating_point_precision
        self.callback_param = copy.deepcopy(callback_param)

    def check(self):
        """Validate every field, normalising string options to a canonical
        case; raises ValueError on the first invalid field, returns True
        when everything passes."""
        descr = "logistic_param's"

        # penalty: None, or one of 'L1'/'L2'/'none' (case-insensitive).
        if self.penalty is None:
            pass
        elif type(self.penalty).__name__ != "str":
            raise ValueError(
                "logistic_param's penalty {} not supported, should be str type".format(self.penalty))
        else:
            self.penalty = self.penalty.upper()
            if self.penalty not in [consts.L1_PENALTY, consts.L2_PENALTY, 'NONE']:
                raise ValueError(
                    "logistic_param's penalty not supported, penalty should be 'L1', 'L2' or 'none'")

        if not isinstance(self.tol, (int, float)):
            raise ValueError(
                "logistic_param's tol {} not supported, should be float type".format(self.tol))

        if type(self.alpha).__name__ not in ["float", 'int']:
            raise ValueError(
                "logistic_param's alpha {} not supported, should be float or int type".format(self.alpha))

        # optimizer is lower-cased before membership testing.
        if type(self.optimizer).__name__ != "str":
            raise ValueError(
                "logistic_param's optimizer {} not supported, should be str type".format(self.optimizer))
        else:
            self.optimizer = self.optimizer.lower()
            if self.optimizer not in ['sgd', 'rmsprop', 'adam', 'adagrad', 'nesterov_momentum_sgd', 'sqn']:
                raise ValueError(
                    "logistic_param's optimizer not supported, optimizer should be"
                    " 'sgd', 'rmsprop', 'adam', 'nesterov_momentum_sgd', 'sqn' or 'adagrad'")

        # -1 is the sentinel for "full batch"; anything else must be an int
        # of at least MIN_BATCH_SIZE.
        if self.batch_size != -1:
            if type(self.batch_size).__name__ not in ["int"] \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(descr + " {} not supported, should be larger than {} or "
                                         "-1 represent for all data".format(self.batch_size, consts.MIN_BATCH_SIZE))

        if not isinstance(self.learning_rate, (float, int)):
            raise ValueError(
                "logistic_param's learning_rate {} not supported, should be float or int type".format(
                    self.learning_rate))

        self.init_param.check()

        if type(self.max_iter).__name__ != "int":
            raise ValueError(
                "logistic_param's max_iter {} not supported, should be int type".format(self.max_iter))
        elif self.max_iter <= 0:
            raise ValueError(
                "logistic_param's max_iter must be greater or equal to 1")

        # early_stop is lower-cased before membership testing.
        if type(self.early_stop).__name__ != "str":
            raise ValueError(
                "logistic_param's early_stop {} not supported, should be str type".format(
                    self.early_stop))
        else:
            self.early_stop = self.early_stop.lower()
            if self.early_stop not in ['diff', 'abs', 'weight_diff']:
                raise ValueError(
                    "logistic_param's early_stop not supported, converge_func should be"
                    " 'diff', 'weight_diff' or 'abs'")

        self.encrypt_param.check()
        self.predict_param.check()
        if self.encrypt_param.method not in [consts.PAILLIER, None]:
            raise ValueError(
                "logistic_param's encrypted method support 'Paillier' or None only")

        if type(self.decay).__name__ not in ["int", 'float']:
            raise ValueError(
                "logistic_param's decay {} not supported, should be 'int' or 'float'".format(
                    self.decay))

        if type(self.decay_sqrt).__name__ not in ['bool']:
            raise ValueError(
                "logistic_param's decay_sqrt {} not supported, should be 'bool'".format(
                    self.decay_sqrt))
        self.stepwise_param.check()

        # Early stopping requires a positive round count and a validation
        # schedule to evaluate against.
        if self.early_stopping_rounds is None:
            pass
        elif isinstance(self.early_stopping_rounds, int):
            if self.early_stopping_rounds < 1:
                raise ValueError("early stopping rounds should be larger than 0 when it's integer")
            if self.validation_freqs is None:
                raise ValueError("validation freqs must be set when early stopping is enabled")

        if self.metrics is not None and not isinstance(self.metrics, list):
            raise ValueError("metrics should be a list")

        if not isinstance(self.use_first_metric_only, bool):
            raise ValueError("use_first_metric_only should be a boolean")

        # Precision is a bit count for fixed-point encoding: None or 0..63.
        if self.floating_point_precision is not None and \
                (not isinstance(self.floating_point_precision, int) or
                 self.floating_point_precision < 0 or self.floating_point_precision > 63):
            raise ValueError("floating point precision should be null or a integer between 0 and 63")

        return True
class HomoLogisticParam(LogisticParam):
    """
    Parameters
    ----------
    re_encrypt_batches : int, default: 2
        Required when using encrypted version HomoLR. Since multiple batch updating coefficient may cause
        overflow error. The model need to be re-encrypt for every several batches. Please be careful when setting
        this parameter. Too large batches may cause training failure.
    aggregate_iters : int, default: 1
        Indicate how many iterations are aggregated once.
    use_proximal: bool, default: False
        Whether to turn on additional proximial term. For more details of FedProx, Please refer to
        https://arxiv.org/abs/1812.06127
    mu: float, default 0.1
        To scale the proximal term
    """

    def __init__(self, penalty='L2',
                 tol=1e-4, alpha=1.0, optimizer='rmsprop',
                 batch_size=-1, learning_rate=0.01, init_param=InitParam(),
                 max_iter=100, early_stop='diff',
                 encrypt_param=EncryptParam(method=None), re_encrypt_batches=2,
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 decay=1, decay_sqrt=True,
                 aggregate_iters=1, multi_class='ovr', validation_freqs=None,
                 early_stopping_rounds=None,
                 metrics=['auc', 'ks'],
                 use_first_metric_only=False,
                 use_proximal=False,
                 mu=0.1, callback_param=CallbackParam()
                 ):
        # BUG FIX: ``metrics`` has a mutable default list that is shared across
        # every call, and the base class stores the list without copying
        # (``self.metrics = metrics or []``). Copy it here so that mutating one
        # instance's ``metrics`` can never leak into the class default or into
        # other instances. ``None`` and empty values pass through unchanged, so
        # observable behavior for those callers is identical.
        metrics = list(metrics) if metrics else metrics
        super(HomoLogisticParam, self).__init__(penalty=penalty, tol=tol, alpha=alpha, optimizer=optimizer,
                                                batch_size=batch_size,
                                                learning_rate=learning_rate,
                                                init_param=init_param, max_iter=max_iter, early_stop=early_stop,
                                                encrypt_param=encrypt_param, predict_param=predict_param,
                                                cv_param=cv_param, multi_class=multi_class,
                                                validation_freqs=validation_freqs,
                                                decay=decay, decay_sqrt=decay_sqrt,
                                                early_stopping_rounds=early_stopping_rounds,
                                                metrics=metrics, use_first_metric_only=use_first_metric_only,
                                                callback_param=callback_param)
        self.re_encrypt_batches = re_encrypt_batches
        self.aggregate_iters = aggregate_iters
        self.use_proximal = use_proximal
        self.mu = mu

    def check(self):
        """Validate homo-specific fields on top of the base checks; raises
        ValueError on the first invalid field, returns True otherwise."""
        super().check()
        if type(self.re_encrypt_batches).__name__ != "int":
            raise ValueError(
                "logistic_param's re_encrypt_batches {} not supported, should be int type".format(
                    self.re_encrypt_batches))
        elif self.re_encrypt_batches < 0:
            raise ValueError(
                "logistic_param's re_encrypt_batches must be greater or equal to 0")

        if not isinstance(self.aggregate_iters, int):
            raise ValueError(
                "logistic_param's aggregate_iters {} not supported, should be int type".format(
                    self.aggregate_iters))

        # Paillier homomorphic encryption constrains the optimizer/penalty.
        if self.encrypt_param.method == consts.PAILLIER:
            if self.optimizer != 'sgd':
                raise ValueError("Paillier encryption mode supports 'sgd' optimizer method only.")

            if self.penalty == consts.L1_PENALTY:
                raise ValueError("Paillier encryption mode supports 'L2' penalty or None only.")

        # The SQN optimizer is hetero-only.
        if self.optimizer == 'sqn':
            raise ValueError("'sqn' optimizer is supported for hetero mode only.")

        return True
class HeteroLogisticParam(LogisticParam):
    """Parameters for hetero (vertically partitioned) logistic regression.

    Extends LogisticParam with an encrypted-mode calculator configuration and
    the stochastic quasi-Newton (SQN) optimizer parameters; see LogisticParam
    for the shared fields.
    """

    def __init__(self, penalty='L2',
                 tol=1e-4, alpha=1.0, optimizer='rmsprop',
                 batch_size=-1, learning_rate=0.01, init_param=InitParam(),
                 max_iter=100, early_stop='diff',
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 decay=1, decay_sqrt=True, sqn_param=StochasticQuasiNewtonParam(),
                 multi_class='ovr', validation_freqs=None, early_stopping_rounds=None,
                 metrics=['auc', 'ks'], floating_point_precision=23,
                 encrypt_param=EncryptParam(),
                 use_first_metric_only=False, stepwise_param=StepwiseParam(),
                 callback_param=CallbackParam()
                 ):
        # BUG FIX: ``metrics`` has a mutable default list shared across calls,
        # and the base class stores it without copying. Copy here so mutating
        # one instance's ``metrics`` cannot leak into the class default or
        # other instances; None/empty values pass through unchanged.
        metrics = list(metrics) if metrics else metrics
        super(HeteroLogisticParam, self).__init__(penalty=penalty, tol=tol, alpha=alpha, optimizer=optimizer,
                                                  batch_size=batch_size,
                                                  learning_rate=learning_rate,
                                                  init_param=init_param, max_iter=max_iter, early_stop=early_stop,
                                                  predict_param=predict_param, cv_param=cv_param,
                                                  decay=decay,
                                                  decay_sqrt=decay_sqrt, multi_class=multi_class,
                                                  validation_freqs=validation_freqs,
                                                  early_stopping_rounds=early_stopping_rounds,
                                                  metrics=metrics, floating_point_precision=floating_point_precision,
                                                  encrypt_param=encrypt_param,
                                                  use_first_metric_only=use_first_metric_only,
                                                  stepwise_param=stepwise_param,
                                                  callback_param=callback_param)
        # Deep-copied so instances never share the mutable default objects.
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.sqn_param = copy.deepcopy(sqn_param)

    def check(self):
        """Validate hetero-specific sub-params on top of the base checks."""
        super().check()
        self.encrypted_mode_calculator_param.check()
        self.sqn_param.check()
        return True
| 47.873278 | 120 | 0.618426 |
2a58dfe6f37ac8b8b359a9091d8cfce43e519646 | 1,110 | py | Python | django_project/accounts/migrations/0001_initial.py | Jeffhabs/cs2450 | d003bb2817db071a5384e9939ea02fc9c7df5436 | [
"MIT"
] | null | null | null | django_project/accounts/migrations/0001_initial.py | Jeffhabs/cs2450 | d003bb2817db071a5384e9939ea02fc9c7df5436 | [
"MIT"
] | null | null | null | django_project/accounts/migrations/0001_initial.py | Jeffhabs/cs2450 | d003bb2817db071a5384e9939ea02fc9c7df5436 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial migration: creates the Profile model holding contact details,
    # with a foreign key to the (swappable) auth user model.
    # NOTE: applied migrations must stay byte-stable; do not edit the
    # operations below.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('address_1', models.CharField(max_length=128)),
                # address_2 / state / zip_code are optional (null + blank).
                ('address_2', models.CharField(max_length=128, null=True, blank=True)),
                ('city', models.CharField(max_length=128)),
                ('state', models.CharField(max_length=2, null=True, blank=True)),
                ('zip_code', models.CharField(max_length=32, null=True, blank=True)),
                ('phone', models.CharField(max_length=32)),
                # No on_delete: this migration targets Django < 2.0, where
                # ForeignKey defaulted to CASCADE.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 34.6875 | 114 | 0.588288 |
a0b8d96f9db93a3931b3524309e93a2aa522046f | 1,925 | py | Python | Python36_x86_Template/Lib/crypt.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/crypt.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | null | null | null | Python36_x86_Template/Lib/crypt.py | iveskim/cloudbase-init-installer | bc7630a7fb7dd527618dac3938147e2d9439c285 | [
"Apache-2.0"
] | 4 | 2019-12-11T18:50:22.000Z | 2020-08-10T19:25:11.000Z | """Wrapper to the POSIX crypt library call and associated functionality."""
import _crypt
import string as _string
from random import SystemRandom as _SystemRandom
from collections import namedtuple as _namedtuple
# Alphabet of characters valid in a crypt(3) salt.
_saltchars = _string.ascii_letters + _string.digits + './'
# Cryptographically secure RNG for salt generation.
_sr = _SystemRandom()
class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')):
"""Class representing a salt method per the Modular Crypt Format or the
legacy 2-character crypt method."""
def __repr__(self):
return '<crypt.METHOD_{}>'.format(self.name)
def mksalt(method=None):
    """Generate a salt for the specified method.

    If not specified, the strongest available method will be used.

    """
    if method is None:
        method = methods[0]
    # Modular-Crypt-Format prefix ($ident$); the legacy method has none.
    prefix = '${}$'.format(method.ident) if method.ident else ''
    random_chars = (_sr.choice(_saltchars) for _ in range(method.salt_chars))
    return prefix + ''.join(random_chars)
def crypt(word, salt=None):
    """Return a string representing the one-way hash of a password, with a salt
    prepended.

    If ``salt`` is not specified or is ``None``, the strongest
    available method will be selected and a salt generated.  Otherwise,
    ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as
    returned by ``crypt.mksalt()``.

    """
    # A method constant (or nothing) means we still need a concrete salt.
    if isinstance(salt, _Method) or salt is None:
        salt = mksalt(salt)
    return _crypt.crypt(word, salt)
# available salting/crypto methods
METHOD_CRYPT = _Method('CRYPT', None, 2, 13)
METHOD_MD5 = _Method('MD5', '1', 8, 34)
METHOD_SHA256 = _Method('SHA256', '5', 16, 63)
METHOD_SHA512 = _Method('SHA512', '6', 16, 106)

# Probe the platform's crypt(3) for each method, strongest first; only
# methods that produce a hash of the expected length are kept, so
# ``methods[0]`` is the strongest supported one.
methods = []
for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5, METHOD_CRYPT):
    _result = crypt('', _method)
    if _result and len(_result) == _method.total_size:
        methods.append(_method)
# Clean up the probe's module-level temporaries.
del _result, _method
| 31.048387 | 80 | 0.669091 |
636a7f6cf357060f64919dc02df49ffe624f39fa | 2,365 | py | Python | indicator.py | NahsiN/MPBParser | 2f9a3c0b2d2b0aa9f9c7f0b25d49dd73a3114577 | [
"MIT"
] | 1 | 2016-09-13T08:06:06.000Z | 2016-09-13T08:06:06.000Z | indicator.py | NahsiN/MPBParser | 2f9a3c0b2d2b0aa9f9c7f0b25d49dd73a3114577 | [
"MIT"
] | null | null | null | indicator.py | NahsiN/MPBParser | 2f9a3c0b2d2b0aa9f9c7f0b25d49dd73a3114577 | [
"MIT"
] | null | null | null | # test hole integral step function
from MPBParser import MPBBandStructure, readfield
import numpy as np
import matplotlib.pyplot as plt
#mpb = MPBBandStructure('/home/nishan/Code/thales/MPB/w14/w14.out', 'zeven')
#mpb.csvparser()
#mpb.readbanddata()
#eps_slab = 10.0489
#eps_air = 1.0
#TOL = 1e-3
# no need since I start with an array of zeros
# mask_air_holes_supercell = abs(epsilon.dset[:]- eps_air) <= TOL
# step_func[mask_air_holes_supercell] = 0
def indicator_func(mpb, eps_slab, eps_air, type, TOL=1e-3):
    """Build a binary indicator array selecting a region of the MPB supercell.

    Parameters
    ----------
    mpb : MPBBandStructure
        Parsed MPB run; used by ``readfield`` to locate the epsilon dataset.
    eps_slab : float
        Dielectric constant of the slab material.
    eps_air : float
        Dielectric constant of air.
    type : str
        One of ``'slab_only'``, ``'air_slab_bdry'`` or ``'air_hole_slab_bdry'``.
        (Parameter name kept for backward compatibility even though it shadows
        the builtin.)
    TOL : float, optional
        Tolerance used when comparing epsilon values.

    Returns
    -------
    numpy.ndarray or None
        Array of 0/1 values with the same shape as the epsilon dataset, or
        ``None`` when ``type`` is not recognised.
    """
    epsilon = readfield(mpb, field_type='epsilon_isotropic_trace')
    if type == 'slab_only':
        # 1 wherever epsilon matches the slab material.
        indicator = np.zeros(epsilon.dset.shape)
        mask_slab = abs(epsilon.dset[:] - eps_slab) <= TOL
        indicator[mask_slab] = 1
    elif type == 'air_slab_bdry':
        # 1 wherever epsilon is neither pure air nor pure slab, i.e. the
        # interpolated boundary voxels (still includes the slab-hole boundary).
        indicator = np.zeros(epsilon.dset.shape)
        mask_air_slab_bdry = np.logical_not(np.logical_or(
            abs(epsilon.dset[:] - eps_air) <= TOL,
            abs(epsilon.dset[:] - eps_slab) <= TOL))
        # BUGFIX: the original indexed with the undefined name
        # 'mask_slab_hole_bdry', raising NameError on this branch.
        indicator[mask_air_slab_bdry] = 1
    elif type == 'air_hole_slab_bdry':
        indicator = np.zeros(epsilon.dset.shape)
        mask_air_hole_slab_bdry = np.logical_not(np.logical_or(
            abs(epsilon.dset[:] - eps_air) <= TOL,
            abs(epsilon.dset[:] - eps_slab) <= TOL))
        # Weed out the slab-supercell boundary: loop over the z dimension and
        # clear slices whose mean epsilon falls below the air/slab midpoint
        # (0.5*(eps_air + eps_slab) is a reasonable estimate).
        for k in range(epsilon.dset.shape[2]):
            if np.mean(epsilon.dset[:, :, k]) < 0.5 * (eps_air + eps_slab):
                mask_air_hole_slab_bdry[:, :, k] = False
        # BUGFIX: the original indexed with the undefined name
        # 'mask_slab_hole_bdry', raising NameError on this branch.
        indicator[mask_air_hole_slab_bdry] = 1
        # FOR DEBUGGING
        # plt.figure()
        # plt.imshow(indicator[:, :, int(epsilon.dset.shape[2]/2)])
        # plt.colorbar()
        # plt.show()
    else:
        print("Invalid type entered. Valid options are 'slab_only', "
              "'air_slab_bdry', 'air_hole_slab_bdry'.")
        epsilon.close()
        return None
    epsilon.close()
    return indicator
| 39.416667 | 102 | 0.665962 |
5dd08fa4cbbf9b30c23a6c4961b7081febc8922e | 2,130 | py | Python | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/MoveAppResourceRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/MoveAppResourceRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/MoveAppResourceRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class MoveAppResourceRequest(RpcRequest):
    """RPC request for the VOD ``MoveAppResource`` API (version 2017-03-21).

    Each get_*/set_* pair reads/writes one query-string parameter of the
    request.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'vod', '2017-03-21', 'MoveAppResource', 'vod')
        self.set_method('POST')

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_TargetAppId(self):
        params = self.get_query_params()
        return params.get('TargetAppId')

    def set_TargetAppId(self, TargetAppId):
        self.add_query_param('TargetAppId', TargetAppId)

    def get_ResourceOwnerAccount(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_ResourceType(self):
        params = self.get_query_params()
        return params.get('ResourceType')

    def set_ResourceType(self, ResourceType):
        self.add_query_param('ResourceType', ResourceType)

    def get_ResourceIds(self):
        params = self.get_query_params()
        return params.get('ResourceIds')

    def set_ResourceIds(self, ResourceIds):
        self.add_query_param('ResourceIds', ResourceIds)
f8775451dfded80c9ce645d490bc00bacb76b48c | 5,023 | py | Python | src/quantum/azext_quantum/vendored_sdks/azure_quantum/aio/operations/_storage_operations.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/quantum/azext_quantum/vendored_sdks/azure_quantum/aio/operations/_storage_operations.py | ravithanneeru/azure-cli-extensions | e0de87f3563ae39525370e9912589aac33e7bded | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | azure-quantum/azure/quantum/_client/aio/operations/_storage_operations.py | slowy07/qdk-python | e4ce0c433cc986bc1c746e9a58f3f05733c657e2 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageOperations:
    """StorageOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.quantum.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def sas_uri(
        self,
        blob_details: "_models.BlobDetails",
        **kwargs: Any
    ) -> "_models.SasUriResponse":
        """Gets a URL with SAS token for a container/blob in the storage account associated with the
        workspace. The SAS URL can be used to upload job input and/or download job output.

        :param blob_details: The details (name and container) of the blob to store or download data.
        :type blob_details: ~azure.quantum.models.BlobDetails
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SasUriResponse, or the result of cls(response)
        :rtype: ~azure.quantum.models.SasUriResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasUriResponse"]
        # Map auth/conflict/not-found status codes to their exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL from the operation's metadata template below.
        url = self.sas_uri.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'workspaceName': self._serialize.url("self._config.workspace_name", self._config.workspace_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and issue the POST through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(blob_details, 'BlobDetails')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.RestError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('SasUriResponse', pipeline_response)
        # Allow the caller-supplied 'cls' hook to post-process the response.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    sas_uri.metadata = {'url': '/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Quantum/workspaces/{workspaceName}/storage/sasUri'}  # type: ignore
| 49.245098 | 191 | 0.690026 |
c21c87a1477888264a502e7cd2571fb9c77b08c9 | 12,424 | py | Python | kpconv/utils/mayavi_visu.py | SergioRAgostinho/KPConv-PyTorch | 4044c959a9c1b07b150dac54cef0a8f43b2fdb06 | [
"MIT"
] | null | null | null | kpconv/utils/mayavi_visu.py | SergioRAgostinho/KPConv-PyTorch | 4044c959a9c1b07b150dac54cef0a8f43b2fdb06 | [
"MIT"
] | null | null | null | kpconv/utils/mayavi_visu.py | SergioRAgostinho/KPConv-PyTorch | 4044c959a9c1b07b150dac54cef0a8f43b2fdb06 | [
"MIT"
] | null | null | null | #
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Script for various visualization with mayavi
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import torch
import numpy as np
from sklearn.neighbors import KDTree
from os import makedirs, remove, rename, listdir
from os.path import exists, join
import time
import sys
# PLY reader
from .ply import write_ply, read_ply
# Configuration class
from .config import Config
def show_ModelNet_models(all_points):
    """Interactively browse a list of point clouds with mayavi.

    One cloud of ``all_points`` is shown at a time, colorized by its z
    coordinate; the 'g'/'h' keys step backward/forward through the list.
    Blocks in ``mlab.show()`` until the window is closed.  Uses a module-level
    ``file_i`` global shared with the keyboard callback.
    """
    from mayavi import mlab
    ###########################
    # Interactive visualization
    ###########################
    # Create figure for features
    fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
    fig1.scene.parallel_projection = False
    # Indices
    global file_i
    file_i = 0
    def update_scene():
        # clear figure
        mlab.clf(fig1)
        # Plot new data feature
        points = all_points[file_i]
        # Rescale points for visu
        points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
        # Show point clouds colorized with activations (here: z coordinate)
        activations = mlab.points3d(points[:, 0],
                                    points[:, 1],
                                    points[:, 2],
                                    points[:, 2],
                                    scale_factor=3.0,
                                    scale_mode='none',
                                    figure=fig1)
        # New title
        mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
        text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
        mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
        mlab.orientation_axes()
        return
    def keyboard_callback(vtk_obj, event):
        # Step through the model list with wrap-around.
        global file_i
        if vtk_obj.GetKeyCode() in ['g', 'G']:
            file_i = (file_i - 1) % len(all_points)
            update_scene()
        elif vtk_obj.GetKeyCode() in ['h', 'H']:
            file_i = (file_i + 1) % len(all_points)
            update_scene()
        return
    # Draw a first plot
    update_scene()
    fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
    mlab.show()
def show_ModelNet_examples(clouds, cloud_normals=None, cloud_labels=None):
    """Interactively browse point clouds with optional normals and labels.

    'g'/'h' step through ``clouds``; 'n' toggles drawing of the normal
    vectors (when ``cloud_normals`` is given).  If ``cloud_labels`` is None,
    the z coordinate is used for colorization.  Blocks in ``mlab.show()``.
    Uses module-level ``file_i``/``show_normals`` globals shared with the
    keyboard callback.
    """
    from mayavi import mlab
    ###########################
    # Interactive visualization
    ###########################
    # Create figure for features
    fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
    fig1.scene.parallel_projection = False
    if cloud_labels is None:
        cloud_labels = [points[:, 2] for points in clouds]
    # Indices
    global file_i, show_normals
    file_i = 0
    show_normals = True
    def update_scene():
        # clear figure
        mlab.clf(fig1)
        # Plot new data feature
        points = clouds[file_i]
        labels = cloud_labels[file_i]
        if cloud_normals is not None:
            normals = cloud_normals[file_i]
        else:
            normals = None
        # Rescale points for visu
        points = (points * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
        # Show point clouds colorized with activations
        activations = mlab.points3d(points[:, 0],
                                    points[:, 1],
                                    points[:, 2],
                                    labels,
                                    scale_factor=3.0,
                                    scale_mode='none',
                                    figure=fig1)
        # Overlay normal vectors as arrows when enabled.
        if normals is not None and show_normals:
            activations = mlab.quiver3d(points[:, 0],
                                        points[:, 1],
                                        points[:, 2],
                                        normals[:, 0],
                                        normals[:, 1],
                                        normals[:, 2],
                                        scale_factor=10.0,
                                        scale_mode='none',
                                        figure=fig1)
        # New title
        mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
        text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
        mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
        mlab.orientation_axes()
        return
    def keyboard_callback(vtk_obj, event):
        # g/h: previous/next cloud; n: toggle normals.
        global file_i, show_normals
        if vtk_obj.GetKeyCode() in ['g', 'G']:
            file_i = (file_i - 1) % len(clouds)
            update_scene()
        elif vtk_obj.GetKeyCode() in ['h', 'H']:
            file_i = (file_i + 1) % len(clouds)
            update_scene()
        elif vtk_obj.GetKeyCode() in ['n', 'N']:
            show_normals = not show_normals
            update_scene()
        return
    # Draw a first plot
    update_scene()
    fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
    mlab.show()
def show_neighbors(query, supports, neighbors):
    """Interactively visualize the neighborhood of each query point.

    Shows the ``query`` cloud and the ``supports`` cloud together; the
    currently selected query point and its neighbors (rows of ``neighbors``,
    indices into ``supports``) are highlighted by label value.  'g'/'h' step
    through the query points.  Blocks in ``mlab.show()``.  Uses a
    module-level ``file_i`` global shared with the keyboard callback.
    """
    from mayavi import mlab
    ###########################
    # Interactive visualization
    ###########################
    # Create figure for features
    fig1 = mlab.figure('Models', bgcolor=(1, 1, 1), size=(1000, 800))
    fig1.scene.parallel_projection = False
    # Indices
    global file_i
    file_i = 0
    def update_scene():
        # clear figure
        mlab.clf(fig1)
        # Rescale points for visu
        p1 = (query * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
        p2 = (supports * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
        # Labels: 0 = query points, 1 = selected query point,
        #         2 = support points, 3 = neighbors of the selected point.
        l1 = p1[:, 2]*0
        l1[file_i] = 1
        l2 = p2[:, 2]*0 + 2
        l2[neighbors[file_i]] = 3
        # Show point clouds colorized with activations
        activations = mlab.points3d(p1[:, 0],
                                    p1[:, 1],
                                    p1[:, 2],
                                    l1,
                                    scale_factor=2.0,
                                    scale_mode='none',
                                    vmin=0.0,
                                    vmax=3.0,
                                    figure=fig1)
        activations = mlab.points3d(p2[:, 0],
                                    p2[:, 1],
                                    p2[:, 2],
                                    l2,
                                    scale_factor=3.0,
                                    scale_mode='none',
                                    vmin=0.0,
                                    vmax=3.0,
                                    figure=fig1)
        # New title
        mlab.title(str(file_i), color=(0, 0, 0), size=0.3, height=0.01)
        text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
        mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
        mlab.orientation_axes()
        return
    def keyboard_callback(vtk_obj, event):
        # Step through the query points with wrap-around.
        global file_i
        if vtk_obj.GetKeyCode() in ['g', 'G']:
            file_i = (file_i - 1) % len(query)
            update_scene()
        elif vtk_obj.GetKeyCode() in ['h', 'H']:
            file_i = (file_i + 1) % len(query)
            update_scene()
        return
    # Draw a first plot
    update_scene()
    fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
    mlab.show()
def show_input_batch(batch):
    """Interactively inspect a network input batch layer by layer.

    Unstacks the batch into per-layer point lists and lets the user browse:
    '['/']' change the batch element, ','/'.' change the layer, 'n'/'m'
    change the selected point, and 'g' toggles between showing the selected
    point's neighbors and its pooling targets.  Blocks in ``mlab.show()``.
    Uses module-level ``b_i``/``l_i``/``neighb_i``/``show_pools`` globals
    shared with the keyboard callback.
    """
    from mayavi import mlab
    ###########################
    # Interactive visualization
    ###########################
    # Create figure for features
    fig1 = mlab.figure('Input', bgcolor=(1, 1, 1), size=(1000, 800))
    fig1.scene.parallel_projection = False
    # Unstack batch
    all_points = batch.unstack_points()
    all_neighbors = batch.unstack_neighbors()
    all_pools = batch.unstack_pools()
    # Indices
    global b_i, l_i, neighb_i, show_pools
    b_i = 0
    l_i = 0
    neighb_i = 0
    show_pools = False
    def update_scene():
        # clear figure
        mlab.clf(fig1)
        # Rescale points for visu
        p = (all_points[l_i][b_i] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
        labels = p[:, 2]*0
        if show_pools:
            # Append the selected next-layer point and highlight the current
            # layer points that pool into it (label 2; appended point is 3).
            p2 = (all_points[l_i+1][b_i][neighb_i:neighb_i+1] * 1.5 + np.array([1.0, 1.0, 1.0])) * 50.0
            p = np.vstack((p, p2))
            labels = np.hstack((labels, np.ones((1,), dtype=np.int32)*3))
            pool_inds = all_pools[l_i][b_i][neighb_i]
            pool_inds = pool_inds[pool_inds >= 0]
            labels[pool_inds] = 2
        else:
            # Highlight neighbors (label 2) of the selected point (label 3);
            # negative indices are padding and are dropped.
            neighb_inds = all_neighbors[l_i][b_i][neighb_i]
            neighb_inds = neighb_inds[neighb_inds >= 0]
            labels[neighb_inds] = 2
            labels[neighb_i] = 3
        # Show point clouds colorized with activations
        mlab.points3d(p[:, 0],
                      p[:, 1],
                      p[:, 2],
                      labels,
                      scale_factor=2.0,
                      scale_mode='none',
                      vmin=0.0,
                      vmax=3.0,
                      figure=fig1)
        """
        mlab.points3d(p[-2:, 0],
                      p[-2:, 1],
                      p[-2:, 2],
                      labels[-2:]*0 + 3,
                      scale_factor=0.16 * 1.5 * 50,
                      scale_mode='none',
                      mode='cube',
                      vmin=0.0,
                      vmax=3.0,
                      figure=fig1)
        mlab.points3d(p[-1:, 0],
                      p[-1:, 1],
                      p[-1:, 2],
                      labels[-1:]*0 + 2,
                      scale_factor=0.16 * 2 * 2.5 * 1.5 * 50,
                      scale_mode='none',
                      mode='sphere',
                      vmin=0.0,
                      vmax=3.0,
                      figure=fig1)
        """
        # New title
        title_str = '<([) b_i={:d} (])> <(,) l_i={:d} (.)> <(N) n_i={:d} (M)>'.format(b_i, l_i, neighb_i)
        mlab.title(title_str, color=(0, 0, 0), size=0.3, height=0.90)
        if show_pools:
            text = 'pools (switch with G)'
        else:
            text = 'neighbors (switch with G)'
        mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.3)
        mlab.orientation_axes()
        return
    def keyboard_callback(vtk_obj, event):
        # [/]: batch element, ,/.: layer, n/m: selected point, g: toggle pools.
        global b_i, l_i, neighb_i, show_pools
        if vtk_obj.GetKeyCode() in ['[', '{']:
            b_i = (b_i - 1) % len(all_points[l_i])
            neighb_i = 0
            update_scene()
        elif vtk_obj.GetKeyCode() in [']', '}']:
            b_i = (b_i + 1) % len(all_points[l_i])
            neighb_i = 0
            update_scene()
        elif vtk_obj.GetKeyCode() in [',', '<']:
            # Pool view needs layer l_i+1, so exclude the last layer then.
            if show_pools:
                l_i = (l_i - 1) % (len(all_points) - 1)
            else:
                l_i = (l_i - 1) % len(all_points)
            neighb_i = 0
            update_scene()
        elif vtk_obj.GetKeyCode() in ['.', '>']:
            if show_pools:
                l_i = (l_i + 1) % (len(all_points) - 1)
            else:
                l_i = (l_i + 1) % len(all_points)
            neighb_i = 0
            update_scene()
        elif vtk_obj.GetKeyCode() in ['n', 'N']:
            neighb_i = (neighb_i - 1) % all_points[l_i][b_i].shape[0]
            update_scene()
        elif vtk_obj.GetKeyCode() in ['m', 'M']:
            neighb_i = (neighb_i + 1) % all_points[l_i][b_i].shape[0]
            update_scene()
        elif vtk_obj.GetKeyCode() in ['g', 'G']:
            if l_i < len(all_points) - 1:
                show_pools = not show_pools
                neighb_i = 0
                update_scene()
        return
    # Draw a first plot
    update_scene()
    fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
    mlab.show()
| 28.430206 | 120 | 0.438265 |
c13874190349ab637917fc8250410bcf1f3c38d2 | 403 | py | Python | ljosmyndasida/wsgi.py | dadisigursveinn/VEF-Lokaverkefni | a862124b0958738a106e938d9ae95060f0cabec9 | [
"BSD-3-Clause"
] | null | null | null | ljosmyndasida/wsgi.py | dadisigursveinn/VEF-Lokaverkefni | a862124b0958738a106e938d9ae95060f0cabec9 | [
"BSD-3-Clause"
] | null | null | null | ljosmyndasida/wsgi.py | dadisigursveinn/VEF-Lokaverkefni | a862124b0958738a106e938d9ae95060f0cabec9 | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for ljosmyndasida project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ljosmyndasida.settings")
# Module-level WSGI callable used by the application server.
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
37ea05c1c73496ae2e1383e05d95b2b3e2d97994 | 3,471 | py | Python | envoy.github.abstract/tests/test_manager.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 1 | 2021-12-09T19:24:48.000Z | 2021-12-09T19:24:48.000Z | envoy.github.abstract/tests/test_manager.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 392 | 2021-08-24T15:55:32.000Z | 2022-03-28T14:26:22.000Z | envoy.github.abstract/tests/test_manager.py | phlax/abstracts | 53fbbee68d1f56effe0ded1ed4e28be870693877 | [
"Apache-2.0"
] | 3 | 2021-10-06T13:43:11.000Z | 2021-11-29T13:48:56.000Z |
from unittest.mock import AsyncMock, PropertyMock
import pytest
import packaging.version
import abstracts
from aio.functional import async_property
from envoy.github.abstract import manager
@abstracts.implementer(manager.AGithubReleaseManager)
class DummyGithubReleaseManager:
    # Concrete test double implementing AGithubReleaseManager.  Every member
    # simply delegates to the abstract base via super(), so tests can exercise
    # the base class's behaviour through an instantiable class.

    async def __aenter__(self):
        return super().__aenter__()

    async def __aexit__(self, *args):
        return super().__aexit__(*args)

    def __getitem__(self, version):
        return super().__getitem__(version)

    @property
    def github(self):
        return super().github

    @async_property
    async def latest(self):
        return super().latest

    @property
    def log(self):
        return super().log

    @async_property
    async def releases(self):
        return super().releases

    @async_property
    def releases_url(self):
        return super().releases_url

    @property
    def session(self):
        return super().session

    def fail(self, message):
        return super().fail(message)

    def format_version(self, version):
        return super().format_version(version)

    def parse_version(self, version):
        return super().parse_version(version)
@pytest.mark.parametrize("continues", [None, True, False])
@pytest.mark.parametrize("create", [None, True, False])
@pytest.mark.parametrize("user", [None, "USER"])
@pytest.mark.parametrize("oauth_token", [None, "OAUTH TOKEN"])
@pytest.mark.parametrize("log", [None, "LOG"])
@pytest.mark.parametrize("asset_types", [None, "ASSET TYPES"])
@pytest.mark.parametrize("github", [None, "GITHUB"])
@pytest.mark.parametrize("session", [None, "SESSION"])
def test_release_manager_constructor(
        continues, create, user, oauth_token,
        log, asset_types, github, session):
    # Construct the manager with every combination of optional kwargs;
    # None values are dropped so the constructor's defaults are exercised.
    kwargs = dict(
        continues=continues,
        create=create,
        user=user,
        oauth_token=oauth_token,
        log=log,
        asset_types=asset_types,
        github=github,
        session=session)
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    releaser = DummyGithubReleaseManager("PATH", "REPOSITORY", **kwargs)
    assert releaser._path == "PATH"
    assert releaser.repository == "REPOSITORY"
    # Defaults: continues=False, create=True when not supplied.
    assert (
        releaser.continues
        == (continues
            if continues is not None
            else False))
    assert (
        releaser.create
        == (create
            if create is not None
            else True))
    assert releaser._log == log
    assert releaser.oauth_token == oauth_token
    # user defaults to the empty string.
    assert releaser.user == (user or "")
    assert releaser._asset_types == asset_types
    assert releaser._github == github
    assert releaser._session == session
    # assert releaser._version_re == r"v(\w+)"
    assert (
        releaser.version_min
        == manager.VERSION_MIN
        == packaging.version.Version("0"))
    # version_min must not be cached on the instance.
    assert "version_min" not in releaser.__dict__
@pytest.mark.parametrize("session", [True, False])
async def test_release_manager_close(patches, session):
    # close() must close the underlying session and drop any cached
    # 'session' attribute from the instance dict.
    releaser = DummyGithubReleaseManager("PATH", "REPOSITORY")
    patched = patches(
        ("AGithubReleaseManager.session", dict(new_callable=PropertyMock)),
        prefix="envoy.github.abstract.manager")
    if session:
        # Simulate a previously-cached session on the instance.
        releaser.__dict__["session"] = "SESSION"
    with patched as (m_session, ):
        m_session.return_value.close = AsyncMock()
        assert not await releaser.close()
    assert "session" not in releaser.__dict__
| 27.991935 | 75 | 0.663786 |
05b76e07b8821ea5bfce6bcc2749ffd3ebe8343b | 8,125 | py | Python | salt/beacons/wtmp.py | eirinikos/salt | 7fb420c14a034a3c6c6775f4dad4cb8bdbefd0a8 | [
"Apache-2.0"
] | null | null | null | salt/beacons/wtmp.py | eirinikos/salt | 7fb420c14a034a3c6c6775f4dad4cb8bdbefd0a8 | [
"Apache-2.0"
] | null | null | null | salt/beacons/wtmp.py | eirinikos/salt | 7fb420c14a034a3c6c6775f4dad4cb8bdbefd0a8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Beacon to fire events at login of users as registered in the wtmp file
.. code-block:: yaml
beacons:
wtmp: []
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
import os
import struct
import time
# Import salt libs
import salt.utils.stringutils
import salt.utils.files
# Import 3rd-party libs
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'wtmp'
WTMP = '/var/log/wtmp'
# struct format of one wtmp record; unpacked values map positionally onto
# FIELDS below.
FMT = b'hi32s4s32s256shhiii4i20x'
FIELDS = [
    'type',
    'PID',
    'line',
    'inittab',
    'user',
    'hostname',
    'exit_status',
    'session',
    'time',
    'addr'
]
# Size in bytes of one packed wtmp record.
SIZE = struct.calcsize(FMT)
# __context__ key remembering the current read offset into the wtmp file.
LOC_KEY = 'wtmp.loc'
log = logging.getLogger(__name__)
# pylint: disable=import-error
# dateutil is optional; time_range filtering is disabled without it.
try:
    import dateutil.parser as dateutil_parser
    _TIME_SUPPORTED = True
except ImportError:
    _TIME_SUPPORTED = False
def __virtual__():
    '''
    Only load this beacon when the wtmp file exists on the minion.
    '''
    return __virtualname__ if os.path.isfile(WTMP) else False
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return status, msg
def _gather_group_members(group, groups, users):
    '''
    Gather group members.

    Looks the group up via the ``group.info`` execution module and merges
    each member into ``users`` (in place), attaching the group's beacon
    config from ``groups``.  Existing per-user entries are not overwritten.
    Missing groups are logged and skipped.
    '''
    _group = __salt__['group.info'](group)
    if not _group:
        log.warning('Group %s does not exist, ignoring.', group)
        return
    for member in _group['members']:
        if member not in users:
            users[member] = groups[group]
def _check_time_range(time_range, now):
    '''
    Check whether the epoch timestamp ``now`` falls inside ``time_range``.

    ``time_range`` is a dict with ``start``/``end`` strings parseable by
    dateutil.  Returns False (and logs an error) when dateutil is not
    installed.
    '''
    if _TIME_SUPPORTED:
        # Parse the human-readable bounds into epoch seconds for comparison.
        _start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
        _end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
        return bool(_start <= now <= _end)
    else:
        log.error('Dateutil is required.')
        return False
def _get_loc():
    '''
    return the active file location

    Reads the saved wtmp read offset from ``__context__``; implicitly
    returns None when no offset has been recorded yet.
    '''
    if LOC_KEY in __context__:
        return __context__[LOC_KEY]
def validate(config):
    '''
    Validate the beacon configuration.

    ``config`` must be a list of dicts; the optional ``users``, ``groups``
    and ``defaults`` entries must themselves be dictionaries, and any
    ``time_range`` they carry is checked via ``_validate_time_range``.
    Returns a ``(status, message)`` tuple.
    '''
    vstatus = True
    vmsg = 'Valid beacon configuration'
    # Configuration for wtmp beacon should be a list of dicts
    if not isinstance(config, list):
        vstatus = False
        vmsg = ('Configuration for wtmp beacon must be a list.')
    else:
        # Merge the list of per-entry dicts into a single dict for lookup.
        _config = {}
        list(map(_config.update, config))
        if 'users' in _config:
            if not isinstance(_config['users'], dict):
                vstatus = False
                vmsg = ('User configuration for wtmp beacon must '
                        'be a dictionary.')
            else:
                for user in _config['users']:
                    _time_range = _config['users'][user].get('time_range', {})
                    vstatus, vmsg = _validate_time_range(_time_range,
                                                         vstatus,
                                                         vmsg)
            if not vstatus:
                return vstatus, vmsg
        if 'groups' in _config:
            if not isinstance(_config['groups'], dict):
                vstatus = False
                vmsg = ('Group configuration for wtmp beacon must '
                        'be a dictionary.')
            else:
                for group in _config['groups']:
                    _time_range = _config['groups'][group].get('time_range', {})
                    vstatus, vmsg = _validate_time_range(_time_range,
                                                         vstatus,
                                                         vmsg)
            if not vstatus:
                return vstatus, vmsg
        if 'defaults' in _config:
            if not isinstance(_config['defaults'], dict):
                vstatus = False
                vmsg = ('Defaults configuration for wtmp beacon must '
                        'be a dictionary.')
            else:
                _time_range = _config['defaults'].get('time_range', {})
                vstatus, vmsg = _validate_time_range(_time_range,
                                                     vstatus,
                                                     vmsg)
                if not vstatus:
                    return vstatus, vmsg
    return vstatus, vmsg
def beacon(config):
    '''
    Read the last wtmp file and return information on the logins

    .. code-block:: yaml

        beacons:
          wtmp: []

        beacons:
          wtmp:
            - users:
                gareth:
            - defaults:
                time_range:
                    start: '8am'
                    end: '4pm'

        beacons:
          wtmp:
            - users:
                gareth:
                    time_range:
                        start: '8am'
                        end: '4pm'
            - defaults:
                time_range:
                    start: '8am'
                    end: '4pm'

        beacons:
          wtmp:
            - groups:
                users:
                    time_range:
                        start: '8am'
                        end: '4pm'
            - defaults:
                time_range:
                    start: '8am'
                    end: '4pm'

    .. versionadded:: Fluorine
    '''
    ret = []
    users = {}
    groups = {}
    defaults = None
    # Merge user/group/default filters from the list-of-dicts config.
    for config_item in config:
        if 'users' in config_item:
            users = config_item['users']
        if 'groups' in config_item:
            groups = config_item['groups']
        if 'defaults' in config_item:
            defaults = config_item['defaults']
    with salt.utils.files.fopen(WTMP, 'rb') as fp_:
        loc = __context__.get(LOC_KEY, 0)
        if loc == 0:
            # First run: remember the end of file and report nothing, so only
            # records written after beacon start are ever returned.
            fp_.seek(0, 2)
            __context__[LOC_KEY] = fp_.tell()
            return ret
        else:
            fp_.seek(loc)
        while True:
            now = int(time.time())
            raw = fp_.read(SIZE)
            # A short read means we've consumed every complete record.
            if len(raw) != SIZE:
                return ret
            __context__[LOC_KEY] = fp_.tell()
            pack = struct.unpack(FMT, raw)
            event = {}
            # Map the unpacked tuple onto named fields, decoding and
            # stripping NUL padding from string values.
            for ind, field in enumerate(FIELDS):
                event[field] = pack[ind]
                if isinstance(event[field], salt.ext.six.string_types):
                    if isinstance(event[field], bytes):
                        event[field] = salt.utils.stringutils.to_unicode(event[field])
                    event[field] = event[field].strip('\x00')
            # Expand configured groups into the users filter.
            for group in groups:
                _gather_group_members(group, groups, users)
            if users:
                # Only report events for configured users, honouring a
                # per-user time_range, then the defaults, if present.
                if event['user'] in users:
                    _user = users[event['user']]
                    if isinstance(_user, dict) and 'time_range' in _user:
                        if _check_time_range(_user['time_range'], now):
                            ret.append(event)
                    else:
                        if defaults and 'time_range' in defaults:
                            if _check_time_range(defaults['time_range'],
                                                 now):
                                ret.append(event)
                        else:
                            ret.append(event)
            else:
                # No user filter: report every event, subject to defaults.
                if defaults and 'time_range' in defaults:
                    if _check_time_range(defaults['time_range'], now):
                        ret.append(event)
                else:
                    ret.append(event)
    return ret
| 28.211806 | 89 | 0.490954 |
4013f7487fbe0bc1a62b7b13fa342ad643865077 | 1,140 | py | Python | .githooks/check-ansible.py | remcovergoossen/deepops | 52577cddabfcb317a833c0e89386d428625ae16a | [
"BSD-3-Clause"
] | 1 | 2020-08-25T13:41:50.000Z | 2020-08-25T13:41:50.000Z | .githooks/check-ansible.py | remcovergoossen/deepops | 52577cddabfcb317a833c0e89386d428625ae16a | [
"BSD-3-Clause"
] | 2 | 2019-06-14T19:59:52.000Z | 2019-07-12T00:22:56.000Z | .githooks/check-ansible.py | remcovergoossen/deepops | 52577cddabfcb317a833c0e89386d428625ae16a | [
"BSD-3-Clause"
] | 1 | 2019-03-26T16:50:04.000Z | 2019-03-26T16:50:04.000Z | #!/usr/bin/env python
"""
Get a list of Ansible playbooks and roles that have changes staged in Git.
Run ansible-lint on only those playbooks and roles.
"""
from __future__ import print_function
import subprocess
import re
import sys
def get_changed_ansible_paths():
"""
Get a list of playbook files and role directories that are staged for commit
"""
git_diff = subprocess.check_output("git diff --name-only --cached".split())
ansible_lint_paths_to_check = []
for f in git_diff.split("\n"):
# Add playbook files
if re.match(r"^playbooks/.*(yml|yaml)$", f):
ansible_lint_paths_to_check.append(f)
# Add role directories
role_match = re.match(r"^roles/(\w+)/.*", f)
if role_match:
ansible_lint_paths_to_check.append(
"roles/{}".format(role_match.group(1)))
return ansible_lint_paths_to_check
def run_ansible_lint(paths):
    """Run ansible-lint on the given paths and return its exit status."""
    command = ["ansible-lint"]
    command.extend(paths)
    return subprocess.call(command)
if __name__ == "__main__":
    # Lint only the staged Ansible paths; exit 0 (implicitly) when there is
    # nothing to check, otherwise propagate ansible-lint's exit status.
    changed = get_changed_ansible_paths()
    if len(changed) > 0:
        sys.exit(run_ansible_lint(changed))
| 27.804878 | 80 | 0.668421 |
1aa03409aa3d71ae9712c1b48b17887e9164798c | 2,143 | py | Python | tests/integration/gcp/test_running_in_notebooks.py | suomitekai/fairing | 9ca6a1138529b3f0b21979d62c7cb1f303bc52e0 | [
"Apache-2.0"
] | 334 | 2018-09-03T23:10:02.000Z | 2022-03-07T23:12:24.000Z | tests/integration/gcp/test_running_in_notebooks.py | suomitekai/fairing | 9ca6a1138529b3f0b21979d62c7cb1f303bc52e0 | [
"Apache-2.0"
] | 562 | 2018-09-03T21:33:42.000Z | 2022-03-29T12:47:43.000Z | tests/integration/gcp/test_running_in_notebooks.py | suomitekai/fairing | 9ca6a1138529b3f0b21979d62c7cb1f303bc52e0 | [
"Apache-2.0"
] | 160 | 2018-11-06T17:55:32.000Z | 2022-02-15T09:59:10.000Z | import os
import pytest
from ..helpers import run_notebook_test
@pytest.mark.skip(reason="GCPManaged backend needs to take build context as input")
def test_xgboost_highlevel_apis_gcp_managed():
    # Run the XGBoost high-level-APIs example notebook against the
    # GCPManagedBackend and check that its key progress messages appear.
    file_dir = os.path.dirname(__file__)
    notebook_rel_path = "../../../examples/prediction/xgboost-high-level-apis.ipynb"
    notebook_abs_path = os.path.normpath(
        os.path.join(file_dir, notebook_rel_path))
    expected_messages = [
        "Model export success: trained_ames_model.dat",  # KF training
        "Access job logs at the following URL:",  # GCP managed submission success
        "Prediction endpoint: http",  # create endpoint success
    ]
    parameters = {
        "FAIRING_BACKEND": "GCPManagedBackend"
    }
    run_notebook_test(notebook_abs_path, expected_messages, parameters=parameters)
@pytest.mark.skip(reason="debugging tests")
def test_xgboost_highlevel_apis_gke():
    """Run the XGBoost high-level-API example notebook on the Kubeflow GKE backend."""
    this_dir = os.path.dirname(__file__)
    notebook_path = os.path.normpath(os.path.join(
        this_dir, "../../../examples/prediction/xgboost-high-level-apis.ipynb"))
    # Output lines that must appear for the notebook run to count as a pass.
    expected_messages = [
        "Model export success: trained_ames_model.dat",  # KF training
        "Prediction endpoint: http",  # create endpoint success
    ]
    run_notebook_test(notebook_path, expected_messages,
                      parameters={"FAIRING_BACKEND": "KubeflowGKEBackend"})
@pytest.mark.skip(reason="debugging tests")
def test_lightgbm():
    """Run the distributed LightGBM training example notebook end to end."""
    this_dir = os.path.dirname(__file__)
    notebook_path = os.path.normpath(os.path.join(
        this_dir, "../../../examples/lightgbm/distributed-training.ipynb"))
    # Output lines that must appear for the notebook run to count as a pass.
    expected_messages = [
        "Copying gs://fairing-lightgbm/regression-example/regression.train.weight",
        "[LightGBM] [Info] Finished initializing network",  # dist training setup
        "[LightGBM] [Info] Iteration:10, valid_1 l2 : 0.2",
        "[LightGBM] [Info] Finished training",
        "Prediction mean: 0.5",
        ", count: 500"
    ]
    run_notebook_test(notebook_path, expected_messages)
| 41.211538 | 84 | 0.707886 |
adac13169fdc560c9da08455cef6b2b6dfde61bd | 23,800 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_ddos_protection_plans_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_ddos_protection_plans_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_ddos_protection_plans_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
    """DdosProtectionPlansOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this module is AutoRest-generated (see the file header);
    # manual edits here will be lost when the code is regenerated.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Internal: sends only the initial DELETE request. Long-running-operation
    # polling is driven by begin_delete below.
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # DELETE may complete synchronously (200/204) or be accepted for async
        # processing (202); anything else is surfaced as an ARM error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the optional callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DdosProtectionPlan"
        """Gets information about the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DdosProtectionPlan, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.DdosProtectionPlan
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlan"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
    # Internal: sends only the initial PUT request. Long-running-operation
    # polling is driven by begin_create_or_update below.
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        parameters,  # type: "_models.DdosProtectionPlan"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DdosProtectionPlan"
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlan"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing plan, 201 = created new plan.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        parameters,  # type: "_models.DdosProtectionPlan"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.DdosProtectionPlan"]
        """Creates or updates a DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the create or update operation.
        :type parameters: ~azure.mgmt.network.v2018_07_01.models.DdosProtectionPlan
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
        """Gets all DDoS protection plans in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.DdosProtectionPlanListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlanListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Builds the first-page request from the metadata URL, or follows the
        # server-supplied next_link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
        """Gets all the DDoS protection plans in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.DdosProtectionPlanListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlanListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Builds the first-page request scoped to the resource group, or follows
        # the server-supplied next_link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'}  # type: ignore
| 49.173554 | 207 | 0.665924 |
3cb4f1fac3e7f9a6211ac9b81c65afe925b4259d | 2,885 | py | Python | lig_maker.py | miguelsousa/robothon | f2ac88884e04a6e77f79c91e1709ab8c84f46043 | [
"MIT"
] | 7 | 2015-02-23T15:14:42.000Z | 2021-07-09T16:14:19.000Z | lig_maker.py | miguelsousa/robothon | f2ac88884e04a6e77f79c91e1709ab8c84f46043 | [
"MIT"
] | null | null | null | lig_maker.py | miguelsousa/robothon | f2ac88884e04a6e77f79c91e1709ab8c84f46043 | [
"MIT"
] | 1 | 2017-06-21T19:53:21.000Z | 2017-06-21T19:53:21.000Z | from robofab.world import CurrentFont
from robofab.gString import splitAccent
# For each precomposed accented glyph: [accent component name, (x, y) offset]
# used when placing the accent over the corresponding ligature base.
# NOTE(review): offsets assumed to be in font units for this family — confirm
# against the source UFO before reuse.
position = {'Aacute':['acute', (318, 0)], 'Abreve':['breve', (248, 0)], 'Acircumflex':['circumflex', (255, 0)], 'Adieresis':['dieresis', (239, 0)], 'Atilde':['tilde', (240, 0)], 'Agrave':['grave', (183, 0)], 'Amacron':['macron', (239, 0)], 'Jcircumflex':['circumflex', (387, 0)], 'Lacute':['acute', (156, 0)], 'Lcaron':['caron', (91, 0)], 'Lcommaaccent':['commaaccent', (209, 0)], 'Tcaron':['caron', (276, 0)], 'uni021A':['commaaccent', (269, 0)], 'Tcommaaccent':['cedilla', (231, 0)], }
# Each base letter mapped to itself plus all of its accented variants; used to
# expand every letter of a ligature into its possible forms.
accented = {'A':['A','Aacute', 'Abreve', 'Acircumflex', 'Adieresis', 'Atilde', 'Agrave', 'Amacron',], 'J':['J','Jcircumflex'], 'L':['L','Lacute', 'Lcaron', 'Lcommaaccent',], 'T':['T','Tcaron', 'uni021A', 'Tcommaaccent',]}
# Base ligature glyph names (underscore-joined letters) to expand below.
base = ['A_T', 'T_A', 'A_T_A', 'F_J', 'P_J', 'T_J', 'L_T', 'T_AE', 'Thorn_J', 'A_T_AE', 'Lslash_T', 'Aring_T', 'T_Aring', 'Aring_T_A', 'A_T_Aring', 'Aring_T_Aring', 'Aogonek_T', 'T_Aogonek', 'Aogonek_T_A', 'A_T_Aogonek', 'Aogonek_T_Aogonek',]
# The font currently open in the editor (RoboFab).
font = CurrentFont()
def combinations(lists):
    """Return the Cartesian product of *lists* as a list of lists.

    Example: combinations([['a', 'b'], ['c']]) -> [['a', 'c'], ['b', 'c']].
    The first sub-list varies slowest, matching the original recursive
    implementation.  combinations([]) returns [[]].
    """
    # Use the standard library instead of a hand-rolled recursion; this also
    # generalizes the input from lists-of-lists to any iterables of iterables.
    from itertools import product
    return [list(combo) for combo in product(*lists)]
# For every base ligature, generate all accented variants: each variant glyph is
# the base ligature as a component plus the accent components placed at the
# right horizontal positions.
for lig in base:
    parts = lig.split('_')
    temp = []
    for x in parts:
        # Expand each letter into its accented forms when available, otherwise
        # keep the letter as the only option.
        if x in accented.keys():
            temp.append(accented[x])
        else:
            temp.append([x])
    toMake = combinations(temp)
    for i in toMake:
        # Re-join the chosen letter forms into a glyph name, e.g. Aacute_T.
        name = ''
        for x in i:
            name = name + '_' + x
        name = name[1:]
        if name not in font.keys():
            font.newGlyph(name)
            # Start from the plain base ligature component, then overlay accents.
            font[name].appendComponent(lig)
            font[name].mark = 200
            font[name].rightMargin = 20
            glyphs = name.split('_')
            previous = ''
            index = 1
            for n in glyphs:
                if n in position.keys():
                    if index == 1:
                        # First letter: accent goes at its recorded offset.
                        font[name].appendComponent(position[n][0], position[n][1])
                    if index == 2:
                        # Second letter: shift the accent right past the first
                        # letter; the shift depends on which letter comes first.
                        # NOTE(review): constants look like per-letter advance
                        # widths — confirm against the font's metrics.
                        if splitAccent(n)[0] == 'J':
                            p = (position[n][1][0] + 854, position[n][1][1])
                        elif previous == 'A':
                            p = (position[n][1][0] + 865, position[n][1][1])
                        elif previous == 'L':
                            p = (position[n][1][0] + 781, position[n][1][1])
                        else:
                            p = (position[n][1][0] + 921, position[n][1][1])
                        font[name].appendComponent(position[n][0], p)
                    if index == 3:
                        # Third letter: fixed shift past the first two letters.
                        p = (position[n][1][0] + 1786, position[n][1][1])
                        font[name].appendComponent(position[n][0], p)
                # 'previous' is always the base form of the *first* glyph in the
                # name; used above to pick the second letter's shift.
                previous = splitAccent(glyphs[0])[0]
                index = index + 1
font.update()
print 'done'
 | 48.898305 | 487 | 0.487695 |
d5d37cc5a60890c59d3186c7fe9db93511bdf2bc | 3,225 | py | Python | examples/ad_manager/v201808/report_service/run_report_with_custom_fields.py | khanhnhk/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | 1 | 2021-12-30T15:21:42.000Z | 2021-12-30T15:21:42.000Z | examples/ad_manager/v201808/report_service/run_report_with_custom_fields.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201808/report_service/run_report_with_custom_fields.py | benlistyg/googleads-python-lib | 1e882141b8eb663b55dd582ce0f4fbf3cd2f672d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a report with custom fields found in the line items of an order."""
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
from googleads import errors
# Set the ID of the order to get line items from.
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201808')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201808')
# Filter for line items of a given order.
statement = (ad_manager.StatementBuilder()
.Where('orderId = :orderId')
.WithBindVariable('orderId', long(order_id)))
# Collect all line item custom field IDs for an order.
custom_field_ids = set()
# Get users by statement.
while True:
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
# Get custom field IDs from the line items of an order.
for line_item in response['results']:
if 'customFieldValues' in line_item:
for custom_field_value in line_item['customFieldValues']:
custom_field_ids.add(custom_field_value['customFieldId'])
statement.offset += statement.limit
else:
break
# Modify statement for reports
statement.limit = None
statement.offset = None
statement.Where('ORDER_ID = :orderId')
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['LINE_ITEM_ID', 'LINE_ITEM_NAME'],
'statement': statement.ToStatement(),
'columns': ['AD_SERVER_IMPRESSIONS'],
'dateRangeType': 'LAST_MONTH',
'customFieldIds': list(custom_field_ids)
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.AdManagerReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id "%s" downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
  # Initialize client object.
  # Credentials and network settings are read from the googleads.yaml storage
  # file; ORDER_ID must be edited above before running this example.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, ORDER_ID)
| 32.908163 | 77 | 0.714729 |
f6a9dcfb0be435a50df28a077951474068089a19 | 1,341 | py | Python | yaaredis/scripting.py | ProjectHentai/yaaredis | be6fcaf4c66f98272bfdeae33d34bb4e6fc13f1f | [
"MIT"
] | null | null | null | yaaredis/scripting.py | ProjectHentai/yaaredis | be6fcaf4c66f98272bfdeae33d34bb4e6fc13f1f | [
"MIT"
] | 5 | 2021-11-26T17:18:07.000Z | 2021-12-07T06:07:54.000Z | yaaredis/scripting.py | ProjectHentai/yaaredis | be6fcaf4c66f98272bfdeae33d34bb4e6fc13f1f | [
"MIT"
] | null | null | null | import hashlib
from yaaredis.exceptions import NoScriptError
from yaaredis.pipeline import BasePipeline
from yaaredis.utils import b
class Script:
    """An executable Lua script object returned by ``register_script``."""

    def __init__(self, registered_client, script):
        self.registered_client = registered_client
        self.script = script
        # Pre-compute the SHA1 digest so EVALSHA can be issued immediately.
        self.sha = hashlib.sha1(b(script)).hexdigest()

    async def execute(self, keys=None, args=None, client=None):
        """Executes the script, passing any required ``args``"""
        if client is None:
            client = self.registered_client
        key_list = keys or []
        call_args = tuple(key_list) + tuple(args or [])
        if isinstance(client, BasePipeline):
            # Register this script with the pipeline so it is known to be
            # needed before the pipeline executes.
            client.scripts.add(self)
        try:
            return await client.evalsha(self.sha, len(key_list), *call_args)
        except NoScriptError:
            # The server may not have this script cached — e.g. the client
            # points at a different server than the one that created this
            # instance. Reload the script, refresh the sha, and retry once.
            self.sha = await client.script_load(self.script)
            return await client.evalsha(self.sha, len(key_list), *call_args)
| 38.314286 | 79 | 0.647278 |
b5c579e5d1cec0c0b3f3de614673508b318b3e08 | 5,490 | py | Python | tests/test_examples.py | merwok-forks/cookiecutter | 7e7a09b22440aab415671958e1a5862ae65f9300 | [
"BSD-3-Clause"
] | null | null | null | tests/test_examples.py | merwok-forks/cookiecutter | 7e7a09b22440aab415671958e1a5862ae65f9300 | [
"BSD-3-Clause"
] | null | null | null | tests/test_examples.py | merwok-forks/cookiecutter | 7e7a09b22440aab415671958e1a5862ae65f9300 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_examples
--------------
Tests for the Cookiecutter example repos.
"""
from __future__ import unicode_literals
import errno
import logging
import os
import shutil
import subprocess
import sys
# Detect Python 3 explicitly via version_info; the previous lexicographic
# string comparison (sys.version > '3') was fragile.
PY3 = sys.version_info[0] >= 3
if PY3:
    from unittest.mock import patch
    input_str = 'builtins.input'
    from io import StringIO
else:
    import __builtin__
    from mock import patch
    input_str = '__builtin__.raw_input'
    from cStringIO import StringIO
if sys.version_info[:3] < (2, 7):
    import unittest2 as unittest
else:
    import unittest
# Environment switches used to skip tests on CI or without network access.
# os.environ.get preserves the original try/except KeyError semantics
# (value of the variable when set, False otherwise).
travis = os.environ.get(u'TRAVIS', False)
nonetwork = os.environ.get(u'DISABLE_NETWORK_TESTS', False)
from cookiecutter import config, utils
from tests import force_delete, CookiecutterCleanSystemTestCase
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
@unittest.skipIf(condition=travis, reason='Works locally with tox but fails on Travis.')
@unittest.skipIf(condition=nonetwork, reason='Needs a network connection to GitHub.')
class TestPyPackage(CookiecutterCleanSystemTestCase):

    def tearDown(self):
        """Remove the cloned template and any rendered output."""
        for leftover in ('cookiecutter-pypackage', 'boilerplate'):
            if os.path.isdir(leftover):
                shutil.rmtree(leftover, onerror=force_delete)
        super(TestPyPackage, self).tearDown()

    def test_cookiecutter_pypackage(self):
        """
        Tests that https://github.com/audreyr/cookiecutter-pypackage.git works.
        """
        # Clone the template, then render it non-interactively; each command is
        # equivalent to the original Popen(...).wait() pair.
        for command in (
                'git clone https://github.com/audreyr/cookiecutter-pypackage.git',
                'cookiecutter --no-input cookiecutter-pypackage/'):
            subprocess.call(command, stdin=subprocess.PIPE, shell=True)
        self.assertTrue(os.path.isdir('cookiecutter-pypackage'))
        self.assertTrue(os.path.isfile('boilerplate/README.rst'))
@unittest.skipIf(condition=travis, reason='Works locally with tox but fails on Travis.')
@unittest.skipIf(condition=nonetwork, reason='Needs a network connection to GitHub.')
class TestJQuery(CookiecutterCleanSystemTestCase):
    """Smoke test for the cookiecutter-jquery template repository."""

    def _run_shell(self, command):
        # Run *command* through the shell and block until it finishes.
        child = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True)
        child.wait()

    def tearDown(self):
        # Remove whatever the clone/generation steps left behind.
        for leftover in ('cookiecutter-jquery', 'boilerplate'):
            if os.path.isdir(leftover):
                shutil.rmtree(leftover, onerror=force_delete)
        super(TestJQuery, self).tearDown()

    def test_cookiecutter_jquery(self):
        """
        Tests that https://github.com/audreyr/cookiecutter-jquery.git works.
        """
        self._run_shell('git clone https://github.com/audreyr/cookiecutter-jquery.git')
        self._run_shell('cookiecutter --no-input cookiecutter-jquery/')
        self.assertTrue(os.path.isdir('cookiecutter-jquery'))
        self.assertTrue(os.path.isfile('boilerplate/README.md'))
@unittest.skipIf(condition=travis, reason='Works locally with tox but fails on Travis.')
@unittest.skipIf(condition=nonetwork, reason='Needs a network connection to GitHub.')
class TestExamplesRepoArg(CookiecutterCleanSystemTestCase):
    """Generate a project directly from a git URL argument."""

    def tearDown(self):
        with utils.work_in(config.DEFAULT_CONFIG['cookiecutters_dir']):
            for leftover in ('cookiecutter-pypackage', 'boilerplate'):
                if os.path.isdir(leftover):
                    shutil.rmtree(leftover, onerror=force_delete)
        super(TestExamplesRepoArg, self).tearDown()

    def test_cookiecutter_pypackage_git(self):
        """Accept every prompt default and check the generated boilerplate."""
        child = subprocess.Popen(
            'cookiecutter https://github.com/audreyr/cookiecutter-pypackage.git',
            stdin=subprocess.PIPE,
            shell=True
        )
        # Just skip all the prompts
        child.communicate(input=b'\n\n\n\n\n\n\n\n\n\n\n\n')
        self.assertTrue(os.path.isfile('boilerplate/README.rst'))
@unittest.skipIf(condition=travis, reason='Works locally with tox but fails on Travis.')
@unittest.skipIf(condition=nonetwork, reason='Needs a network connection to GitHub.')
class TestGitBranch(CookiecutterCleanSystemTestCase):
    """Generate a project from a specific branch via the -c option."""

    def tearDown(self):
        with utils.work_in(config.DEFAULT_CONFIG['cookiecutters_dir']):
            for leftover in ('cookiecutter-pypackage', 'boilerplate'):
                if os.path.isdir(leftover):
                    shutil.rmtree(leftover, onerror=force_delete)
        super(TestGitBranch, self).tearDown()

    def test_branch(self):
        """The console-script branch should produce a main.py module."""
        child = subprocess.Popen(
            'cookiecutter -c console-script https://github.com/audreyr/cookiecutter-pypackage.git',
            stdin=subprocess.PIPE,
            shell=True
        )
        # Just skip all the prompts
        child.communicate(input=b'\n\n\n\n\n\n\n\n\n\n\n\n')
        self.assertTrue(os.path.isfile('boilerplate/README.rst'))
        self.assertTrue(os.path.isfile('boilerplate/boilerplate/main.py'))
# Allow running this test module directly: ``python test_examples.py``.
if __name__ == '__main__':
    unittest.main()
| 31.193182 | 99 | 0.667395 |
fb7858deec1b0da539fe552b4bc68a2880a1d3b2 | 20,907 | py | Python | data/process.py | LauJames/key_phrase_extract | 5c93353e5f0d7641ce9390f4621b1cedc20220c3 | [
"Apache-2.0"
] | 1 | 2019-03-29T08:50:17.000Z | 2019-03-29T08:50:17.000Z | data/process.py | JiangYee/key_phrase_extract | 7bd32a8b1809566b7512b3cc9b324c8d073a0167 | [
"Apache-2.0"
] | 1 | 2019-03-10T06:04:40.000Z | 2019-03-10T06:04:40.000Z | data/process.py | JiangYee/key_phrase_extract | 7bd32a8b1809566b7512b3cc9b324c8d073a0167 | [
"Apache-2.0"
] | 1 | 2019-03-07T05:01:14.000Z | 2019-03-07T05:01:14.000Z | #! /user/bin/evn python
# -*- coding:utf8 -*-
import os
import re
import codecs
import numpy as np
import gensim
from numpy import linalg
import operator
import nltk
import json
from ir.config import Config
from ir.search import Search
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    :param string: raw text to normalize
    :return: lower-cased text with punctuation and contractions split into tokens
    """
    # Drop everything that is not alphanumeric or basic punctuation.
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    # Split common English contractions into separate tokens.
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # BUG FIX: the old replacement templates were " \( ", " \) ", " \? ";
    # `re` keeps the backslash of an unknown escape in the template, so the
    # cleaned text contained literal backslashes. Use plain characters.
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    # Collapse runs of whitespace.
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def get_stopword(path='stopwords.txt'):
    """Load stop words, one per line, from *path* (default: stopwords.txt).

    Blank lines are skipped; the old readline loop stopped reading at the
    first blank line and never closed the file handle.
    """
    stop_words = []
    with codecs.open(filename=path, encoding='utf-8') as fp:
        for line in fp:
            word = line.strip()
            if word:
                stop_words.append(word)
    print('停用词加载完毕!')
    return stop_words
# Load the vocabulary file.
def load_vocab(vacab_dir):
    """Load a vocabulary: one entry per line, the token being the first
    space-separated field.

    Blank lines are skipped (the old loop appended '' for them) and the
    file handle is closed via a context manager.
    """
    vocab = []
    with codecs.open(filename=vacab_dir, encoding='utf-8') as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            vocab.append(line.split(' ')[0])
    print('get vacab successful!')
    return vocab
# Load the tab-separated txt data set.
def load_all_data(txt_file_path, vocab):
    """Read tab-separated records: abstract TAB gold-keyphrases TAB extraction.

    The extraction field is ``w1|||p1;w2|||p2;...``; a record whose
    extraction field cannot be parsed is skipped entirely.

    :param txt_file_path: path of the data file
    :param vocab: known tokens; out-of-vocabulary tokens become 'unknown'
    :return: [docs, key_phrases, key_phrase_extracs] -- token lists per doc,
        gold key-phrase lists per doc, and {phrase: weight} dicts per doc
    """
    print('开始读取txt数据...')
    docs = []
    key_phrases = []
    key_phrase_extracs = []
    # O(1) membership test instead of an O(len(vocab)) list scan per token.
    vocab_set = set(vocab)
    line_num = 0
    with codecs.open(filename=txt_file_path, encoding='utf-8') as fp:
        for line in fp:
            line_num += 1
            print(line_num)
            tmp = line.strip().split('\t')
            doc_phrase_weight = {}
            is_error = False
            for chunk in tmp[2].split(';'):
                pair = chunk.split('|||')
                try:
                    doc_phrase_weight[pair[1]] = float(pair[0])
                except Exception as e:
                    print('Exception:' + str(e))
                    print('该行提取的关键术语数据有误:' + str(tmp[2]))
                    print('具体数据错误:' + str(pair))
                    # Abort this record; the old code only *tried* to, by
                    # reassigning the loop index, which has no effect.
                    is_error = True
                    break
            if is_error:
                continue
            key_phrase_extracs.append(doc_phrase_weight)
            doc_split = clean_str(tmp[0]).split(' ')
            docs.append([tok if tok in vocab_set else 'unknown' for tok in doc_split])
            key_phrases.append(tmp[1].split(';'))
    print('txt数据读取完毕!')
    return [docs, key_phrases, key_phrase_extracs]
# Load the json data set.
def load_all_data_json(json_file_path, vocab):
    """Read a JSON list of documents with 'keywords', 'extract_text' and
    'rake_extract' fields (rake_extract = ``w1|||p1###w2|||p2...``).

    Documents with an unparseable rake_extract field are skipped.  The
    file handle is now closed via a context manager (the old code leaked it).

    :return: [docs, key_phrases, key_phrase_extracs] as in load_all_data.
    """
    print('开始读取json数据...')
    docs = []
    key_phrases = []
    key_phrase_extracs = []
    # O(1) membership test instead of an O(len(vocab)) list scan per token.
    vocab_set = set(vocab)
    with open(json_file_path, encoding='utf-8') as file:
        json_dict = json.load(file)
    for one_doc in json_dict:
        keywords = one_doc['keywords']
        doc_text = one_doc['extract_text']
        rake_extract = one_doc['rake_extract']
        doc_phrase_weight = {}
        is_error = False
        for chunk in rake_extract.split('###'):
            pair = chunk.split('|||')
            try:
                doc_phrase_weight[pair[1]] = float(pair[0])
            except Exception as e:
                print('Exception:', str(e))
                print('该行提取的关键术语数据有误:' + str(rake_extract))
                print('具体数据错误:' + str(pair))
                is_error = True
                break
        if is_error:
            continue
        key_phrase_extracs.append(doc_phrase_weight)
        doc_split = clean_str(doc_text).split(' ')
        docs.append([tok if tok in vocab_set else 'unknown' for tok in doc_split])
        key_phrases.append(keywords.split(';'))
    print('json数据读取完毕!')
    return [docs, key_phrases, key_phrase_extracs]
# Retrieve the top-n related documents of every abstract from Elasticsearch.
def get_es_results(abstracts, top_n):
    """Search ES once per abstract and collect the hit lists.

    BUG FIX: the old code issued every query twice (once for the print and
    once for the append); run it once and reuse the result.
    """
    es_results = []
    config = Config()
    search = Search()
    for abstract in abstracts:
        result = search.search_by_abstract(abstract, top_n, config)
        print(result)
        es_results.append(result)
    return es_results
# Read the abstract of every article.
def get_docs(file_path):
    """Return the abstract (first tab-separated field) of every line in
    *file_path*."""
    docs = []
    with codecs.open(filename=file_path, encoding='utf-8') as fp:
        for line in fp:
            docs.append(line.strip().split('\t')[0])
    print('get docs successful!')
    return docs
# Compute document vectors from word vectors.
def doc2vec(vector_model, docs, dim=300):
    """Average the word vectors of each document.

    :param vector_model: mapping word -> vector (e.g. gensim KeyedVectors)
    :param docs: list of token lists
    :param dim: dimensionality of the word vectors (generalized from the
        old hard-coded 300; default keeps backward compatibility)
    :return: np.ndarray of shape (len(docs), dim)
    """
    all_doc_vectors = []
    for doc in docs:
        print(doc)
        vector = np.zeros(dim)
        for word in doc:
            vector = vector + np.array(vector_model[word])
        # Mean of the word vectors; an empty document keeps the zero vector
        # instead of producing NaNs via a 0/0 division.
        if doc:
            vector = vector / len(doc)
        all_doc_vectors.append(vector)
    print('文档向量计算完毕!\n')
    return np.array(all_doc_vectors)
# Pairwise similarity of all documents, sorted descending per document.
# Result shape: [[(0, 1.0), (2, 0.8), ...], [(1, 1.0), ...], ...]
def calculate_doc_sim(doc_vectors):
    """Cosine similarity between every pair of document vectors.

    :return: for each document, the (doc_index, cosine) pairs over all
        documents, sorted by similarity descending.
    """
    doc_num = len(doc_vectors)
    # Hoist the vector norms out of the O(n^2) loop: they are invariant.
    norms = [linalg.norm(v) for v in doc_vectors]
    all_doc_sim = []
    for i in range(doc_num):
        sims = {}
        for j in range(doc_num):
            sims[j] = np.dot(doc_vectors[i], doc_vectors[j]) / (norms[i] * norms[j])
        all_doc_sim.append(sorted(sims.items(), key=lambda d: d[1], reverse=True))
    print('文档相似度计算完毕!\n')
    return all_doc_sim
# Min-max normalize the merged internal/external weights: (w - min) / (max - min).
def normalization(kp_weight_dict):
    """Normalize the weights in-place to [0, 1]; returns the same dict.

    When every weight is equal the scale is zero; all weights map to 0.0
    instead of raising ZeroDivisionError as the old code did.
    """
    max_weight = max(kp_weight_dict.values())
    min_weight = min(kp_weight_dict.values())
    scale = max_weight - min_weight
    for key in kp_weight_dict:
        if scale == 0:
            kp_weight_dict[key] = 0.0
        else:
            kp_weight_dict[key] = (kp_weight_dict[key] - min_weight) / scale
    return kp_weight_dict
# For one document: fuse all key phrases of its topN similar external docs.
def get_external(topN_doc_sims, all_original_kp, currunt_docID):
    """Collect key phrases from the most similar external documents.

    :param topN_doc_sims: (doc_id, similarity) pairs for one target doc
    :param all_original_kp: gold key-phrase lists of every document
    :param currunt_docID: id of the target document itself (skipped)
    :return: {phrase: sum of similarities of the docs containing it}
    """
    phrase_sims = {}  # phrase -> similarities of the docs containing it
    for sim_docID, sim in topN_doc_sims:
        if sim_docID == currunt_docID:
            continue  # never count the target document itself
        for phrase in all_original_kp[sim_docID]:
            phrase_sims.setdefault(phrase, []).append(sim)
    # Fused weight = sum over the similar documents (the averaged variant
    # existed in the original as commented-out code).
    return {phrase: sum(sims) for phrase, sims in phrase_sims.items()}
# For one document: fuse internal and external key phrases.
# The target document's own extraction gets weight p, external docs get 1 - p.
def merge(original_dict, external_dict, p):
    """Linearly combine internal and external phrase weights.

    weight = p * internal + (1 - p) * external, treating a phrase missing
    on one side as weight 0 on that side.
    """
    merge_dict = {}
    for phrase in set(original_dict) | set(external_dict):
        merge_dict[phrase] = (p * original_dict.get(phrase, 0.0)
                              + (1 - p) * external_dict.get(phrase, 0.0))
    return merge_dict
def extract_all(all_doc_sim, all_original_kp, topN, all_kp_extracs, p):
    """Merge internal (RAKE) and external (similar-doc) key phrases for
    every document.

    :param all_doc_sim: per-doc (doc_id, sim) lists, sorted descending
    :param all_original_kp: gold key phrases of every document
    :param topN: number of similar documents to use
    :param all_kp_extracs: per-doc {phrase: weight} from RAKE
    :param p: weight of the internal extraction (external gets 1 - p)
    :return: per-doc merged {phrase: weight} dicts
    """
    all_merged_kp = []
    for doc_id, doc_sims in enumerate(all_doc_sim):
        # topN + 1 because the similarity list contains the doc itself.
        topN_doc_sims = doc_sims[:topN + 1]
        external_dict = get_external(topN_doc_sims, all_original_kp, currunt_docID=doc_id)
        original_dict = all_kp_extracs[doc_id]
        # Normalize both sides before the linear combination.
        external_dict = normalization(external_dict)
        original_dict = normalization(original_dict)
        all_merged_kp.append(merge(original_dict, external_dict, p))
    return all_merged_kp
def extract_all_es(es_results, vector_model, topN, p):
    """Merge internal (RAKE) and external key phrases, the candidate
    external documents coming from an Elasticsearch search per target doc.

    :param es_results: per target doc, the ES hits; hit[0] is the target
        itself; each hit provides the abstract at [1], the gold keywords
        at [2] and the RAKE extraction string at [3]
    :param vector_model: word -> vector mapping for document similarity
    :param topN: number of similar documents to use
    :param p: weight of the internal extraction (external gets 1 - p)
    """
    all_merged_kp = []
    for es_result in es_results:
        # Parse the RAKE extraction of the target document (first hit).
        rake_extract = es_result[0][3]
        rake_extract_dict = {}
        is_error = False
        for chunk in rake_extract.split('###'):
            pair = chunk.split('|||')
            try:
                rake_extract_dict[pair[1]] = float(pair[0])
            except Exception as e:
                print('Exception:', str(e))
                print('该行提取的关键术语数据有误:' + str(rake_extract))
                print('具体数据错误:' + str(pair))
                is_error = True
                break
        if is_error:
            continue
        abstracts = []
        keywords = []
        for data in es_result:
            abs_split = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9~!@#$%^&*()_+<>?:,./;’,。、‘:“《》?~!@#¥%……()]', ' ', data[1]).split(' ')
            # NOTE(review): `vocab` is read from module scope here (it is
            # only bound inside the __main__ block) -- confirm before
            # importing this function from another module.
            abs_split = [tok if tok in vocab else 'unknown' for tok in abs_split]
            abstracts.append(abs_split)
            keywords.append(data[2])
        doc_vectors = doc2vec(vector_model, abstracts)
        doc_sims = calculate_doc_sim(doc_vectors)
        # BUG FIX: take the topN + 1 most similar docs *of the target*
        # (index 0, which includes the target itself); the old code sliced
        # the outer per-document list, handing get_external a list of lists.
        topN_doc_sims = doc_sims[0][:topN + 1]
        external_dict = normalization(get_external(topN_doc_sims, keywords, currunt_docID=0))
        rake_extract_dict = normalization(rake_extract_dict)
        all_merged_kp.append(merge(rake_extract_dict, external_dict, p))
    return all_merged_kp
# Keep the topK merged key phrases of every document.
def get_topK_kp(all_merged_kp, k):
    """Return the k highest-weighted phrases of each document.

    Documents with fewer than k phrases yield all of them; the old code
    raised IndexError in that case.
    """
    topK_merged_kp = []
    for merged in all_merged_kp:
        ranked = sorted(merged.items(), key=lambda d: d[1], reverse=True)
        topK_merged_kp.append([phrase for phrase, _ in ranked[:k]])
    return topK_merged_kp
def evaluate(topK_merged_kp, original_kp):
    """Average precision/recall/F1 of predicted phrases against gold ones.

    Note: k may be smaller than the gold key-phrase count, so precision
    and recall are computed per document and then averaged.

    :return: (precision_avg, recall_avg, f, per-doc precision array,
        per-doc recall array)
    """
    precision = []
    recall = []
    for predicted, gold in zip(topK_merged_kp, original_kp):
        correct_num = sum(1 for phrase in predicted if phrase in gold)
        precision.append(correct_num / len(predicted))
        recall.append(correct_num / len(gold))
    precision = np.array(precision)
    recall = np.array(recall)
    precision_avg = np.average(precision)
    recall_avg = np.average(recall)
    # Guard against 0/0 when nothing at all was predicted correctly.
    if precision_avg + recall_avg == 0:
        f = 0.0
    else:
        f = (2 * precision_avg * recall_avg) / (precision_avg + recall_avg)
    return precision_avg, recall_avg, f, precision, recall
# Stop-word removal and Porter stemming of key phrases.
def stemming(kp_list, stop_words):
    """Porter-stem every key phrase and drop stop words from it.

    :param kp_list: per-document lists of key-phrase strings
    :param stop_words: words to drop (checked from the 2nd token onward)
    :return: per-document lists of stemmed key-phrase strings
    """
    stemmer = nltk.stem.PorterStemmer()
    all_stem_result = []
    for i in range(len(kp_list)):
        one_stem_result = []
        for j in range(len(kp_list[i])):
            one_kp_split = kp_list[i][j].split(' ')
            # The first token is always kept and stemmed, even when it is a
            # stop word -- presumably intentional (keeps the head word);
            # TODO(review): confirm this asymmetry.
            one_stem_kp = stemmer.stem(one_kp_split[0])
            for k in range(1, len(one_kp_split)):
                if not stop_words.__contains__(one_kp_split[k]):
                    one_stem_kp = one_stem_kp + ' ' + stemmer.stem(one_kp_split[k])
            one_stem_result.append(one_stem_kp)
        all_stem_result.append(one_stem_result)
    return all_stem_result
def evaluate_stem(topK_merged_kp, original_kp, stop_words):
    """Stemmed variant of evaluate(): Porter-stem both phrase lists, then
    score them with the same precision/recall/F computation.

    The old body duplicated evaluate() line for line; delegate instead.
    """
    topK_merged_kp = stemming(topK_merged_kp, stop_words)
    original_kp = stemming(original_kp, stop_words)
    return evaluate(topK_merged_kp, original_kp)
def save_results(result_array, save_path):
    """Write one result per line as ``index:value`` to *save_path*.

    The context manager guarantees the handle is closed even on error.
    """
    with codecs.open(filename=save_path, mode='w', encoding='utf-8') as fp:
        for i, result in enumerate(result_array):
            fp.write(str(i) + ":" + str(result) + '\n')
# Write the merged results, one document per line, sorted by weight desc.
def save_all_merged_results(result_list, save_dir):
    """Write each document's merged {phrase: weight} dict as a
    weight-descending sorted list of pairs, one line per document."""
    with codecs.open(filename=save_dir, mode='w', encoding='utf-8') as fp:
        for merged in result_list:
            fp.write(str(sorted(merged.items(), key=lambda d: d[1], reverse=True)) + '\n')
if __name__ == '__main__':
    # Input resources and output locations.
    vector_dir = 'sg.word2vec.300d'
    file_path = 'doc_test.txt'
    file_path_json = 'rake_extract_keyphrase.json'
    vocab_dir = 'vocab_sg300d.txt'
    merged_results_dir = 'all_merged_results.txt'
    # evaluate dir:
    evaluate_dir = '../evaluate/'
    topK_merged_dir = 'topK_merged_results.txt'
    precision_dir = 'precision.txt'
    recall_dir = 'recall.txt'
    topN = 10  # number of similar documents to fuse
    p_list = [0.2, 0.5, 0.6, 0.8]  # internal-extraction weights to sweep
    k_list = [2, 4, 6]  # top-k cutoffs to sweep
    stop_words = get_stopword()
    print('加载词向量模型...')
    word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(fname=vector_dir, binary=False)
    print('词向量模型加载完毕!')
    # prepare for data
    vocab = load_vocab(vocab_dir)
    # docs, all_original_kp, all_kp_extracs = load_all_data(file_path, vocab)
    # docs, all_original_kp, all_kp_extracs = load_all_data_json(file_path_json, vocab)
    # all_doc_vectors = doc2vec(word2vec_model, docs)
    # all_doc_sim = calculate_doc_sim(all_doc_vectors)
    # doc_sim = calculate_doc_sim(all_doc_vectors)
    # for i in range(len(doc_sim)):
    #     print('doc'+ str(i))
    #     print(str(doc_sim[i][:11]))
    #     print('\n')
    abstract_list = get_docs(file_path)
    es_results = get_es_results(abstract_list, 5)
    # Sweep the internal weight p; each p gets its own output folder.
    for p in p_list:
        print('概率p为 ' + str(p) + ' 的结果:')
        if not os.path.exists(evaluate_dir):
            os.makedirs(evaluate_dir)
        p_evaluate_dir = os.path.join(evaluate_dir, 'P' + str(p) + '/')
        if not os.path.exists(p_evaluate_dir):
            os.makedirs(p_evaluate_dir)
        all_merged_dir = os.path.join(p_evaluate_dir, 'all_merged.txt')
        # all_merged_kp = extract_all(all_doc_sim, all_original_kp, topN, all_kp_extracs, p)
        all_merged_kp =extract_all_es(es_results, word2vec_model, 3, p)
        # print('内外部融合结果:')
        # for i in range(len(all_merged_kp)):
        #     print(sorted(all_merged_kp[i].items(), key=lambda d: d[1], reverse=True))
        save_all_merged_results(all_merged_kp, all_merged_dir)
        for k in k_list:
            print('取前 ' + str(k) + ' 个关键术语的结果:')
            # Per-k output folder under the current p folder.
            p_k_evaluate_dir = os.path.join(p_evaluate_dir, 'top' + str(k) + '/')
            if not os.path.exists(p_k_evaluate_dir):
                os.makedirs(p_k_evaluate_dir)
            p_k_merged_results_dir = os.path.join(p_k_evaluate_dir, 'top' + str(k) + '_phrases.txt')
            topK_merged_kp = get_topK_kp(all_merged_kp, k)
            save_results(topK_merged_kp, p_k_merged_results_dir)
            # evaluate:
            precision_dir = os.path.join(p_k_evaluate_dir, 'precision_' + str(k) + '.txt')
            recall_dir = os.path.join(p_k_evaluate_dir, 'recall_' + str(k) + '.txt')
            # NOTE(review): `all_original_kp` is only assigned in the
            # commented-out load_all_data* lines above, so this call raises
            # NameError as written -- re-enable one of those loaders.
            precision_avg, recall_avg, f, precision, recall = evaluate_stem(topK_merged_kp, all_original_kp, stop_words)
            save_results(precision, precision_dir)
            save_results(recall, recall_dir)
            print('平均检准率: ', precision_avg)
            print('平均检全率: ', recall_avg)
            print('F值: ', f)
            print('\n')
| 35.92268 | 130 | 0.587889 |
f899059deb0d3c264f81f659514c9766086233ea | 481 | py | Python | d05/part2.py | burk3/aoc2020 | 541d6102276978ea5e4e7abbd25a8811268be148 | [
"MIT"
] | null | null | null | d05/part2.py | burk3/aoc2020 | 541d6102276978ea5e4e7abbd25a8811268be148 | [
"MIT"
] | null | null | null | d05/part2.py | burk3/aoc2020 | 541d6102276978ea5e4e7abbd25a8811268be148 | [
"MIT"
] | null | null | null | import argparse
def to_n(s: str) -> int:
    """Decode an AoC day-5 boarding pass to its seat ID.

    F/L mean binary 0 and B/R mean 1, so the whole string is a single
    binary number (row * 8 + column). One translate pass replaces the
    four chained str.replace calls.
    """
    return int(s.translate(str.maketrans("FBLR", "0101")), base=2)
def main():
    """Print every seat ID that is absent from the input file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=argparse.FileType("r"))
    args = parser.parse_args()
    remaining = set(range(891))
    for line in args.input:
        remaining.remove(to_n(line.strip()))
    for seat in sorted(remaining):
        # BUG FIX: the original printed the whole set `r` on every
        # iteration instead of the individual missing seat id.
        print(seat)
if __name__ == "__main__":
main() | 20.913043 | 61 | 0.567568 |
e601137d3f021d2811e7e8844627280853b124a7 | 3,589 | py | Python | google/cloud/identitytoolkit/v2/identitytoolkit-v2-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/identitytoolkit/v2/identitytoolkit-v2-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/identitytoolkit/v2/identitytoolkit-v2-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
# Paths and package metadata used by the sessions below.
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
# NOTE(review): nox itself reads `nox.options.sessions`; confirm whether
# assigning `nox.sessions` actually selects the default sessions here.
nox.sessions = [
    "unit",
    "cover",
    "mypy",
    # BUG FIX: a missing trailing comma made Python concatenate
    # "check_lower_bounds" and "docs" into one bogus session name.
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
    """Run the unit test suite under coverage."""
    test_deps = ('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
    session.install(*test_deps)
    session.install('-e', '.')
    pytest_args = [
        'py.test',
        '--quiet',
        '--cov=google/cloud/identity_toolkit_v2/',
        '--cov-config=.coveragerc',
        '--cov-report=term',
        '--cov-report=html',
        os.path.join('tests', 'unit', ''.join(session.posargs)),
    ]
    session.run(*pytest_args)
@nox.session(python='3.7')
def cover(session):
    """Aggregate coverage from the unit runs, enforce 100%, then erase.

    Only unit-test coverage is aggregated (system tests are excluded).
    """
    session.install("coverage", "pytest-cov")
    for command in (
        ("coverage", "report", "--show-missing", "--fail-under=100"),
        ("coverage", "erase"),
    ):
        session.run(*command)
@nox.session(python=['3.6', '3.7'])
def mypy(session):
    """Static type-check the package with mypy."""
    session.install('mypy', 'types-pkg_resources')
    session.install('.')
    mypy_args = ('mypy', '--explicit-package-bases', 'google')
    session.run(*mypy_args)
@nox.session
def update_lower_bounds(session):
    """Rewrite constraints.txt so its lower bounds match setup.py."""
    session.install('google-cloud-testutils')
    session.install('.')
    checker_args = [
        'lower-bound-checker',
        'update',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    ]
    session.run(*checker_args)
@nox.session
def check_lower_bounds(session):
    """Verify that setup.py lower bounds are reflected in constraints.txt."""
    session.install('google-cloud-testutils')
    session.install('.')
    checker_args = [
        'lower-bound-checker',
        'check',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    ]
    session.run(*checker_args)
@nox.session(python='3.6')
def docs(session):
    """Build the HTML documentation with Sphinx."""
    session.install("-e", ".")
    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
    build_dir = os.path.join("docs", "_build")
    shutil.rmtree(build_dir, ignore_errors=True)
    sphinx_args = [
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    ]
    session.run(*sphinx_args)
| 26.984962 | 96 | 0.627751 |
4564bd90ebf81b9ebf6f370d04d0b317420fb601 | 1,563 | py | Python | pyspedas/utilities/tcopy.py | amanotk/pyspedas | ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4 | [
"MIT"
] | null | null | null | pyspedas/utilities/tcopy.py | amanotk/pyspedas | ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4 | [
"MIT"
] | null | null | null | pyspedas/utilities/tcopy.py | amanotk/pyspedas | ba38f9a318fe96911a0fb3d6fce53e8b1a534ff4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File:
tcopy.py
Description:
Creates a deep copy of a tplot_variable, with a new name.
Parameters:
names_in: str/list of str
List of pytplot names.
names_out: str/list of str
List of pytplot names.
If it is not provided, then suffix '-copy' is used.
suffix:
A suffix to apply. Default is '-copy'.
Notes:
Allowed wildcards are ? for a single character, * from multiple characters.
"""
import pytplot
import pyspedas
import copy
def tcopy_one(name_in, name_out):
    """Deep-copy a single pytplot variable under a new name."""
    source = pytplot.data_quants[name_in]
    duplicate = copy.deepcopy(source)
    duplicate.name = name_out
    pytplot.data_quants.update({name_out: duplicate})
    print(name_in + ' copied to ' + name_out)
def tcopy(names_in, names_out=None, suffix=None):
    """Deep-copy pytplot variables to new names.

    :param names_in: pytplot name(s); wildcards are resolved via tnames
    :param names_out: target name(s); defaults to names_in + suffix
    :param suffix: suffix when names_out is omitted (default '-copy')
    """
    # Expand wildcards (?, *) into the concrete variable names.
    names_in = pyspedas.tnames(names_in)
    if len(names_in) < 1:
        print('tcopy error: No pytplot variables found.')
        return
    if suffix is None:
        suffix = '-copy'
    if names_out is None:
        names_out = [s + suffix for s in names_in]
    # A single output name is promoted to a one-element list.
    if isinstance(names_out, str):
        names_out = [names_out]
    if len(names_in) != len(names_out):
        print('tcopy error: List with the names_in does not match list\
            with the names out.')
        return
    # Copy pairwise; re-check each name still resolves before copying.
    for i in range(len(names_in)):
        n = names_in[i]
        o = names_out[i]
        if len(pyspedas.tnames(n)) == 1:
            tcopy_one(n, o)
        else:
            print('tplot name not found: ' + n)
| 24.421875 | 79 | 0.623161 |
3a1fd7027b826a63ac8a0933597156b19329bdbf | 1,657 | py | Python | decopatch/tests/test_introspection_base.py | smarie/python-decopatch | 7172d09c83ec3b6eef947f37fca7a1ddabfb09cc | [
"BSD-3-Clause"
] | 17 | 2019-03-12T10:09:20.000Z | 2022-02-27T08:50:15.000Z | decopatch/tests/test_introspection_base.py | smarie/python-decopatch | 7172d09c83ec3b6eef947f37fca7a1ddabfb09cc | [
"BSD-3-Clause"
] | 27 | 2019-03-05T13:18:47.000Z | 2022-03-01T08:51:30.000Z | decopatch/tests/test_introspection_base.py | smarie/python-decopatch | 7172d09c83ec3b6eef947f37fca7a1ddabfb09cc | [
"BSD-3-Clause"
] | 3 | 2019-11-13T18:59:02.000Z | 2022-02-02T14:16:28.000Z | import sys
import pytest
from decopatch import FirstArgDisambiguation
from decopatch.utils_disambiguation import disambiguate_using_introspection
def generate_decorator():
    """Build a fresh decorator whose ``success`` attribute records whether
    it was actually applied to the decorated target."""
    def my_deco(*args):
        def apply(f):
            my_deco.success = True
            return f
        # Introspection decides whether the single positional argument is
        # the decorated target itself (no-parenthesis usage) or an ordinary
        # decorator argument. The first argument (2) is presumably the
        # stack depth of the call site -- TODO(review): confirm.
        if len(args) == 1 \
                and disambiguate_using_introspection(2, args[0]) is FirstArgDisambiguation.is_decorated_target:
            assert args[0].__name__.lower() == 'foo'
            return apply(args[0])
        else:
            return apply
    # Fresh flag per generated decorator; flipped by apply().
    my_deco.success = False
    return my_deco
# Call-site shapes exercised by the parametrized test below.
NO_PARENTHESIS = 0
EMPTY_PARENTHESIS = 1
ARGS_IN_PARENTHESIS = 2
@pytest.mark.parametrize('is_class', [False, True], ids="isclass={}".format)
@pytest.mark.parametrize('call_mode', [NO_PARENTHESIS, EMPTY_PARENTHESIS, ARGS_IN_PARENTHESIS],
                         ids="call_mode={}".format)
def test_introspection(is_class, call_mode):
    """The decorator must end up applied (success flag set) for every
    combination of target kind (class/function) and call-site shape.

    The branches are written out literally because the introspection
    inspects the call site; factoring them into a helper could change
    the observed stack/source.
    """
    my_deco = generate_decorator()
    if call_mode is NO_PARENTHESIS:
        # @my_deco -- bare decorator, target passed as the single argument.
        if is_class:
            @my_deco
            class Foo:
                pass
        else:
            @my_deco
            def foo():
                pass
    elif call_mode is EMPTY_PARENTHESIS:
        # @my_deco() -- explicit empty-argument call.
        if is_class:
            @my_deco()
            class Foo:
                pass
        else:
            @my_deco()
            def foo():
                pass
    elif call_mode is ARGS_IN_PARENTHESIS:
        # @my_deco(arg) -- a callable argument that is NOT the target.
        if is_class:
            @my_deco(generate_decorator)
            class Foo:
                pass
        else:
            @my_deco(generate_decorator)
            def foo():
                pass
    assert my_deco.success
| 24.367647 | 111 | 0.575136 |
ada424a9f06419ff7dcb47be7d501b577d01a861 | 4,896 | py | Python | src/fetch_data.py | maximecharpentierdata/image-captioning | 1f2bf5149ecc84bb4917bb62bd70915157e526c6 | [
"MIT"
] | 2 | 2022-01-13T10:49:34.000Z | 2022-01-23T15:10:58.000Z | src/fetch_data.py | maximecharpentierdata/image-captioning | 1f2bf5149ecc84bb4917bb62bd70915157e526c6 | [
"MIT"
] | 1 | 2022-01-26T08:22:25.000Z | 2022-01-26T08:22:25.000Z | src/fetch_data.py | maximecharpentierdata/image-captioning | 1f2bf5149ecc84bb4917bb62bd70915157e526c6 | [
"MIT"
] | null | null | null | import kaggle
import shutil
import os
import urllib.request
import zipfile
from tqdm import tqdm
from .config import DATA_ROOT_PATH, DATASET, Datasets
# tqdm subclass used as an urlretrieve reporthook-compatible progress bar.
class DownloadProgressBar(tqdm):
    def update_to(self, blocks=1, block_size=1, total_size=None):
        """Advance the bar from urlretrieve's (count, block_size, total)."""
        if total_size is not None:
            self.total = total_size
        self.update(blocks * block_size - self.n)
def download_url(url, output_path):
    """Download *url* to *output_path*, showing a tqdm progress bar."""
    label = url.split("/")[-1]
    with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=label) as bar:
        urllib.request.urlretrieve(url, filename=output_path, reporthook=bar.update_to)
def fetch_flickr8k(root):
    """Will download and place the Flickr8k data appropriately in the root dir given"""
    # Downloading Flickr8k data
    kaggle.api.authenticate()
    kaggle.api.dataset_download_files(
        "shadabhussain/flickr8k", path=root, unzip=True, quiet=False
    )
    # Remove the archive members this project never uses (DRY: the old
    # code repeated one os.remove call per file).
    unused_files = (
        "Flickr_Data/Flickr_TextData/CrowdFlowerAnnotations.txt",
        "Flickr_Data/Flickr_TextData/ExpertAnnotations.txt",
        "Flickr_Data/Flickr_TextData/Flickr_8k.devImages.txt",
        "Flickr_Data/Flickr_TextData/Flickr_8k.testImages.txt",
        "Flickr_Data/Flickr_TextData/Flickr_8k.trainImages.txt",
        "train_encoded_images.p",
    )
    for relative_path in unused_files:
        os.remove(os.path.join(root, relative_path))
    # Removing unused folders
    shutil.rmtree(os.path.join(root, "Flickr_Data/flickr8ktextfiles"))
def fetch_glove():
    """Download the GloVe Twitter embeddings and keep only the 200d file."""
    # Downloading GloVE files
    url = "https://huggingface.co/stanfordnlp/glove/resolve/main/glove.twitter.27B.zip"
    download_url(url, "./glove.twitter.27B.zip")
    # Unzipping
    with zipfile.ZipFile("./glove.twitter.27B.zip", "r") as zip_ref:
        zip_ref.extractall("./glove")
    # Remove the dimensionalities this project does not use, plus the
    # downloaded archive (DRY: one loop instead of repeated os.remove).
    for leftover in (
        "./glove/glove.twitter.27B.25d.txt",
        "./glove/glove.twitter.27B.50d.txt",
        "./glove/glove.twitter.27B.100d.txt",
        "./glove.twitter.27B.zip",
    ):
        os.remove(leftover)
def fetch_COCO(root):
    """Download and lay out the COCO 2014 dataset under ``<root>/COCO``.

    Layout produced:
        COCO/train/{captions.json, images/}
        COCO/val/{captions.json, images/}
        COCO/test/{images_info, images/}

    Args:
        root: str, parent directory for the ``COCO`` tree
    """
    coco_root = os.path.join(root, "COCO")
    splits = ["train", "test", "val"]
    # Prepare dirs
    for split in splits:
        os.makedirs(os.path.join(coco_root, split), exist_ok=True)
    # Downloading annotations files
    url = "http://images.cocodataset.org/annotations/annotations_trainval2014.zip"
    download_url(url, os.path.join(coco_root, "annotations_trainval2014.zip"))
    # Unzipping
    with zipfile.ZipFile(os.path.join(coco_root, "annotations_trainval2014.zip"), "r") as zip_ref:
        zip_ref.extractall(coco_root)
    # Removing unused files
    for unused in (
        "annotations/instances_train2014.json",
        "annotations/instances_val2014.json",
        "annotations/person_keypoints_train2014.json",
        "annotations/person_keypoints_val2014.json",
        "annotations_trainval2014.zip",
    ):
        os.remove(os.path.join(coco_root, unused))
    shutil.move(os.path.join(coco_root, "annotations/captions_val2014.json"),
                os.path.join(coco_root, "val", "captions.json"))
    shutil.move(os.path.join(coco_root, "annotations/captions_train2014.json"),
                os.path.join(coco_root, "train", "captions.json"))
    # Downloading test set info file
    url = "http://images.cocodataset.org/annotations/image_info_test2014.zip"
    download_url(url, os.path.join(coco_root, "image_info_test2014.zip"))
    with zipfile.ZipFile(os.path.join(coco_root, "image_info_test2014.zip"), "r") as zip_ref:
        zip_ref.extractall(coco_root)
    shutil.move(os.path.join(coco_root, "annotations/image_info_test2014.json"),
                os.path.join(coco_root, "test", "images_info"))
    # BUG FIX: the original removed image_info_test2014.zip *before* it was
    # downloaded (FileNotFoundError) and called os.remove on the 'annotations'
    # directory (IsADirectoryError). Remove the archive here, after extraction,
    # and rmtree the now-emptied annotations directory.
    os.remove(os.path.join(coco_root, "image_info_test2014.zip"))
    shutil.rmtree(os.path.join(coco_root, "annotations"))
    # Downloading images
    for split in splits:
        url = f"http://images.cocodataset.org/zips/{split}2014.zip"
        filename = os.path.join(coco_root, split, f"{split}2014.zip")
        if not os.path.exists(filename):
            download_url(url, filename)
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(os.path.join(coco_root, split))
        os.rename(os.path.join(coco_root, split, f"{split}2014"), os.path.join(coco_root, split, "images"))
        # os.remove(filename)
if __name__ == "__main__":
    # Fetch whichever dataset is selected in the project config, then GloVe.
    if DATASET == Datasets.FLICKR:
        print("#### Downloading Flickr8k dataset ####\n")
        fetch_flickr8k(DATA_ROOT_PATH)
    elif DATASET == Datasets.COCO:
        print("#### Downloading COCO dataset ####\n")
        fetch_COCO(DATA_ROOT_PATH)
    # typo fix: "Downloding" -> "Downloading"
    print("#### Downloading and unzipping GloVE embedding weights ####\n")
    fetch_glove()
    print("Finished!")
| 41.491525 | 130 | 0.704861 |
c6565947b2cd82c0f835047e1ef1ce1b53022e22 | 30,679 | py | Python | sdk/python/pulumi_openstack/sharedfilesystem/share_network.py | pulumi/pulumi-openstack | 945eed22a82784e9f0b3aa56168b2397c2f503e8 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2018-09-12T12:37:51.000Z | 2022-02-04T19:32:13.000Z | sdk/python/pulumi_openstack/sharedfilesystem/share_network.py | pulumi/pulumi-openstack | 945eed22a82784e9f0b3aa56168b2397c2f503e8 | [
"ECL-2.0",
"Apache-2.0"
] | 72 | 2018-08-15T13:04:57.000Z | 2022-03-31T15:39:49.000Z | sdk/python/pulumi_openstack/sharedfilesystem/share_network.py | pulumi/pulumi-openstack | 945eed22a82784e9f0b3aa56168b2397c2f503e8 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2019-03-14T08:28:49.000Z | 2021-12-29T04:23:55.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ShareNetworkArgs', 'ShareNetwork']
# NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen) — see
# the warning in the file header. Do not hand-edit; regeneration overwrites it.
# Each input field is stored via pulumi.set/get and exposed as a property pair.
@pulumi.input_type
class ShareNetworkArgs:
    def __init__(__self__, *,
                 neutron_net_id: pulumi.Input[str],
                 neutron_subnet_id: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 security_service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ShareNetwork resource.
        :param pulumi.Input[str] neutron_net_id: The UUID of a neutron network when setting up or updating
               a share network. Changing this updates the existing share network if it's not used by
               shares.
        :param pulumi.Input[str] neutron_subnet_id: The UUID of the neutron subnet when setting up or
               updating a share network. Changing this updates the existing share network if it's
               not used by shares.
        :param pulumi.Input[str] description: The human-readable description for the share network.
               Changing this updates the description of the existing share network.
        :param pulumi.Input[str] name: The name for the share network. Changing this updates the name
               of the existing share network.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share network. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               share network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_service_ids: The list of security service IDs to associate with
               the share network. The security service must be specified by ID and not name.
        """
        # Required inputs are always set; optional inputs only when provided,
        # so Pulumi can distinguish "unset" from an explicit value.
        pulumi.set(__self__, "neutron_net_id", neutron_net_id)
        pulumi.set(__self__, "neutron_subnet_id", neutron_subnet_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if security_service_ids is not None:
            pulumi.set(__self__, "security_service_ids", security_service_ids)

    @property
    @pulumi.getter(name="neutronNetId")
    def neutron_net_id(self) -> pulumi.Input[str]:
        """
        The UUID of a neutron network when setting up or updating
        a share network. Changing this updates the existing share network if it's not used by
        shares.
        """
        return pulumi.get(self, "neutron_net_id")

    @neutron_net_id.setter
    def neutron_net_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "neutron_net_id", value)

    @property
    @pulumi.getter(name="neutronSubnetId")
    def neutron_subnet_id(self) -> pulumi.Input[str]:
        """
        The UUID of the neutron subnet when setting up or
        updating a share network. Changing this updates the existing share network if it's
        not used by shares.
        """
        return pulumi.get(self, "neutron_subnet_id")

    @neutron_subnet_id.setter
    def neutron_subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "neutron_subnet_id", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The human-readable description for the share network.
        Changing this updates the description of the existing share network.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name for the share network. Changing this updates the name
        of the existing share network.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share network. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        share network.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter(name="securityServiceIds")
    def security_service_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of security service IDs to associate with
        the share network. The security service must be specified by ID and not name.
        """
        return pulumi.get(self, "security_service_ids")

    @security_service_ids.setter
    def security_service_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_service_ids", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not hand-edit.
# State bag for ShareNetwork.get(): every field is optional because it is used
# only to look up / filter existing resources, never to create one.
@pulumi.input_type
class _ShareNetworkState:
    def __init__(__self__, *,
                 cidr: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ip_version: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_type: Optional[pulumi.Input[str]] = None,
                 neutron_net_id: Optional[pulumi.Input[str]] = None,
                 neutron_subnet_id: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 security_service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 segmentation_id: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering ShareNetwork resources.
        :param pulumi.Input[str] cidr: The share network CIDR.
        :param pulumi.Input[str] description: The human-readable description for the share network.
               Changing this updates the description of the existing share network.
        :param pulumi.Input[int] ip_version: The IP version of the share network. Can either be 4 or 6.
        :param pulumi.Input[str] name: The name for the share network. Changing this updates the name
               of the existing share network.
        :param pulumi.Input[str] network_type: The share network type. Can either be VLAN, VXLAN, GRE, or flat.
        :param pulumi.Input[str] neutron_net_id: The UUID of a neutron network when setting up or updating
               a share network. Changing this updates the existing share network if it's not used by
               shares.
        :param pulumi.Input[str] neutron_subnet_id: The UUID of the neutron subnet when setting up or
               updating a share network. Changing this updates the existing share network if it's
               not used by shares.
        :param pulumi.Input[str] project_id: The owner of the Share Network.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share network. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               share network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_service_ids: The list of security service IDs to associate with
               the share network. The security service must be specified by ID and not name.
        :param pulumi.Input[int] segmentation_id: The share network segmentation ID.
        """
        if cidr is not None:
            pulumi.set(__self__, "cidr", cidr)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if ip_version is not None:
            pulumi.set(__self__, "ip_version", ip_version)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if network_type is not None:
            pulumi.set(__self__, "network_type", network_type)
        if neutron_net_id is not None:
            pulumi.set(__self__, "neutron_net_id", neutron_net_id)
        if neutron_subnet_id is not None:
            pulumi.set(__self__, "neutron_subnet_id", neutron_subnet_id)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if security_service_ids is not None:
            pulumi.set(__self__, "security_service_ids", security_service_ids)
        if segmentation_id is not None:
            pulumi.set(__self__, "segmentation_id", segmentation_id)

    @property
    @pulumi.getter
    def cidr(self) -> Optional[pulumi.Input[str]]:
        """
        The share network CIDR.
        """
        return pulumi.get(self, "cidr")

    @cidr.setter
    def cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cidr", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The human-readable description for the share network.
        Changing this updates the description of the existing share network.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> Optional[pulumi.Input[int]]:
        """
        The IP version of the share network. Can either be 4 or 6.
        """
        return pulumi.get(self, "ip_version")

    @ip_version.setter
    def ip_version(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ip_version", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name for the share network. Changing this updates the name
        of the existing share network.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="networkType")
    def network_type(self) -> Optional[pulumi.Input[str]]:
        """
        The share network type. Can either be VLAN, VXLAN, GRE, or flat.
        """
        return pulumi.get(self, "network_type")

    @network_type.setter
    def network_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_type", value)

    @property
    @pulumi.getter(name="neutronNetId")
    def neutron_net_id(self) -> Optional[pulumi.Input[str]]:
        """
        The UUID of a neutron network when setting up or updating
        a share network. Changing this updates the existing share network if it's not used by
        shares.
        """
        return pulumi.get(self, "neutron_net_id")

    @neutron_net_id.setter
    def neutron_net_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "neutron_net_id", value)

    @property
    @pulumi.getter(name="neutronSubnetId")
    def neutron_subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The UUID of the neutron subnet when setting up or
        updating a share network. Changing this updates the existing share network if it's
        not used by shares.
        """
        return pulumi.get(self, "neutron_subnet_id")

    @neutron_subnet_id.setter
    def neutron_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "neutron_subnet_id", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[pulumi.Input[str]]:
        """
        The owner of the Share Network.
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share network. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        share network.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter(name="securityServiceIds")
    def security_service_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of security service IDs to associate with
        the share network. The security service must be specified by ID and not name.
        """
        return pulumi.get(self, "security_service_ids")

    @security_service_ids.setter
    def security_service_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_service_ids", value)

    @property
    @pulumi.getter(name="segmentationId")
    def segmentation_id(self) -> Optional[pulumi.Input[int]]:
        """
        The share network segmentation ID.
        """
        return pulumi.get(self, "segmentation_id")

    @segmentation_id.setter
    def segmentation_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "segmentation_id", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); do not hand-edit.
class ShareNetwork(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 neutron_net_id: Optional[pulumi.Input[str]] = None,
                 neutron_subnet_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 security_service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Use this resource to configure a share network.

        A share network stores network information that share servers can use when
        shares are created.

        ## Example Usage
        ### Basic share network

        ```python
        import pulumi
        import pulumi_openstack as openstack

        network1 = openstack.networking.Network("network1", admin_state_up=True)
        subnet1 = openstack.networking.Subnet("subnet1",
            cidr="192.168.199.0/24",
            ip_version=4,
            network_id=network1.id)
        sharenetwork1 = openstack.sharedfilesystem.ShareNetwork("sharenetwork1",
            description="test share network",
            neutron_net_id=network1.id,
            neutron_subnet_id=subnet1.id)
        ```
        ### Share network with associated security services

        ```python
        import pulumi
        import pulumi_openstack as openstack

        network1 = openstack.networking.Network("network1", admin_state_up=True)
        subnet1 = openstack.networking.Subnet("subnet1",
            cidr="192.168.199.0/24",
            ip_version=4,
            network_id=network1.id)
        securityservice1 = openstack.sharedfilesystem.SecurityService("securityservice1",
            description="created by terraform",
            dns_ip="192.168.199.10",
            domain="example.com",
            ou="CN=Computers,DC=example,DC=com",
            password="s8cret",
            server="192.168.199.10",
            type="active_directory",
            user="joinDomainUser")
        sharenetwork1 = openstack.sharedfilesystem.ShareNetwork("sharenetwork1",
            description="test share network with security services",
            neutron_net_id=network1.id,
            neutron_subnet_id=subnet1.id,
            security_service_ids=[securityservice1.id])
        ```

        ## Import

        This resource can be imported by specifying the ID of the share network

        ```sh
         $ pulumi import openstack:sharedfilesystem/shareNetwork:ShareNetwork sharenetwork_1 <id>
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The human-readable description for the share network.
               Changing this updates the description of the existing share network.
        :param pulumi.Input[str] name: The name for the share network. Changing this updates the name
               of the existing share network.
        :param pulumi.Input[str] neutron_net_id: The UUID of a neutron network when setting up or updating
               a share network. Changing this updates the existing share network if it's not used by
               shares.
        :param pulumi.Input[str] neutron_subnet_id: The UUID of the neutron subnet when setting up or
               updating a share network. Changing this updates the existing share network if it's
               not used by shares.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share network. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               share network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_service_ids: The list of security service IDs to associate with
               the share network. The security service must be specified by ID and not name.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ShareNetworkArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Use this resource to configure a share network.

        A share network stores network information that share servers can use when
        shares are created.

        ## Example Usage
        ### Basic share network

        ```python
        import pulumi
        import pulumi_openstack as openstack

        network1 = openstack.networking.Network("network1", admin_state_up=True)
        subnet1 = openstack.networking.Subnet("subnet1",
            cidr="192.168.199.0/24",
            ip_version=4,
            network_id=network1.id)
        sharenetwork1 = openstack.sharedfilesystem.ShareNetwork("sharenetwork1",
            description="test share network",
            neutron_net_id=network1.id,
            neutron_subnet_id=subnet1.id)
        ```
        ### Share network with associated security services

        ```python
        import pulumi
        import pulumi_openstack as openstack

        network1 = openstack.networking.Network("network1", admin_state_up=True)
        subnet1 = openstack.networking.Subnet("subnet1",
            cidr="192.168.199.0/24",
            ip_version=4,
            network_id=network1.id)
        securityservice1 = openstack.sharedfilesystem.SecurityService("securityservice1",
            description="created by terraform",
            dns_ip="192.168.199.10",
            domain="example.com",
            ou="CN=Computers,DC=example,DC=com",
            password="s8cret",
            server="192.168.199.10",
            type="active_directory",
            user="joinDomainUser")
        sharenetwork1 = openstack.sharedfilesystem.ShareNetwork("sharenetwork1",
            description="test share network with security services",
            neutron_net_id=network1.id,
            neutron_subnet_id=subnet1.id,
            security_service_ids=[securityservice1.id])
        ```

        ## Import

        This resource can be imported by specifying the ID of the share network

        ```sh
         $ pulumi import openstack:sharedfilesystem/shareNetwork:ShareNetwork sharenetwork_1 <id>
        ```

        :param str resource_name: The name of the resource.
        :param ShareNetworkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: an args-object call is
        # unpacked into keyword arguments, otherwise pass everything through.
        resource_args, opts = _utilities.get_resource_args_opts(ShareNetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 neutron_net_id: Optional[pulumi.Input[str]] = None,
                 neutron_subnet_id: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 security_service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means this is a lookup of an existing resource, in which
        # case __props__ is supplied by get() and inputs must not be re-built.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ShareNetworkArgs.__new__(ShareNetworkArgs)

            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            if neutron_net_id is None and not opts.urn:
                raise TypeError("Missing required property 'neutron_net_id'")
            __props__.__dict__["neutron_net_id"] = neutron_net_id
            if neutron_subnet_id is None and not opts.urn:
                raise TypeError("Missing required property 'neutron_subnet_id'")
            __props__.__dict__["neutron_subnet_id"] = neutron_subnet_id
            __props__.__dict__["region"] = region
            __props__.__dict__["security_service_ids"] = security_service_ids
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["cidr"] = None
            __props__.__dict__["ip_version"] = None
            __props__.__dict__["network_type"] = None
            __props__.__dict__["project_id"] = None
            __props__.__dict__["segmentation_id"] = None
        super(ShareNetwork, __self__).__init__(
            'openstack:sharedfilesystem/shareNetwork:ShareNetwork',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            cidr: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            ip_version: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            network_type: Optional[pulumi.Input[str]] = None,
            neutron_net_id: Optional[pulumi.Input[str]] = None,
            neutron_subnet_id: Optional[pulumi.Input[str]] = None,
            project_id: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None,
            security_service_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            segmentation_id: Optional[pulumi.Input[int]] = None) -> 'ShareNetwork':
        """
        Get an existing ShareNetwork resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cidr: The share network CIDR.
        :param pulumi.Input[str] description: The human-readable description for the share network.
               Changing this updates the description of the existing share network.
        :param pulumi.Input[int] ip_version: The IP version of the share network. Can either be 4 or 6.
        :param pulumi.Input[str] name: The name for the share network. Changing this updates the name
               of the existing share network.
        :param pulumi.Input[str] network_type: The share network type. Can either be VLAN, VXLAN, GRE, or flat.
        :param pulumi.Input[str] neutron_net_id: The UUID of a neutron network when setting up or updating
               a share network. Changing this updates the existing share network if it's not used by
               shares.
        :param pulumi.Input[str] neutron_subnet_id: The UUID of the neutron subnet when setting up or
               updating a share network. Changing this updates the existing share network if it's
               not used by shares.
        :param pulumi.Input[str] project_id: The owner of the Share Network.
        :param pulumi.Input[str] region: The region in which to obtain the V2 Shared File System client.
               A Shared File System client is needed to create a share network. If omitted, the
               `region` argument of the provider is used. Changing this creates a new
               share network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_service_ids: The list of security service IDs to associate with
               the share network. The security service must be specified by ID and not name.
        :param pulumi.Input[int] segmentation_id: The share network segmentation ID.
        """
        # Passing id via ResourceOptions makes _internal_init take the lookup
        # path instead of registering a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ShareNetworkState.__new__(_ShareNetworkState)

        __props__.__dict__["cidr"] = cidr
        __props__.__dict__["description"] = description
        __props__.__dict__["ip_version"] = ip_version
        __props__.__dict__["name"] = name
        __props__.__dict__["network_type"] = network_type
        __props__.__dict__["neutron_net_id"] = neutron_net_id
        __props__.__dict__["neutron_subnet_id"] = neutron_subnet_id
        __props__.__dict__["project_id"] = project_id
        __props__.__dict__["region"] = region
        __props__.__dict__["security_service_ids"] = security_service_ids
        __props__.__dict__["segmentation_id"] = segmentation_id
        return ShareNetwork(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def cidr(self) -> pulumi.Output[str]:
        """
        The share network CIDR.
        """
        return pulumi.get(self, "cidr")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The human-readable description for the share network.
        Changing this updates the description of the existing share network.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> pulumi.Output[int]:
        """
        The IP version of the share network. Can either be 4 or 6.
        """
        return pulumi.get(self, "ip_version")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name for the share network. Changing this updates the name
        of the existing share network.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkType")
    def network_type(self) -> pulumi.Output[str]:
        """
        The share network type. Can either be VLAN, VXLAN, GRE, or flat.
        """
        return pulumi.get(self, "network_type")

    @property
    @pulumi.getter(name="neutronNetId")
    def neutron_net_id(self) -> pulumi.Output[str]:
        """
        The UUID of a neutron network when setting up or updating
        a share network. Changing this updates the existing share network if it's not used by
        shares.
        """
        return pulumi.get(self, "neutron_net_id")

    @property
    @pulumi.getter(name="neutronSubnetId")
    def neutron_subnet_id(self) -> pulumi.Output[str]:
        """
        The UUID of the neutron subnet when setting up or
        updating a share network. Changing this updates the existing share network if it's
        not used by shares.
        """
        return pulumi.get(self, "neutron_subnet_id")

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Output[str]:
        """
        The owner of the Share Network.
        """
        return pulumi.get(self, "project_id")

    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        The region in which to obtain the V2 Shared File System client.
        A Shared File System client is needed to create a share network. If omitted, the
        `region` argument of the provider is used. Changing this creates a new
        share network.
        """
        return pulumi.get(self, "region")

    @property
    @pulumi.getter(name="securityServiceIds")
    def security_service_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The list of security service IDs to associate with
        the share network. The security service must be specified by ID and not name.
        """
        return pulumi.get(self, "security_service_ids")

    @property
    @pulumi.getter(name="segmentationId")
    def segmentation_id(self) -> pulumi.Output[int]:
        """
        The share network segmentation ID.
        """
        return pulumi.get(self, "segmentation_id")
| 43.516312 | 134 | 0.642459 |
89d229a897e1046dd1fd91a045fef6b9f9391a64 | 12,177 | py | Python | src/data_loader.py | jells123/Sound_event_detection | 7afeadd1eb9450f89bb159480d5a0ee0296e09dd | [
"MIT"
] | null | null | null | src/data_loader.py | jells123/Sound_event_detection | 7afeadd1eb9450f89bb159480d5a0ee0296e09dd | [
"MIT"
] | null | null | null | src/data_loader.py | jells123/Sound_event_detection | 7afeadd1eb9450f89bb159480d5a0ee0296e09dd | [
"MIT"
] | null | null | null | import os
import numpy as np
import random
import configparser
class data_loader(object):
def __init__(self, conf_dir):
""""
Load the data stream for training.
Count relevant parameters on the data set for DF and GL.
Args:
conf_dir: string
the path of configuration dir
Attributes:
conf_dir
feature_dir
label_dir
train_lst
vali_lst
test_lst
vali_csv
win_len_csv
LEN
DIM
CLASS
batch_size
events
dinsentangle_n
dinsentangle_m
ratio_for_win_len
ep_per_epochs
Interface:
init_data_conf: Initialize most of attribute values from data configuration file.
read_lst: Read multiple lines from a file and convert them to a list.
get_train: Get the file list of the training set.
get_vali: Get the file list and the groundtruths of the validation set.
get_test: Get the file list and the groundtruths of the test set.
count_disentangle: Count the coefficient for the DF dimension per class.
count_win_len_per_class: Count a group of adaptive window sizes for the median filters.
generator_train: Generate a generator for training.
generator_vali: Generate data from vali_lst.
generator_test: Generate data from test_lst.
generator_weak: Generate data from weak_lst.
generator_all: To generate feature data list and label data list for test_lst, vali_lst or weak_lst.
generator: Generate a generator for prediction.
"""
self.conf_dir = conf_dir
self.init_data_conf()
def init_data_conf(self):
""""
Initialize most of attribute values from data configuration file.
Args:
Return:
"""
conf_dir = self.conf_dir
data_cfg_path = os.path.join(conf_dir, 'data.cfg')
assert os.path.exists(data_cfg_path)
config = configparser.ConfigParser()
config.read(data_cfg_path)
assert 'path' in config.sections()
path_cfg = config['path']
self.feature_dir = path_cfg['feature_dir']
self.label_dir = path_cfg['label_dir']
self.train_lst = path_cfg['train_lst']
self.vali_lst = path_cfg['vali_lst']
self.test_lst = path_cfg['test_lst']
self.vali_csv = path_cfg['vali_csv']
self.test_csv = path_cfg['test_csv']
self.win_len_csv = path_cfg['win_len_csv']
files = [self.feature_dir,
self.label_dir,
self.train_lst,
self.test_lst,
self.vali_lst,
self.test_csv,
self.vali_csv,
self.win_len_csv]
#ensure that all the paths are valid
for f in files:
assert os.path.exists(f)
assert 'parameter' in config.sections()
parameter_cfg = config['parameter']
self.LEN = int(parameter_cfg['LEN'])
self.DIM = int(parameter_cfg['DIM'])
self.batch_size = int(parameter_cfg['batch_size'])
self.dinsentangle_n = int(parameter_cfg['dinsentangle_n'])
self.dinsentangle_m = float(parameter_cfg['dinsentangle_m'])
self.ratio_for_win_len = float(parameter_cfg['ratio_for_win_len'])
self.ep_per_epochs = float(parameter_cfg['ep_per_epochs'])
self.exponent = float(parameter_cfg['exponent'])
self.start_epoch = int(parameter_cfg['start_epoch'])
assert'events' in config.sections()
event_cfg = config['events']
self.events = event_cfg['events'].split(',')
self.CLASS = len(self.events)
def read_lst(self, lst):
""""
Read multiple lines from a file and convert them to a list.
(a general tool)
Args:
lst: list
the path of a file to read
Return:
files: list
multiple file ids from the file
f_len: integer
the length of the list to return
"""
with open(lst) as f:
files = f.readlines()
files = [f.rstrip() for f in files]
f_len = len(files)
return files, f_len
def get_train(self):
""""
Get the file list of the training set.
Args:
Return:
lst: list
multiple file ids from the train_lst
"""
lst, _ = self.read_lst(self.train_lst)
return lst
def get_vali(self):
""""
Get the file list and the groundtruths of the validation set.
Args:
Return:
lst: list
multiple file ids from the vali_lst
csv: list
multiple strong groundtruths from the vali_csv
"""
lst, _ = self.read_lst(self.vali_lst)
csv, _ = self.read_lst(self.vali_csv)
return lst, csv
def get_test(self):
""""
Get the file list and the groundtruths of the test set.
Args:
Return:
lst: list
multiple file ids from the test_lst
csv: list
multiple strong groundtruths from the test_csv
"""
lst, _ = self.read_lst(self.test_lst)
csv, _ = self.read_lst(self.test_csv)
return lst, csv
    def count_disentangle(self):
        """
        Count the coefficient for the DF dimension per class.
        coefficient x hidden feature dimension = DF dimension

        For each labelled training clip with at most ``dinsentangle_n``
        active classes, the label vector is accumulated per co-occurrence
        count; the counts are then weighted by 1/count, normalised to a
        maximum of 1, and floored at ``dinsentangle_m``.

        Args:
        Return:
            disentangle: list
                a group of coefficients, one per class, in (m, 1].
        """
        n = self.dinsentangle_n
        m = self.dinsentangle_m
        CLASS = self.CLASS
        #get the file list of the training set
        lst = self.get_train()
        label_dir = self.label_dir
        disentangle = np.zeros([CLASS])
        # co_occurence[c, k]: how often class c appears in clips with k
        # simultaneously active classes (column 0 stays unused/zero).
        co_occurence = np.zeros([CLASS, CLASS + 1])
        for f in lst:
            path = os.path.join(label_dir, f + '.npy')
            #ignore the unlabeled training data
            if os.path.exists(path):
                label = np.load(path)
                #count the number of the clips containing n event classes in the training set
                co_occ = int(np.sum(label))
                if co_occ > n:
                    continue
                co_occurence[:, co_occ] += label
        # Weight a k-class clip by 1/k before summing over k.
        weights = np.zeros([CLASS + 1, 1])
        for i in range(CLASS):
            weights[i + 1, 0] = 1 / (i + 1)
        disentangle = np.matmul(co_occurence, weights)
        disentangle = np.reshape(disentangle, [CLASS])
        #normalization
        # NOTE(review): np.max(disentangle) is 0 if no labelled clip passes
        # the co_occ <= n filter, producing NaNs — confirm inputs.
        disentangle = disentangle / np.max(disentangle)
        #prevent too-small DF coefficient
        disentangle = disentangle * (1-m) + m
        return disentangle
    def count_win_len_per_class(self, top_len):
        """
        Count a group of adaptive window sizes for the median filters.

        The average event duration (in output frames) is computed per class
        from the strong labels in ``win_len_csv`` and scaled by
        ``ratio_for_win_len``; every window size is at least 1.

        Args:
            top_len: integer
                the sequence length (frames) of the final output of the model
        Return:
            out: list
                the adaptive window sizes of the median filters
        """
        path = self.win_len_csv
        ratio_for_win_len = self.ratio_for_win_len
        #get strong label (timestamps) from win_len_csv
        csv, clen = self.read_lst(path)
        label_cnt = {}
        for event in self.events:
            label_cnt[event] = {'num':0, 'frame':0}
        #get the number of frames per second
        # assumes every clip is 10 seconds long — TODO confirm
        frames_per_second = top_len / 10.0
        #count the total number of frames and total number of occurrences per event class in win_len_csv
        for c in csv:
            # Expected tab-separated columns: id, onset, offset, event label.
            cs = c.split('\t')
            if len(cs) < 4:
                continue
            label = cs[3]
            label_cnt[label]['num'] += 1
            label_cnt[label]['frame'] += (
                (float(cs[2])-float(cs[1])) * frames_per_second)
        #count the number of frames per occurrence per event class
        # NOTE(review): raises ZeroDivisionError if an event class never
        # occurs in win_len_csv — confirm every class appears at least once.
        for label in label_cnt:
            label_cnt[label]['win_len'] = int(label_cnt[label]['frame'] / label_cnt[label]['num'])
        #get adaptive window sizes by multiplying by ratio_for_win_len
        out = []
        for label in label_cnt:
            out += [int(label_cnt[label]['win_len'] * ratio_for_win_len)]
            # Median-filter windows must be at least one frame wide.
            if out[-1] == 0:
                out[-1] = 1
        return out
    def generator_train(self):
        """
        Generate a generator for training.

        The inner generator loops over the (shuffled) training list forever,
        yielding batches of features together with a concatenation of
        [labels, masks, unsupervised-loss weight].  Clips without a label
        file are treated as unlabeled (zero label, zero mask).

        Args:
        Return:
            generator: function
                that can generate a generator for training
            steps: integer
                steps (the number of batches) per epoch
                NOTE(review): actually a float, because ep_per_epochs is
                parsed as float and `//` of floats returns a float — confirm
                downstream consumers accept that.
        """
        train_lst = self.train_lst
        batch_size = self.batch_size
        feature_dir = self.feature_dir
        label_dir = self.label_dir
        CLASS = self.CLASS
        LEN = self.LEN
        DIM = self.DIM
        start_epoch = self.start_epoch
        exponent = self.exponent
        ep_per_epochs = self.ep_per_epochs
        #get file list from train_lst
        files, f_len = self.read_lst(train_lst)
        steps = (f_len * ep_per_epochs + batch_size-1) // batch_size
        #shuffle train_lst
        random.shuffle(files)
        def generator():
            #index of file list
            i = 0
            #index of a batch
            cur = 0
            #current epoch
            epoch = 0
            #current step in a epoch
            step = 0
            while True:
                #get the ith file of the file list
                f = files[i]
                i = (i + 1)%f_len
                data_f = os.path.join(feature_dir, f + '.npy')
                assert os.path.exists(data_f)
                data = np.load(data_f)
                label_f = os.path.join(label_dir, f + '.npy')
                #use mask to separate unlabeled data when calculating loss
                if os.path.exists(label_f):
                    label = np.load(label_f)
                    mask = np.ones([CLASS])
                else:
                    #unlabeled data
                    label = np.zeros([CLASS])
                    mask = np.zeros([CLASS])
                #batch begin
                if cur == 0:
                    labels = np.zeros([batch_size, CLASS])
                    masks = np.zeros([batch_size, CLASS])
                    train_data = np.zeros(
                        [batch_size, LEN, DIM])
                #fill batch
                train_data[cur] = data
                labels[cur] = label
                masks[cur] = mask
                cur += 1
                #batch end
                if cur == batch_size:
                    cur = 0
                    #count the weight of unsupervised loss for the PT-model
                    # Ramp-up schedule: 0 until start_epoch, then
                    # 1 - exponent**(epoch - start_epoch).
                    if epoch > start_epoch:
                        a = 1-np.power(exponent, epoch-start_epoch)
                    else:
                        a = 0
                    #[feature, label, mask, the weight of unsupervised loss]
                    yield train_data, np.concatenate(
                        [labels, masks,
                         np.ones([batch_size, 1]) * a], axis = -1)
                    #count current step and epoch
                    step += 1
                    if step%steps == 0:
                        epoch += 1
                        step = 0
                    if i == 0:
                        #all data consumed in a round, shuffle
                        #print('[ epoch %d , a: %f ]'%(epoch, a))
                        random.shuffle(files)
        return generator, steps
def generator_vali(self):
""""
Generate data from vali_lst.
Args:
Return:
/ : tuple
feature list and label list of vali_lst
"""
return self.generator_all('vali')
def generator_test(self):
""""
Generate data from test_lst.
Args:
Return:
/ : tuple
feature list and label list of test_lst
"""
return self.generator_all('test')
def generator_weak(self):
""""
Generate data from weak_lst.
Args:
Return:
/ : tuple
feature list and label list of weak_lst
"""
return self.generator_all('weak')
def generator_all(self, mode):
""""
To generate feature data list and label data list for test_lst, vali_lst or weak_lst.
Args:
mode: string in ['vali','test','weak']
prediction mode
Return:
data: list
feature data
labels: list
label data
"""
gt, steps = self.generator(mode)
gt = gt()
data = []
labels = []
for cnt, (X, Y) in enumerate(gt):
data += [X]
labels += [Y]
data = np.concatenate(data)
labels = np.concatenate(labels)
return data, labels
    def generator(self, mode):
        """
        Generate a generator for prediction.

        Args:
            mode: string in ['vali','test','weak']
                selects which file list is iterated; any other value leaves
                gen_lst unbound and raises NameError.
        Return:
            generator: function
                that can generate a generator for prediction
            steps: integer
                the number of total batches
        """
        #set file list to solve
        if mode == 'vali':
            gen_lst = self.vali_lst
        elif mode == 'test':
            gen_lst = self.test_lst
        elif mode == 'weak':
            gen_lst = self.train_lst
        batch_size = self.batch_size
        feature_dir = self.feature_dir
        label_dir = self.label_dir
        CLASS = self.CLASS
        LEN = self.LEN
        DIM = self.DIM
        files, f_len = self.read_lst(gen_lst)
        def generator():
            #the index of the file list
            cur = 0
            for i in range(f_len):
                #batch begin
                if i%batch_size == 0:
                    train_data = np.zeros([batch_size,
                        LEN, DIM])
                    #[label, mask, weight] be consistent with the generator of training, but for prediction, we just use labels[:, :CLASS]
                    tclass = CLASS * 2 + 1
                    labels = np.ones([batch_size, tclass])
                f = files[i]
                data_f = os.path.join(feature_dir, f + '.npy')
                assert os.path.exists(data_f)
                data = np.load(data_f)
                # NOTE(review): `mask` is computed but never used below.
                mask = np.ones([LEN, CLASS])
                label_f = os.path.join(label_dir, f + '.npy')
                #we can predict for unlabeled data, but can not calculate correct score without label files
                if os.path.exists(label_f):
                    label = np.load(label_f)
                else:
                    label = np.zeros([CLASS])
                train_data[cur] = data
                labels[cur, :CLASS] = label
                cur += 1
                #batch end
                if cur == batch_size:
                    yield train_data, labels
                    cur = 0
            #yield the last batch
            # NOTE(review): a partial final batch is yielded with its unfilled
            # rows left at the initial values (zeros features, ones labels) —
            # callers should trim using f_len.
            if not f_len%batch_size == 0:
                yield train_data, labels
        steps = (f_len + batch_size-1) // batch_size
        return generator, steps
| 25.316008 | 123 | 0.662396 |
1a7a0165607bd086041881b87ba9915f6487fa47 | 1,721 | py | Python | conda/cli/find_commands.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | conda/cli/find_commands.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | conda/cli/find_commands.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division, absolute_import
import re
import os
import sys
import subprocess
from os.path import isdir, isfile, join
# Candidate directories for "conda-<name>" executables: the interpreter's
# own Scripts/bin directory first, then every entry on PATH.
# NOTE(review): raises KeyError if PATH is missing from the environment.
if sys.platform == 'win32':
    dir_paths = [join(sys.prefix, 'Scripts')]
else:
    dir_paths = [join(sys.prefix, 'bin')]
dir_paths.extend(os.environ['PATH'].split(os.pathsep))
def find_executable(cmd):
    """Return the full path of the external command ``conda-<cmd>``.

    Searches the directories in ``dir_paths`` in order; on Windows the
    ``.exe`` and ``.bat`` suffixes are tried.  Returns None if not found.
    """
    executable = 'conda-%s' % cmd
    if sys.platform == 'win32':
        candidates = [executable + ext for ext in ('.exe', '.bat')]
    else:
        candidates = [executable]
    for dir_path in dir_paths:
        for name in candidates:
            candidate = join(dir_path, name)
            if isfile(candidate):
                return candidate
    return None
def find_commands():
    """Return the sorted names of all external ``conda-<name>`` commands."""
    if sys.platform == 'win32':
        pattern = re.compile(r'conda-(\w+)\.(exe|bat)$')
    else:
        pattern = re.compile(r'conda-(\w+)$')
    names = set()
    for dir_path in dir_paths:
        if not isdir(dir_path):
            continue
        for entry in os.listdir(dir_path):
            match = pattern.match(entry)
            if match:
                names.add(match.group(1))
    return sorted(names)
def filter_descr(cmd):
    """Print a two-column line (name + short description) for ``cmd``.

    The description is the second blank-line-separated section of the
    command's ``--help`` output.
    """
    args = [find_executable(cmd), '--help']
    try:
        output = subprocess.check_output(args)
    except subprocess.CalledProcessError:
        print('failed: %s' % (' '.join(args)))
        return
    sections = output.decode('utf-8').split('\n\n')
    if len(sections) > 1:
        descr = sections[1]
    else:
        descr = '<could not extract description>'
    print(' %-12s %s' % (cmd, descr))
def help():
    """Print the description of every discovered external command.

    NOTE: intentionally shadows the ``help`` builtin within this module.
    """
    print("\nexternal commands:")
    commands = find_commands()
    for cmd in commands:
        filter_descr(cmd)
if __name__ == '__main__':
    # Manual smoke test: list all external conda sub-commands.
    help()
| 23.575342 | 64 | 0.568274 |
59919fc669b4700caae296a47f33e8c2d39932c0 | 2,922 | py | Python | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-446-ydk.py | deom119/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 104 | 2016-03-15T17:04:01.000Z | 2021-12-31T06:09:35.000Z | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-446-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 15 | 2016-03-15T23:09:47.000Z | 2020-08-13T12:13:18.000Z | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-446-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 87 | 2016-04-15T16:59:23.000Z | 2021-09-18T18:05:47.000Z | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-snmp-test-trap-act.
usage: nc-execute-xr-snmp-test-trap-act-446-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_snmp_test_trap_act \
as xr_snmp_test_trap_act
import logging
def prepare_platform_hfr_bundle_state(platform_hfr_bundle_state):
    """Populate the RPC input of the given object with the bundle name."""
    rpc_input = platform_hfr_bundle_state.input
    rpc_input.bundle_name = "F0/SM0/FM/0"
if __name__ == "__main__":
    """Execute main program."""
    # Parse the target device URL and the optional verbosity flag.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    device = urlparse(args.device)
    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # create NETCONF provider
    # Connection details come straight from the parsed device URL.
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # create executor service
    executor = ExecutorService()
    platform_hfr_bundle_state = xr_snmp_test_trap_act.PlatformHfrBundleState()  # create object
    prepare_platform_hfr_bundle_state(platform_hfr_bundle_state)  # add RPC input
    # execute RPC on NETCONF device
    executor.execute_rpc(provider, platform_hfr_bundle_state)
    exit()
# End of script
| 35.204819 | 95 | 0.680356 |
4f046e95a64e3b130714b1440f93cc121fd7aaf0 | 620 | py | Python | manage.py | jcarder5/songbase | c409a4ea9b13eef40e7496d5fe2c2a1e0b0953c3 | [
"MIT"
] | null | null | null | manage.py | jcarder5/songbase | c409a4ea9b13eef40e7496d5fe2c2a1e0b0953c3 | [
"MIT"
] | null | null | null | manage.py | jcarder5/songbase | c409a4ea9b13eef40e7496d5fe2c2a1e0b0953c3 | [
"MIT"
] | null | null | null | from flask_script import Manager
from songbase import app, db, Artist, Song
manager = Manager(app)
# reset the database and seed it with sample rows
@manager.command
def deploy():
    """Drop and recreate all tables, then insert two artists and one song."""
    db.drop_all()
    db.create_all()
    coldplay = Artist(name='Coldplay', about='Coldplay is a British rock band.')
    maroon5 = Artist(name='Maroon 5', about='Maroon 5 is an American pop rock band.')
    song1 = Song(name='yellow', year = 2004, lyrics = 'bla bla', artist = coldplay)
    db.session.add(coldplay)
    db.session.add(maroon5)
    db.session.add(song1)
    db.session.commit()
if __name__ == '__main__':
    manager.run()
| 26.956522 | 85 | 0.68871 |
d52f3d74e5f2e5c00ed3733a74b224b7a4a8afe5 | 127 | py | Python | tests/scripts/echo2.py | pquentin/trio-asyncio | 2440c92a2b6674e834d6f791d46aba0aa82b1ffd | [
"Apache-2.0",
"MIT"
] | 160 | 2018-01-12T20:17:07.000Z | 2022-03-18T15:44:56.000Z | tests/scripts/echo2.py | pquentin/trio-asyncio | 2440c92a2b6674e834d6f791d46aba0aa82b1ffd | [
"Apache-2.0",
"MIT"
] | 108 | 2018-01-21T12:53:14.000Z | 2022-02-08T10:44:54.000Z | tests/scripts/echo2.py | pquentin/trio-asyncio | 2440c92a2b6674e834d6f791d46aba0aa82b1ffd | [
"Apache-2.0",
"MIT"
] | 33 | 2018-01-21T09:29:38.000Z | 2022-01-16T21:11:50.000Z | import os
if __name__ == '__main__':
    # Read one chunk (up to 1 KiB) from stdin and echo it to both stdout
    # and stderr with distinguishing prefixes — presumably so a parent
    # process test can verify the two streams are captured separately.
    buf = os.read(0, 1024)
    os.write(1, b'OUT:' + buf)
    os.write(2, b'ERR:' + buf)
| 18.142857 | 30 | 0.551181 |
79449e7c21add45309112648bfb8de3f2b32d5f6 | 1,077 | py | Python | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | test/ml/classification/test_classification.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null |
import unittest as ut
import sys
sys.path.append("../../../package")
import ml.classification
class TestClassification(ut.TestCase):
    """Smoke-tests for the classifier factory with a tiny 1-D dataset."""

    def setUp(self):
        super(TestClassification, self).setUp()

    def _check_classifier(self, name):
        """Train the named classifier on four points and check one prediction.

        Shared helper that deduplicates the identical KNN/SVM/Bayes test
        bodies: one feature, two classes split at 1.5, and 1.1 must be
        labelled as class 0.
        """
        model = ml.classification.getClassifierByName(name)
        # Fresh data per call in case train() mutates its input.
        data = dict()
        data["features"] = [[0], [1], [2], [3]]
        data["label"] = [0, 0, 1, 1]
        model.train(data)
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(model.predict([[1.1]]), [0])

    def test_knn(self):
        self._check_classifier("KNN")

    def test_svm(self):
        self._check_classifier("SVM")

    def test_bayes(self):
        self._check_classifier("Bayes")
if __name__ == "__main__":
    # Allow running this test module directly.
    ut.main()
| 25.642857 | 62 | 0.551532 |
27b9003fd5c57b643d62f488448caa2b55ff2bd2 | 239 | py | Python | reusable_components/__init__.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | 1 | 2019-03-04T03:17:19.000Z | 2019-03-04T03:17:19.000Z | reusable_components/__init__.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | 3 | 2021-03-31T19:16:27.000Z | 2021-12-13T20:27:16.000Z | reusable_components/__init__.py | blozano824/dash-docs | f2b5a9dcbf60603aa0d0caabcfa31dccc6face7d | [
"MIT"
] | 1 | 2022-03-18T09:41:34.000Z | 2022-03-18T09:41:34.000Z | from .Column import Column # noqa: F401
from .Header import Header # noqa: F401
from .Row import Row # noqa: F401
from .Section import Section
from .Chapter import Chapter
from .Blockquote import Blockquote
from .Notebox import Notebox
| 29.875 | 40 | 0.778243 |
13dc5edd5a4d0cb6d4c666c76fa64d04e79f0aa4 | 10,339 | py | Python | activemri/baselines/loupe_codes/samplers.py | tianweiy/SeqMRI | 930c056284ab5538881cb6cf4138a107944fd29c | [
"MIT"
] | 16 | 2021-05-14T01:14:24.000Z | 2022-02-11T10:02:04.000Z | activemri/baselines/loupe_codes/samplers.py | tianweiy/SeqMRI | 930c056284ab5538881cb6cf4138a107944fd29c | [
"MIT"
] | 1 | 2021-10-21T13:31:35.000Z | 2021-10-21T13:31:35.000Z | activemri/baselines/loupe_codes/samplers.py | tianweiy/SeqMRI | 930c056284ab5538881cb6cf4138a107944fd29c | [
"MIT"
] | 2 | 2021-05-14T04:44:57.000Z | 2021-11-08T03:08:58.000Z | from enum import auto
import torch
from torch import nn
from torch.nn import functional as F
from activemri.baselines.loupe_codes.layers import *
from activemri.baselines.loupe_codes import transforms
from torch.autograd import Function
import matplotlib.pyplot as plt
import numpy as np
import sigpy
import sigpy.mri
class LOUPESampler(nn.Module):
    """
    LOUPE Sampler

    Learns a probabilistic k-space sampling mask (pixel-wise or
    line-constrained), binarizes a realisation of it, and applies it to the
    input k-space.  Fixed baselines (random / poisson / spectrum /
    equispaced) can replace the learned mask.
    """
    def __init__(self, shape=[320, 320], slope=5, sparsity=0.25, line_constrained=False,
                 conjugate_mask=False, preselect=False, preselect_num=2, random=False, poisson=False,
                 spectrum=False, equispaced=False):
        """
        shape ([int. int]): Shape of the reconstructed image
        slope (float): Slope for the Loupe probability mask. Larger slopes make the mask converge faster to
            deterministic state.
        sparsity (float): Predefined sparsity of the learned probability mask. 1 / acceleration_ratio
        line_constrained (bool): Sample kspace measurements column by column
        conjugate_mask (bool): For real image, the corresponding kspace measurements have conjugate symmetry property
            (point reflection). Therefore, the information in the left half of the kspace image is the same as the
            other half. To take advantage of this, we can force the model to only sample right half of the kspace
            (when conjugate_mask is set to True)
        preselect: preselect center regions
        random/poisson/spectrum/equispaced (bool): use the corresponding
            fixed baseline mask instead of the learned one.
        """
        super().__init__()
        assert conjugate_mask is False
        # probability mask
        if line_constrained:
            self.gen_mask = LineConstrainedProbMask(shape, slope, preselect=preselect, preselect_num=preselect_num)
        else:
            self.gen_mask = ProbMask(shape, slope, preselect_num=preselect_num)
        self.rescale = RescaleProbMap
        self.binarize = ThresholdRandomMaskSigmoidV1.apply # FIXME
        self.preselect =preselect
        self.preselect_num_one_side = preselect_num // 2
        self.shape = shape
        self.line_constrained = line_constrained
        self.random_baseline = random
        self.poisson_baseline = poisson
        self.spectrum_baseline = spectrum
        self.equispaced_baseline = equispaced
        # NOTE(review): all baseline branches below hard-code a 128x128 grid
        # — confirm `shape` is (128, 128) whenever a baseline flag is set.
        if self.poisson_baseline:
            self.acc = 1 / (sparsity + (self.preselect_num_one_side*2)**2 / (128*128))
            print("generate variable density mask with acceleration {}".format(self.acc))
        if self.spectrum_baseline:
            acc = 1 / (sparsity + (self.preselect_num_one_side*2)**2 / (128*128))
            print("generate spectrum mask with acceleration {}".format(acc))
            mask = torch.load('resources/spectrum_{}x_128.pt'.format(int(acc)))
            mask = mask.reshape(1, 128, 128, 1).float()
            # Stored as a frozen parameter so it moves with the module.
            self.spectrum_mask = nn.Parameter(mask, requires_grad=False)
        if self.equispaced_baseline:
            acc = 1 / (sparsity + (self.preselect_num_one_side)*2 / (128))
            assert self.line_constrained
            assert acc == 4
            mask = torch.load('resources/equispaced_4x_128.pt').reshape(1, 128, 128, 1).float()
            self.equispaced_mask = nn.Parameter(mask, requires_grad=False)
    def _gen_poisson_mask(self):
        # Draw a fresh variable-density Poisson-disc mask of shape
        # (1, 128, 128, 1) at the acceleration computed in __init__.
        mask = sigpy.mri.poisson((128, 128), self.acc, dtype='int32', crop_corner=False)
        mask = torch.tensor(mask).reshape(1, 128, 128, 1).float()
        return mask
    def _mask_neg_entropy(self, mask, eps=1e-10):
        # negative of pixel wise entropy
        entropy = mask * torch.log(mask+eps) + (1-mask) * torch.log(1-mask+eps)
        return entropy
    def forward(self, kspace, sparsity):
        """Sample `kspace`; returns (masked_kspace, binarized_mask,
        neg_entropy, vis_dict)."""
        # kspace: NHWC
        # sparsity (float)
        prob_mask = self.gen_mask(kspace)
        if self.random_baseline:
            # Uniform probability 1/4 everywhere instead of the learned mask.
            prob_mask = torch.ones_like(prob_mask) / 4
        # Zero the preselected low-frequency region (DC lives in the corners
        # here, i.e. before fftshift) so rescaling budgets only the rest.
        if not self.line_constrained:
            prob_mask[:, :self.preselect_num_one_side, :self.preselect_num_one_side] = 0
            prob_mask[:, :self.preselect_num_one_side, -self.preselect_num_one_side:] = 0
            prob_mask[:, -self.preselect_num_one_side:, :self.preselect_num_one_side] = 0
            prob_mask[:, -self.preselect_num_one_side:, -self.preselect_num_one_side:] = 0
        else:
            prob_mask[..., :self.preselect_num_one_side, :] = 0
            prob_mask[..., -self.preselect_num_one_side:, :] = 0
        if not self.preselect:
            rescaled_mask = self.rescale(prob_mask, sparsity)
            binarized_mask = self.binarize(rescaled_mask)
        else:
            rescaled_mask = self.rescale(prob_mask, sparsity)
            # NOTE(review): both branches call the same binarize — presumably
            # a leftover from a train/eval distinction; confirm intent.
            if self.training:
                binarized_mask = self.binarize(rescaled_mask)
            else:
                binarized_mask = self.binarize(rescaled_mask)
            # Force the preselected low-frequency region to be sampled.
            if not self.line_constrained:
                binarized_mask[:, :self.preselect_num_one_side, :self.preselect_num_one_side] = 1
                binarized_mask[:, :self.preselect_num_one_side, -self.preselect_num_one_side:] = 1
                binarized_mask[:, -self.preselect_num_one_side:, :self.preselect_num_one_side] = 1
                binarized_mask[:, -self.preselect_num_one_side:, -self.preselect_num_one_side:] = 1
            else:
                binarized_mask[..., :self.preselect_num_one_side, :] = 1
                binarized_mask[..., -self.preselect_num_one_side:, :] = 1
        neg_entropy = self._mask_neg_entropy(rescaled_mask)
        # Fixed baselines override the learned binarized mask entirely.
        if self.poisson_baseline:
            assert not self.line_constrained
            binarized_mask = transforms.fftshift(self._gen_poisson_mask(), dim=(1,2)).to(kspace.device)
        if self.spectrum_baseline:
            assert not self.line_constrained
            binarized_mask = self.spectrum_mask # DC are in the corners
        if self.equispaced_baseline:
            binarized_mask = transforms.fftshift(self.equispaced_mask, dim=(1, 2))
        masked_kspace = binarized_mask * kspace
        # fftshifted copies of the masks for plotting/debugging only.
        data_to_vis_sampler = {'prob_mask': transforms.fftshift(prob_mask[0,:,:,0],dim=(0,1)).cpu().detach().numpy(),
                               'rescaled_mask': transforms.fftshift(rescaled_mask[0,:,:,0],dim=(0,1)).cpu().detach().numpy(),
                               'binarized_mask': transforms.fftshift(binarized_mask[0,:,:,0],dim=(0,1)).cpu().detach().numpy()}
        return masked_kspace, binarized_mask, neg_entropy, data_to_vis_sampler
class BiLOUPESampler(nn.Module):
    """
    Bi-directional LOUPE Sampler

    Line-constrained variant that learns one probability vector of length
    2*shape[0]; the first half selects vertical k-space lines, the second
    half horizontal lines, and the two binarized halves are combined.
    """
    def __init__(self, shape=[320, 320], slope=5, sparsity=0.25, line_constrained=False,
                 conjugate_mask=False, preselect=False, preselect_num=2):
        """
        shape ([int. int]): Shape of the reconstructed image
        slope (float): Slope for the Loupe probability mask. Larger slopes make the mask converge faster to
            deterministic state.
        sparsity (float): Predefined sparsity of the learned probability mask. 1 / acceleration_ratio
        line_constrained (bool): Sample kspace measurements column by column
        conjugate_mask (bool): For real image, the corresponding kspace measurements have conjugate symmetry property
            (point reflection). Therefore, the information in the left half of the kspace image is the same as the
            other half. To take advantage of this, we can force the model to only sample right half of the kspace
            (when conjugate_mask is set to True)
        preselect: preselect center regions
        """
        super().__init__()
        assert conjugate_mask is False
        # Only the line-constrained configuration is supported here.
        assert line_constrained
        # probability mask
        if line_constrained:
            self.gen_mask = BiLineConstrainedProbMask([shape[0]*2], slope, preselect=preselect, preselect_num=preselect_num)
        else:
            # Unreachable given the assertion above.
            assert 0
            self.gen_mask = ProbMask(shape, slope)
        self.rescale = RescaleProbMap
        self.binarize = ThresholdRandomMaskSigmoidV1.apply # FIXME
        self.preselect =preselect
        self.preselect_num = preselect_num
        self.shape = shape
    def _mask_neg_entropy(self, mask, eps=1e-10):
        # negative of pixel wise entropy
        entropy = mask * torch.log(mask+eps) + (1-mask) * torch.log(1-mask+eps)
        return entropy
    def forward(self, kspace, sparsity):
        """Sample `kspace` with combined vertical + horizontal line masks.

        Returns (masked_kspace, binarized_mask, neg_entropy, vis_dict).
        """
        # kspace: NHWC
        # sparsity (float)
        prob_mask = self.gen_mask(kspace)
        batch_size = kspace.shape[0]
        if not self.preselect:
            # Only the preselect configuration is supported.
            assert 0
        else:
            # Halve the target sparsity because the budget is split between
            # the vertical and horizontal line sets.
            rescaled_mask = self.rescale(prob_mask, sparsity/2)
            # NOTE(review): both branches call the same binarize — presumably
            # a leftover from a train/eval distinction; confirm intent.
            if self.training:
                binarized_mask = self.binarize(rescaled_mask)
            else:
                binarized_mask = self.binarize(rescaled_mask)
            # always preselect vertical lines
            binarized_vertical_mask, binarized_horizontal_mask = torch.chunk(binarized_mask, dim=2, chunks=2)
            binarized_horizontal_mask = binarized_horizontal_mask.transpose(1, 2)
            # Union of the two line sets, clamped to a valid 0/1 mask.
            binarized_mask = torch.clamp(binarized_vertical_mask + binarized_horizontal_mask, max=1, min=0)
            binarized_mask[..., :self.preselect_num, :] = 1
        masked_kspace = binarized_mask * kspace
        neg_entropy = self._mask_neg_entropy(rescaled_mask)
        # for visualization purpose
        # Rebuild 2-D views of the 1-D probability/rescaled vectors by
        # broadcasting the vertical and horizontal halves.
        vertical_mask, horizontal_mask = torch.chunk(prob_mask.reshape(1, -1), dim=-1, chunks=2)
        prob_mask =vertical_mask.reshape(1, 1, 1, -1)+horizontal_mask.reshape(1, 1, -1, 1)
        rescaled_vertical_mask, rescaled_horizontal_mask = torch.chunk(rescaled_mask.reshape(1, -1), dim=-1, chunks=2)
        rescaled_mask = rescaled_vertical_mask.reshape(1, 1, 1, -1)+rescaled_horizontal_mask.reshape(1, 1, -1, 1)
        data_to_vis_sampler = {'prob_mask': transforms.fftshift(prob_mask[0, 0],dim=(0,1)).cpu().detach().numpy(),
                               'rescaled_mask': transforms.fftshift(rescaled_mask[0, 0],dim=(0,1)).cpu().detach().numpy(),
                               'binarized_mask': transforms.fftshift(binarized_mask[0,:,:,0],dim=(0,1)).cpu().detach().numpy()}
        return masked_kspace, binarized_mask, neg_entropy, data_to_vis_sampler
| 45.747788 | 127 | 0.640487 |
740d1566331d76840cc9c2d47fe10ea989dda52d | 356 | py | Python | flarestack/data/icecube/gfu/__init__.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | null | null | null | flarestack/data/icecube/gfu/__init__.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | 25 | 2019-11-14T15:46:24.000Z | 2020-11-27T11:14:22.000Z | flarestack/data/icecube/gfu/__init__.py | robertdstein/flarestack | 2ce7e67da336514f6f38f06126a1fbd82131e441 | [
"MIT"
] | 2 | 2020-01-06T19:39:27.000Z | 2020-07-16T20:32:29.000Z | import numpy as np
gfu_binning = (
    # First axis: bin edges from four piecewise-linear segments, with finer
    # spacing in [-0.93, -0.3]; np.unique sorts the edges and merges the
    # duplicated boundary values where neighbouring segments meet.
    # NOTE(review): presumably sin(declination) edges for the GFU sample —
    # confirm against the dataset definition that consumes this tuple.
    np.unique(
        np.concatenate(
            [
                np.linspace(-1.0, -0.93, 4 + 1),
                np.linspace(-0.93, -0.3, 10 + 1),
                np.linspace(-0.3, 0.05, 9 + 1),
                np.linspace(0.05, 1.0, 18 + 1),
            ]
        )
    ),
    # Second axis: edges from 1.0 to 9.5 in steps of 0.125; the +0.01 makes
    # 9.5 inclusive despite np.arange's half-open upper bound.
    # NOTE(review): presumably log10(energy-proxy) edges — confirm.
    np.arange(1.0, 9.5 + 0.01, 0.125),
)
| 22.25 | 49 | 0.382022 |
05d11248dac6b8ff4505f991957f7776b06073be | 781 | py | Python | exams/2011/6_external_stability.py | JoaoCostaIFG/MNUM | 6e042d8a6f64feb9eae9c79afec2fbab51f46fbd | [
"MIT"
] | 1 | 2019-12-07T10:34:30.000Z | 2019-12-07T10:34:30.000Z | exams/2011/6_external_stability.py | JoaoCostaIFG/MNUM | 6e042d8a6f64feb9eae9c79afec2fbab51f46fbd | [
"MIT"
] | null | null | null | exams/2011/6_external_stability.py | JoaoCostaIFG/MNUM | 6e042d8a6f64feb9eae9c79afec2fbab51f46fbd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Perturbation ("external stability") exercise: given an approximate
# solution x of A·x = b, fold uniform perturbations da in A and db in b
# into a perturbed right-hand side, then solve by Gaussian elimination.
n = 3
A = [[18, -1, 1], [3, -5, 4], [6, 8, 29]]
# Previously computed solution of the unperturbed system.
x = [0.552949, -0.15347, -0.10655]
db = da = 0.1
b = [db for i in range(n)]
dA = [[da for j in range(n)] for i in range(n)]
# Effective right-hand-side perturbation: delta_b - delta_A · x.
for i in range(n):
    for j in range(n):
        b[i] -= dA[i][j] * x[j]
print("new b:", b)
# Gauss elim
# Forward elimination with row normalisation (each pivot row is divided
# by its pivot first, so the reduced matrix has unit diagonal).
# NOTE(review): no pivoting — fails if a zero lands on the diagonal.
for i in range(n):
    pivot = A[i][i]
    b[i] /= pivot
    for j in range(n):
        A[i][j] /= pivot
    for j in range(i + 1, n):
        times = A[j][i]
        b[j] -= times * b[i]
        for k in range(n):
            A[j][k] -= times * A[i][k]
print(A)
print(b)
# Get x (external stability)
# Back substitution on the upper-triangular system.
x = b.copy()
for k in range(n - 1, -1, -1):
    s = 0
    for j in range(k + 1, n):
        s += A[k][j] * x[j]
    x[k] = (b[k] - s) / A[k][k]
print("extern stab:", x)
| 18.162791 | 47 | 0.449424 |
d6719836b5cc85aa0ea6fb2c697f1f748f668b3e | 404 | py | Python | src/arrays/beggars-outside-temple.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | null | null | null | src/arrays/beggars-outside-temple.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | null | null | null | src/arrays/beggars-outside-temple.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | 1 | 2020-08-09T06:37:21.000Z | 2020-08-09T06:37:21.000Z | def solve(count_beggars, amount):
    # Difference-array trick: record +amt at the first beggar of each
    # (start, end, amt) donation and -amt just past the last one, so every
    # donation costs O(1) regardless of the interval width.
    beggars_katori = [0] * count_beggars
    for start, end, amt in amount:
        beggars_katori[start - 1] += amt
        if end < count_beggars:
            beggars_katori[end] -= amt
    # Prefix-sum pass converts the deltas into per-beggar totals.
    for i in range(1, count_beggars):
        beggars_katori[i] += beggars_katori[i-1]
    return beggars_katori
# Example: 5 beggars, three interval donations -> [10, 55, 45, 25, 25].
res = solve(5, [[1, 2, 10], [2, 3, 20], [2, 5, 25]])
print(res)
| 23.764706 | 52 | 0.601485 |
6193e02da9d4ecb20f019ebae65552a4515244ed | 464 | py | Python | main.py | 17jrb17/01-Introduction | 32d6ee73d80b8e75ad0033120f40574338e50814 | [
"MIT"
] | null | null | null | main.py | 17jrb17/01-Introduction | 32d6ee73d80b8e75ad0033120f40574338e50814 | [
"MIT"
] | null | null | null | main.py | 17jrb17/01-Introduction | 32d6ee73d80b8e75ad0033120f40574338e50814 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import utils
# Fail fast on interpreters older than Python 3.7, then clear the screen.
utils.check_version((3,7))
utils.clear()
# Course-introduction printout.
print('Hello, my name is Jacob Bailey')
print('My favorite game is Bioshock Infinite')
print('My only concern is getting back into the groove of coding for this class')
print('I just want to learn more about what goes into creating the things I love and more about the industry in general.')
print('stackoverflow number: user:12003507')
print('github url: https://github.com/17jrb17') | 35.692308 | 122 | 0.762931 |
9b59a471d4b1f7728bbb52b6533a7e6a727bb02f | 2,842 | py | Python | CNN/CNN.py | MohammadJRanjbar/Recognition-of-Handwritten-Digit-using-CNN-and-ANN | 1b102f1227fb673a6b1ada7fd9d594c09df163c7 | [
"MIT"
] | null | null | null | CNN/CNN.py | MohammadJRanjbar/Recognition-of-Handwritten-Digit-using-CNN-and-ANN | 1b102f1227fb673a6b1ada7fd9d594c09df163c7 | [
"MIT"
] | null | null | null | CNN/CNN.py | MohammadJRanjbar/Recognition-of-Handwritten-Digit-using-CNN-and-ANN | 1b102f1227fb673a6b1ada7fd9d594c09df163c7 | [
"MIT"
] | null | null | null | from keras.datasets.mnist import load_data
import keras
from keras.models import Sequential
from keras.layers import Convolution2D as Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense , Activation , Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD , Adam
from sklearn.model_selection import train_test_split
from keras.losses import categorical_crossentropy,binary_crossentropy
import numpy as np
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import cv2
# --- Data preparation -------------------------------------------------
# MNIST via keras: (N, 28, 28) uint8 images with integer labels 0-9.
(train_digits, train_labels), (test_digits, test_labels) = load_data()
image_height = train_digits.shape[1]
image_width = train_digits.shape[2]
num_channels = 1  # grayscale
# Add the trailing channel axis expected by Conv2D: (N, H, W, C).
train_data = np.reshape(train_digits, (train_digits.shape[0], image_height, image_width, num_channels))
test_data = np.reshape(test_digits, (test_digits.shape[0],image_height, image_width, num_channels))
# Scale pixel intensities from [0, 255] down to [0, 1].
train_data = train_data.astype('float32') / 255.
test_data = test_data.astype('float32') / 255.
num_classes = 10  # digits 0-9
# One-hot encode labels for categorical cross-entropy.
train_labels_cat = to_categorical(train_labels,num_classes)
test_labels_cat = to_categorical(test_labels,num_classes)
# --- Model definition: two conv/pool stages, then a dense classifier --
model = Sequential()
# add Convolutional layers
model.add(Conv2D(filters=32, kernel_size=(3,3), activation='sigmoid', padding='same',input_shape=(image_height, image_width, num_channels)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='sigmoid', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
# Densely connected layers
model.add(Dense(128, activation='sigmoid'))
# output layer
model.add(Dense(num_classes, activation='softmax'))
# compile with adam optimizer & categorical_crossentropy loss function
model.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])
# Checkpoint filename template: epoch number and val_loss baked in.
filepath='weights0.{epoch:02d}-{val_loss:.2f}.hdf5'
# Save only models that improve on the best validation loss so far.
CB=keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
# --- Training: 20% of the training set held out for validation -------
history = model.fit(train_data, train_labels_cat, epochs=100, batch_size=64,validation_split=0.2 , callbacks=[CB])
model.summary()
# --- Diagnostics: loss and accuracy curves ----------------------------
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label = 'val_loss')
plt.xlabel('Epoch')
plt.ylabel('loss')
plt.legend(loc='lower right')
# NOTE(review): 'acc'/'val_acc' are older Keras history keys; newer
# releases use 'accuracy'/'val_accuracy' -- confirm against the
# installed Keras version. These curves are also drawn onto the same
# axes as the loss curves above.
plt.plot(history.history['acc'], label='acc')
plt.plot(history.history['val_acc'], label = 'val_acc')
plt.xlabel('Epoch')
plt.ylabel('accuracy')
plt.legend(loc='lower right')
# --- Evaluate a saved checkpoint --------------------------------------
from keras.models import load_model
# NOTE(review): hard-coded checkpoint name from one particular training
# run; it only exists if an epoch was saved with val_loss == 0.04.
model3=load_model('weights0.25-0.04.hdf5')
train_loss, train_acc = model3.evaluate(train_data, train_labels_cat, verbose=2)
print(train_loss, train_acc)
test_loss, test_acc = model3.evaluate(test_data, test_labels_cat, verbose=2)
print(test_loss, test_acc) | 48.169492 | 144 | 0.795215 |
99b8e49380df60c3065a4db1ecf679733ac5e9a8 | 570 | py | Python | pyramid_learning_journal/routes.py | ChristopherSClosser/pyramid-learning-journal | 3cd3126d24baef23c20ee75adcaf4cdc892ed71d | [
"MIT"
] | null | null | null | pyramid_learning_journal/routes.py | ChristopherSClosser/pyramid-learning-journal | 3cd3126d24baef23c20ee75adcaf4cdc892ed71d | [
"MIT"
] | 10 | 2018-03-10T18:45:50.000Z | 2018-03-15T22:39:43.000Z | pyramid_learning_journal/routes.py | ChristopherSClosser/pyramid-learning-journal | 3cd3126d24baef23c20ee75adcaf4cdc892ed71d | [
"MIT"
] | null | null | null | """Define routes."""
def includeme(config):
    """Register the app's static view and URL routes on the Pyramid config.

    :param config: the Pyramid ``Configurator`` being set up.
    """
    config.add_static_view(
        'static',
        'pyramid_learning_journal:static',
        cache_max_age=3600
    )
    config.add_route('home', '/')
    # Raw strings keep the regex ``\d`` in the route patterns from being
    # treated as an (invalid, deprecated) string escape sequence.
    config.add_route('detail', r'/journal/{id:\d+}')
    config.add_route('new', '/journal/new-entry')
    config.add_route('edit', r'/journal/{id:\d+}/edit-entry')
    config.add_route('delete', r'/journal/{id:\d+}/delete-entry')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('api', '/api')
| 30 | 64 | 0.612281 |
c8ffc3ff00911aa55445da88c815708568ba141c | 1,429 | py | Python | py/dcp/problems/recursion/robot_grid.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/dcp/problems/recursion/robot_grid.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/dcp/problems/recursion/robot_grid.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | '''Robot in a Grid.
Imagine a robot sitting on the upper left corner of grid with r rows and c columns.
The robot can only move in two directions, right and down, but certain cells are "off limits" such
that the robot cannot step on them. Design an algorithm to find a path for the robot from the top
left to the bottom right.
'''
def build_grid(rows, columns, obsticals):
    """Build a ``rows`` x ``columns`` grid of 0s, indexed ``grid[row][col]``,
    with each (row, col) coordinate in *obsticals* marked as 1 (off limits).

    Bug fix: the original built ``columns`` rows of ``rows`` cells each,
    i.e. the dimensions were swapped relative to the ``[row][col]``
    indexing used both here and by the walker.
    """
    grid = [[0] * columns for _ in range(rows)]
    for row, col in obsticals:
        grid[row][col] = 1
    return grid
def robot_walk1(grid):
    """Find a right/down-only path for the robot from (0, 0) to the
    bottom-right cell of *grid*, avoiding cells marked 1.

    Returns the path as a list of (row, col) tuples, or None when no
    path exists.

    Bug fix: the original reassigned ``path`` to the result of failed
    recursive calls, so the returned path accumulated dead-end detours
    (and the ``(-1, -1)`` "stuck" marker). This version backtracks
    properly and memoizes dead-end cells, giving O(rows * columns) time
    instead of exponential.
    """
    rows, columns = len(grid), len(grid[0])
    target = (rows - 1, columns - 1)
    failed = set()  # cells proven to have no path to the target

    def walk(x, y, path):
        path.append((x, y))
        if (x, y) == target:
            return True
        if (x, y) not in failed:
            # Try stepping right first, then down (same preference order
            # as the original implementation).
            if y + 1 < columns and grid[x][y + 1] == 0 and walk(x, y + 1, path):
                return True
            if x + 1 < rows and grid[x + 1][y] == 0 and walk(x + 1, y, path):
                return True
            failed.add((x, y))
        path.pop()  # backtrack: drop the dead-end cell
        return False

    path = []
    if grid[0][0] == 0 and walk(0, 0, path):
        return path
    return None
af5876fdae03eadbb1aac31d337afa2fa66e468b | 2,908 | py | Python | my_util/sloth/sloth/driver/takeover.py | RecluseXU/learning_spider | 45fa790ed7970be57a21b40817cc66856de3d99b | [
"MIT"
] | 38 | 2020-08-30T11:41:53.000Z | 2022-03-23T04:30:26.000Z | my_util/sloth/sloth/driver/takeover.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 2 | 2021-08-20T16:34:12.000Z | 2021-10-08T11:06:41.000Z | my_util/sloth/sloth/driver/takeover.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 10 | 2020-11-24T09:15:42.000Z | 2022-02-25T06:05:16.000Z | # -*- encoding: utf-8 -*-
'''
@Time : 2021-08-07
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc : webdriver 接管操作系统启动的 Chrome
'''
# here put the import lib
from threading import Thread
from os import popen
from os.path import dirname, join as path_join
from platform import system as platform_system
from selenium import webdriver
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from sloth.plugins import Plugins
BASE_PATH = dirname(dirname(__file__))
DesiredCapabilities.CHROME['pageLoadStrategy'] = "eager"
def _get_webdriver_path():
    """Return the path of the bundled chromedriver for the current OS.

    Raises:
        OSError: when running on an OS with no bundled driver (the
            original fell through with ``driver_name`` unbound, raising
            a confusing NameError on e.g. macOS).
    """
    system_type = platform_system().lower()
    if system_type == 'windows':
        driver_name = 'chromedriver.exe'
    elif system_type == 'linux':
        driver_name = 'chromedriver'
    else:
        raise OSError('No bundled chromedriver for OS: %s' % system_type)
    return path_join(BASE_PATH, 'static/', driver_name)
def webdriver_takeover(
        chrome_path: str = 'C:/Program Files/Google/Chrome/Application/chrome.exe',
        data_dir: str = 'Default',
        debugger_port: int = 26660,
) -> WebDriver:
    """Launch Chrome from the command line and attach a webdriver to it.

    :param chrome_path: path of the local Chrome executable (used on
        Windows only; on Linux the ``google-chrome`` launcher is used).
    :param data_dir: name of the Chrome user-data directory, created
        under ``<package>/temp``; a falsy value skips ``--user-data-dir``.
    :param debugger_port: remote-debugging port that Chrome exposes and
        the webdriver attaches to afterwards.
    :return: a WebDriver connected to the newly launched browser.
    """
    # Build the launch command for the current operating system.
    os_name = platform_system()
    if 'Windows' == os_name:
        command = '"{}" '.format(chrome_path)
    elif 'Linux' == os_name:
        command = 'google-chrome '
    else:
        raise Exception('Unknow OS:', os_name)
    # Chrome startup switches.
    # Reference: https://peter.sh/experiments/chromium-command-line-switches/
    command += ' '.join((
        '--remote-debugging-port={}'.format(debugger_port),
        '--no-sandbox',  # disable the sandbox
        '--disable-gpu',  # disable GPU acceleration
        '--lang=zh-CN',  # simplified-Chinese UI
        '--disable-webrtc',  # disable WebRTC
        '--disable-popup-blocking'  # allow pop-up windows
    ))
    if data_dir:
        data_dir_path = path_join(BASE_PATH, 'temp', data_dir)
        command += f' --user-data-dir="{data_dir_path}"'
    # Collect locally installed plugins and pass their paths to Chrome.
    plugins_note = Plugins.load_local_plugins()
    if plugins_note:
        plugin_arg = ','.join([note['path'] for note in plugins_note])
        command += f' --load-extension={plugin_arg}'
    if 'Windows' == os_name:
        command = "{}".format(command)
    def start_chrome(command):
        """Start a Chrome process through a shell command.
        """
        print('Command:', command)
        popen(command)
    # Launch Chrome on a background thread so this call does not block.
    chrome_thread = Thread(target=start_chrome, args=(command,))
    chrome_thread.start()
    # Attach the webdriver to the debugger port of the Chrome we started.
    options = webdriver.ChromeOptions()
    options.add_experimental_option(
        "debuggerAddress", f'127.0.0.1:{debugger_port}')
    driver = webdriver.Chrome(
        _get_webdriver_path(),
        options=options,
    )
    # Create plugin wrapper objects for the attached driver.
    plugins_note = Plugins.load_plugins_id(driver, plugins_note)
    Plugins.create_plugins_instance(driver, plugins_note)
    return driver
| 29.979381 | 79 | 0.661967 |
6d7ad42ea44b990479d8b51de69c4911f0b4c31e | 19,228 | py | Python | biocrnpyler/sbmlutil.py | zoltuz/BioCRNPyler | 814d514cbeb9d71ee1774169cee72a3131547d55 | [
"BSD-3-Clause"
] | null | null | null | biocrnpyler/sbmlutil.py | zoltuz/BioCRNPyler | 814d514cbeb9d71ee1774169cee72a3131547d55 | [
"BSD-3-Clause"
] | 1 | 2020-06-15T20:09:25.000Z | 2020-06-15T20:52:26.000Z | biocrnpyler/sbmlutil.py | zoltuz/BioCRNPyler | 814d514cbeb9d71ee1774169cee72a3131547d55 | [
"BSD-3-Clause"
] | null | null | null | # sbmlutil.py - libsbml helper functions
# RMM, 14 Aug 2018
#
# Copyright (c) 2018, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
import libsbml
import numpy as np
from warnings import warn
# Reaction ID number (global)
reaction_id = 0
# Create an SBML model
def create_sbml_model(compartment_id="default", time_units='second', extent_units='mole', substance_units='mole',
                      length_units='metre', area_units='square_metre', volume_units='litre', volume = 1e-6, model_id = None):
    '''
    Creates an SBML Level 3 Version 2 model with some fixed standard settings.
    Returns the SBMLDocument and the Model object as a tuple.
    Refer to python-libsbml for more information on SBML API.

    :param compartment_id: id and name of the single default compartment.
    :param time_units/extent_units/substance_units/length_units/area_units/volume_units:
        model-wide SBML unit settings.
    :param volume: size of the default compartment (litres).
    :param model_id: id/name for the model; when None, a unique
        'biocrnpyler_<random int>' id is generated for this run.
    '''
    document = libsbml.SBMLDocument(3, 2)
    model = document.createModel()
    if model_id is None:
        # Random numeric suffix keeps auto-generated model ids unique per run.
        model_id = 'biocrnpyler_'+str(np.random.randint(1e6))
    model.setId(model_id)
    model.setName(model_id)
    # Define units for area (not used, but keeps COPASI from complaining)
    unitdef = model.createUnitDefinition()
    unitdef.setId('square_metre')
    unit = unitdef.createUnit()
    unit.setKind(libsbml.UNIT_KIND_METRE)
    unit.setExponent(2)
    unit.setScale(0)
    unit.setMultiplier(1)
    # Set up required units and containers
    model.setTimeUnits(time_units) # set model-wide time units
    model.setExtentUnits(extent_units) # set model units of extent
    model.setSubstanceUnits(substance_units) # set model substance units
    model.setLengthUnits(length_units) # length units
    model.setAreaUnits(area_units) # area units (never used?)
    model.setVolumeUnits(volume_units) # default volume unit
    # Define the default compartment
    compartment = model.createCompartment()
    compartment.setId(compartment_id)
    compartment.setName(compartment_id)
    compartment.setConstant(True) # keep compartment size constant
    compartment.setSpatialDimensions(3) # 3 dimensional compartment
    compartment.setVolume(volume) # default: 1 microliter
    # Returning document is enough. document.getModel() gives the model, and model.getCompartment(0) gives the compartment.
    return document, model
# Creates an SBML id from a chemical_reaction_network.species object
def species_sbml_id(species, document=None):
    """Build a valid SBML id for a chemical_reaction_network.species object.

    When *document* is given, the id is made unique against every id
    already present in that SBMLDocument.
    """
    existing_ids = getAllIds(document.getListOfAllElements()) if document else []
    return SetIdFromNames(existing_ids).getValidIdForName(repr(species))
# Helper function to add a species to the model
# species must be chemical_reaction_network.species objects
def add_species(model, compartment, species, debug=False, initial_concentration=None):
    """Add *species* (a chemical_reaction_network.species object) to the
    SBML *model* inside *compartment*.

    :param initial_concentration: starting concentration; defaults to 0.
    :return: the created libsbml Species.
    """
    # (Removed the original's no-op ``model = model`` self-assignment.)
    # Construct the species name
    species_name = repr(species)
    # Construct the species ID
    species_id = species_sbml_id(species, model.getSBMLDocument())

    if debug: print("Adding species", species_name, species_id)
    sbml_species = model.createSpecies()
    sbml_species.setName(species_name)
    sbml_species.setId(species_id)
    sbml_species.setCompartment(compartment.getId())
    sbml_species.setConstant(False)
    sbml_species.setBoundaryCondition(False)
    sbml_species.setHasOnlySubstanceUnits(False)
    if initial_concentration is None:
        initial_concentration = 0
    sbml_species.setInitialConcentration(initial_concentration)
    return sbml_species
# Helper function to add a parameter to the model
def add_parameter(mixture, name, value=0, debug=False):
    """Add (or update) a global parameter named *name* on the mixture's
    SBML model and set it to *value* as a constant.

    :return: the created or updated libsbml Parameter.
    """
    model = mixture.model  # Get the model where we will store results

    # Check to see if this parameter is already present
    parameter = find_parameter(mixture, name)  # ! TODO: add error checking
    if parameter is None:
        if debug: print("Adding parameter %s" % name)
        parameter = model.createParameter()
        all_ids = getAllIds(model.getSBMLDocument().getListOfAllElements())
        trans = SetIdFromNames(all_ids)
        parameter.setId(trans.getValidIdForName(name))  # ! TODO: add error checking
    else:
        # Bug fix: the original passed the id as a second print() argument
        # (logging-style) instead of %-formatting it into the message.
        if debug: print("add_parameter: %s already exists" % parameter.getId())

    # Set the value of the parameter
    parameter.setValue(float(value))
    parameter.setConstant(True)
    return parameter
# Look for a parameter in the current model
def find_parameter(mixture, id):
    """Look up the parameter with the given id on the mixture's model."""
    return mixture.model.getParameter(id)  # ! TODO: add error checking
# Helper function to add a reaction to a model
# reaction must be a chemical_reaction_network.reaction object
#propensity params is a dictionary of the parameters for non-massaction propensities.
def add_reaction(model, inputs, input_coefs, outputs, output_coefs,
                 reaction_id, k = None, kname = None,
                 stochastic = False, propensity_type = "massaction",
                 propensity_params = None, propensity_annotation = True):
    """Add an irreversible reaction to the SBML *model*.

    inputs/outputs are chemical_reaction_network.species (matched by name
    against species already in the model); input_coefs/output_coefs are
    their stoichiometries. The rate law is built as an L3 formula string
    from propensity_type ("massaction", "hillpositive", "hillnegative",
    "proportionalhillpositive", "proportionalhillnegative") with its
    parameters taken from k/kname or the propensity_params dictionary.
    Returns the created libsbml Reaction.
    """
    # Create the reaction
    reaction = model.createReaction()
    reaction.setReversible(False)
    # reaction.setFast(False) # Deprecated in SBML
    all_ids = getAllIds(model.getSBMLDocument().getListOfAllElements())
    trans = SetIdFromNames(all_ids)
    reaction.setId(trans.getValidIdForName(reaction_id))
    reaction.setName(reaction.getId())
    ratestring = "" #Stores the string representing the rate function
    annotation_dict = {"type":propensity_type}
    # Create a kinetic law for the reaction
    ratelaw = reaction.createKineticLaw()
    #Create Local Propensity Parameters
    if propensity_type=="massaction":
        if kname is None:
            kname = "k"
        param = ratelaw.createParameter()
        param.setId(kname)
        param.setConstant(True)
        # The rate constant may come from the k keyword or propensity_params['k'];
        # conflicting values raise below.
        if k is not None and propensity_params is None:
            param.setValue(k)
            annotation_dict["k"] = k
        elif 'k' in propensity_params:
            param.setValue(propensity_params['k'])
            annotation_dict["k"] = propensity_params['k']
        elif k is not None and "k" in propensity_params and propensity_params['k'] != k:
            raise ValueError("Keyword k and propensity_params['k'] have different values. Only one of these arguments is needed or they must match.")
        else:
            raise ValueError("Massaction propensities require a rate k which can be passed into add_reaction as a keyword k= or inside the propensity_params keyword dictionary.")
        ratestring = kname

    #Hill Function Propensities: all need rate k, cooperativity n and
    #dissociation constant K as local kinetic-law parameters.
    elif propensity_type in ["hillpositive", "hillnegative", "proportionalhillpositive", "proportionalhillnegative"]:
        if not ("k" in propensity_params and "K" in propensity_params and "n" in propensity_params):
            raise ValueError(propensity_type+" requires the following keys in the propensity_params dictionary: "
                             "'k':rate constant (float)"
                             "'n':cooperativity(float), "
                             "and 'K':dissociationc constant (float).")
        param_k = ratelaw.createParameter()
        param_k.setId("k")
        param_k.setConstant(True)
        param_k.setValue(propensity_params['k'])
        param_n = ratelaw.createParameter()
        param_n.setId("n")
        param_n.setConstant(True)
        param_n.setValue(propensity_params['n'])
        param_K = ratelaw.createParameter()
        param_K.setId("K")
        param_K.setConstant(True)
        param_K.setValue(propensity_params['K'])
        ratestring = "k"
        annotation_dict["k"] = propensity_params['k']
        annotation_dict["K"] = propensity_params['K']
        annotation_dict["n"] = propensity_params['n']
    elif propensity_type == "general":
        raise NotImplementedError("SBML writing of general propensities not implemented")
    else:
        raise ValueError(propensity_type+" is not a supported propensity_type")

    # Create the reactants
    for i in range(len(inputs)):
        species = str(inputs[i]).replace("'", "")
        stoichiometry = input_coefs[i]
        # species_id = species_sbml_id(species, model.getSBMLDocument())
        # What to do when there are multiple species with same name?
        species_id = getSpeciesByName(model,species).getId()
        reactant = reaction.createReactant()
        reactant.setSpecies(species_id)  # ! TODO: add error checking
        reactant.setConstant(False)
        # NOTE(review): "is np.nan" is an identity check -- it only catches
        # the np.nan singleton, not NaN values computed elsewhere.
        if stoichiometry is None or stoichiometry is np.nan:
            stoichiometry = 1.0
        reactant.setStoichiometry(stoichiometry)
        #Create Rate-strings for massaction propensities
        if propensity_type=="massaction" and stochastic:
            # NOTE(review): range() requires an int -- a float stoichiometry
            # (including the 1.0 default above) raises TypeError here.
            for i in range(stoichiometry):
                if i > 0:
                    ratestring += f" * ( {species_id} - {i} )"
                else:
                    ratestring += f" * {species_id}"
        elif propensity_type=="massaction" and not stochastic:
            if stoichiometry > 1:
                ratestring += f" * {species_id}^{stoichiometry}"
            else:
                ratestring += f" * {species_id}"

    #Create ratestring for non-massaction propensities
    if propensity_type == "hillpositive":
        if not ("s1" in propensity_params):
            raise ValueError("hillpositive propensities, p(s1; k, K, n) "
                "= k*s1^n/(s1^n + K), require the following key in the propensity_params dictionary:"
                "'s1':species (chemical_reaction_network.species)")
        s = str(propensity_params['s1']).replace("'", "")
        s_species_id = getSpeciesByName(model,s).getId()
        ratestring+=f"*{s_species_id}^n/({s_species_id}^n+K)"
        annotation_dict["s1"] = s_species_id
    elif propensity_type == "hillnegative":
        if not ("s1" in propensity_params):
            raise ValueError("hillnegative propensities, "
                "p(s1; k, K, n) = k*1/(s1^n + K), require the following key in the propensity_params dictionary:"
                "'s1':species (chemical_reaction_network.species)")
        s = str(propensity_params['s1']).replace("'", "")
        s_species_id = getSpeciesByName(model,s).getId()
        ratestring+=f"/({s_species_id}^n+K)"
        annotation_dict["s1"] = s_species_id
    elif propensity_type == "proportionalhillpositive":
        if not ("s1" in propensity_params and "d" in propensity_params):
            raise ValueError("proportionalhillpositive propensities, "
                "p(s1, d; k, K, n) = k*d*s1^n/(s1^n + K), require the following key in the propensity_params dictionary:"
                "'s1':species (chemical_reaction_network.species)"
                "'d':species (chemical_reaction_network.species), ")
        s = str(propensity_params['s1']).replace("'", "")
        d = str(propensity_params['d']).replace("'", "")
        s_species_id = getSpeciesByName(model,s).getId()
        d_species_id = getSpeciesByName(model,d).getId()
        ratestring+=f"*{d_species_id}*{s_species_id}^n/({s_species_id}^n + K)"
        annotation_dict["s1"] = s_species_id
        annotation_dict["d"] = d_species_id
    elif propensity_type == "proportionalhillnegative":
        if not ("s1" in propensity_params and "d" in propensity_params):
            raise ValueError("proportionalhillnegative propensities, "
                "p(s1, d; k, K, n) = k*d/(s1^n + K), require the following key in the propensity_params dictionary:"
                "'s1':species (chemical_reaction_network.species)"
                "'d':species (chemical_reaction_network.species), ")
        s = str(propensity_params['s1']).replace("'", "")
        d = str(propensity_params['d']).replace("'", "")
        s_species_id = getSpeciesByName(model,s).getId()
        d_species_id = getSpeciesByName(model,d).getId()
        ratestring+=f"*{d_species_id}/({s_species_id}^n+K)"
        annotation_dict["s1"] = s_species_id
        annotation_dict["d"] = d_species_id
    elif propensity_type == "general":
        raise NotImplementedError("General propensity SBML Writing Not Implemented")

    # Create the products
    for i in range(len(outputs)):
        species = str(outputs[i]).replace("'", "")
        stoichiometry = output_coefs[i]
        product = reaction.createProduct()
        species_id = getSpeciesByName(model, species).getId()
        product.setSpecies(species_id)
        # NOTE(review): same "is np.nan" identity-check caveat as above.
        if stoichiometry is None or stoichiometry is np.nan:
            stoichiometry = 1.0
        product.setStoichiometry(stoichiometry)
        product.setConstant(False)

    # Set the ratelaw to the ratestring
    math_ast = libsbml.parseL3Formula(ratestring)
    ratelaw.setMath(math_ast)

    # Optionally record the propensity description as an SBML annotation
    # so the reaction can be reconstructed when reading the file back.
    if propensity_annotation:
        annotation_string = "<PropensityType>"
        for k in annotation_dict:
            annotation_string += " "+k + "=" + str(annotation_dict[k])
        annotation_string += "</PropensityType>"
        reaction.appendAnnotation(annotation_string)

    return reaction
# !/usr/bin/env python
##
## @file setIdFromNames.py
## @brief Utility program, renaming all SIds that also has
## names specified. The new id will be derived from
## the name, with all invalid characters removed.
##
## @author Frank T. Bergmann
##
##
## <!--------------------------------------------------------------------------
## This sample program is distributed under a different license than the rest
## of libSBML. This program uses the open-source MIT license, as follows:
##
## Copyright (c) 2013-2017 by the California Institute of Technology
## (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
## and the University of Heidelberg (Germany), with support from the National
## Institutes of Health (USA) under grant R01GM070923. All rights reserved.
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
## THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
##
## Neither the name of the California Institute of Technology (Caltech), nor
## of the European Bioinformatics Institute (EMBL-EBI), nor of the University
## of Heidelberg, nor the names of any contributors, may be used to endorse
## or promote products derived from this software without specific prior
## written permission.
## ------------------------------------------------------------------------ -->
##
##
import sys
import os.path
import time
# This class implements an identifier transformer, that means it can be used
# to rename all sbase elements.
class SetIdFromNames(libsbml.IdentifierTransformer):
    """Identifier transformer that renames SBase elements, deriving each
    new id from the element's name with invalid characters replaced."""

    def __init__(self, ids):
        # call the constructor of the base class
        libsbml.IdentifierTransformer.__init__(self)
        # ids already taken in the document; new ids must not collide
        self.existingIds = ids

    # The function actually doing the transforming. This function is called
    # once for each SBase element in the model.
    def transform(self, element):
        """Rename *element*'s id from its name; called per SBase element."""
        # return in case we don't have a valid element
        if (element is None \
            or element.getTypeCode() == libsbml.SBML_LOCAL_PARAMETER):
            return libsbml.LIBSBML_OPERATION_SUCCESS
        # or if there is nothing to do
        if (element.isSetName() == False \
            or element.getId() == element.getName()):
            return libsbml.LIBSBML_OPERATION_SUCCESS

        # find the new id
        newId = self.getValidIdForName(element.getName())

        # set it
        element.setId(newId)

        # remember it
        self.existingIds.append(newId)

        return libsbml.LIBSBML_OPERATION_SUCCESS

    def nameToSbmlId(self, name):
        """Turn *name* into a syntactically valid SBML id: prefix a digit
        with 'x_', map '*' to 'xx', replace other invalid characters with
        '_' and drop a trailing '_'.

        NOTE(review): an empty *name* raises IndexError on the first
        character test -- confirm callers never pass ''.
        """
        IdStream = []
        count = 0
        end = len(name)
        if '0' <= name[count] and name[count] <= '9':
            IdStream.append('x_')
        if '*' in name:
            IdStream.append('xx')
        for count in range(0, end):
            if (('0' <= name[count] and name[count] <= '9') or
                    ('a' <= name[count] and name[count] <= 'z') or
                    ('A' <= name[count] and name[count] <= 'Z')):
                IdStream.append(name[count])
            else:
                IdStream.append('_')
        Id = ''.join(IdStream)
        if (Id[len(Id) - 1] != '_'):
            return Id
        return Id[:-1]

    #
    # Generates the id out of the name, and ensures it is unique.
    # It does so by appending numbers to the original name.
    #
    def getValidIdForName(self, name):
        baseString = self.nameToSbmlId(name)
        id = baseString
        count = 1
        while (self.existingIds.count(id) != 0):
            id = "{0}_{1}".format(baseString, count)
            count = count + 1
        return id
# #
# # Returns a list of all ids from the given list of elements
# #
def getAllIds(allElements):
    """Collect the ids of every element in *allElements*, skipping local
    parameters and elements without an id set."""
    if allElements is None or allElements.getSize() == 0:
        return []
    ids = []
    for index in range(allElements.getSize()):
        element = allElements.get(index)
        if element.isSetId() \
                and element.getTypeCode() != libsbml.SBML_LOCAL_PARAMETER:
            ids.append(element.getId())
    return ids
def getSpeciesByName(model, name, compartment=''):
    '''
    Find species in the Model whose name equals *name*.

    compartment : (Optional) restrict the search to species living in a
    compartment of this name.

    Returns a single species when exactly one matches, raises ValueError
    when none match, and warns and returns the full list when several do.
    '''
    if type(name) is not str:
        raise ValueError('"name" must be a string.')
    matches = []
    for candidate in model.getListOfSpecies():
        if candidate.getName() != name:
            continue
        if compartment != '':
            # Resolve the compartment element to compare by its name.
            comp_elem = candidate.getCompartment()
            if model.getElementBySId(comp_elem).getName() != compartment:
                continue
        matches.append(candidate)
    if not matches:
        raise ValueError('The species ' + name + ' not found.')
    if len(matches) == 1:
        return matches[0]
    warn('Multiple species with name ' + name + ' found. Returning a list')
    return matches
| 39.892116 | 178 | 0.657427 |
ecdc51e665338e5a1840e8806bd42423b4d4f3f8 | 836 | py | Python | website/drawquest/management/commands/reset_quest_scores.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 19 | 2015-11-10T17:36:20.000Z | 2021-04-12T07:36:00.000Z | website/drawquest/management/commands/reset_quest_scores.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 1 | 2021-06-09T03:45:34.000Z | 2021-06-09T03:45:34.000Z | website/drawquest/management/commands/reset_quest_scores.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 6 | 2015-11-11T00:38:38.000Z | 2020-07-25T20:10:08.000Z | from datetime import datetime, timedelta
from time import time
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests.models import Quest
from drawquest.apps.quests.top import top_quests_buffer
class Command(BaseCommand):
    """Management command (Python 2 syntax) that recomputes the score of
    every Quest for the top-quests view."""
    args = ''
    help = 'Update quest scores for the top quests view.'

    def handle(self, *args, **options):
        """Iterate over all quest ids and update each quest's score."""
        start = time()
        updates = 0
        # Fetch ids first, then load quests one at a time, so the whole
        # table is never held in memory at once.
        for quest_id in Quest.objects.all().values_list('id', flat=True):
            updates += 1
            quest = Quest.objects.get(id=quest_id)
            print quest.title
            quest.update_score()
        print "Scores updated. Rows updated: %s Total elapsed time: %0.2fs" % (updates, (time() - start))
| 28.827586 | 105 | 0.684211 |
0e0b59bcde17fb7219b78daca454cff2209b055f | 7,433 | py | Python | savu/core/transports/dosna_transport.py | hir12111/Savu | 172246f1afacb4c230960ca44a60901b853de16b | [
"Apache-2.0"
] | null | null | null | savu/core/transports/dosna_transport.py | hir12111/Savu | 172246f1afacb4c230960ca44a60901b853de16b | [
"Apache-2.0"
] | null | null | null | savu/core/transports/dosna_transport.py | hir12111/Savu | 172246f1afacb4c230960ca44a60901b853de16b | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: dosna_transport
:platform: Unix
   :synopsis: Transports data using DosNa (which provides several storage \
backends) at the beginning and end of the process list, respectively.
.. moduleauthor:: Emilio Perez Juarez <scientificsoftware@diamond.ac.uk>
"""
import logging
from savu.core.transport_setup import MPI_setup
from savu.core.transports.base_transport import BaseTransport
from savu.core.transports.hdf5_transport import Hdf5Transport
from savu.data.chunking import Chunking
from savu.plugins.savers.utils.hdf5_utils import Hdf5Utils
import dosna as dn
log = logging.getLogger(__name__)
DEFAULT_CONNECTION = "savu-data"
DEFAULT_BACKEND = "ceph"
DEFAULT_ENGINE = "mpi"
class DosnaTransport(BaseTransport):
"""Transport implementation to use DosNa for managing storage and
chunking"""
def __init__(self):
super(DosnaTransport, self).__init__()
self.dosna_connection = None
self.global_data = True
self.h5trans = Hdf5Transport()
self.data_flow = None
self.count = 0
self.hdf5 = None
self.hdf5_flag = True
self.files = []
self.final_dict = None
self.dataset_cache = []
self.n_plugins = 0
def _transport_initialise(self, options):
MPI_setup(options)
backend = options.get("dosna_backend") or DEFAULT_BACKEND
engine = options.get("dosna_engine") or DEFAULT_ENGINE
dosna_connection_name = options.get("dosna_connection") \
or DEFAULT_CONNECTION
dosna_connection_options = options.get("dosna_connection_options")
dosna_options = {}
dosna_options.update(dict(item.split('=')
for item in dosna_connection_options))
log.debug("DosNa is using backend %s engine %s and options %s",
backend, engine, dosna_options)
dn.use(engine, backend)
self.dosna_connection = dn.Connection(dosna_connection_name,
**dosna_options)
self.dosna_connection.connect()
# initially reading from a hdf5 file so Hdf5TransportData will be used
# for all datasets created in a loader
options['transport'] = 'hdf5'
def _transport_update_plugin_list(self):
plugin_list = self.exp.meta_data.plugin_list
saver_idx = plugin_list._get_savers_index()
remove = []
# check the saver plugin and turn off if it is hdf5
for idx in saver_idx:
if plugin_list.plugin_list[idx]['name'] == 'Hdf5Saver':
remove.append(idx)
for idx in sorted(remove, reverse=True):
plugin_list._remove(idx)
def _transport_pre_plugin_list_run(self):
# loaders have completed now revert back to DosnaTransport, so any
# output datasets created by a plugin will use this.
self.hdf5 = Hdf5Utils(self.exp)
exp_coll = self.exp._get_experiment_collection()
self.data_flow = self.exp.meta_data.plugin_list._get_dataset_flow()
self.exp.meta_data.set('transport', 'dosna')
plist = self.exp.meta_data.plugin_list
self.n_plugins = plist._get_n_processing_plugins()
self.final_dict = plist.plugin_list[-1]
for plugin_index in range(self.n_plugins):
self.exp._set_experiment_for_current_plugin(plugin_index)
self.files.append(
self._get_filenames(exp_coll['plugin_dict'][plugin_index]))
self._set_file_details(self.files[plugin_index])
self._setup_dosna_objects() # creates the dosna objects
def _transport_post_plugin_list_run(self):
if not self.dosna_connection:
return
for dataset in self.dataset_cache:
self.dosna_connection.del_dataset(dataset.name)
self.dataset_cache = []
self.dosna_connection.disconnect()
self.dosna_connection = None
def _transport_terminate_dataset(self, data):
if self.exp.meta_data.get('transport') == "hdf5":
self.hdf5._close_file(data)
@staticmethod
def _extract_digits(data):
result = []
for char in data:
if ord(char) in range(ord('0'), ord('9') + 1):
result.append(char)
return "".join(result)
def _create_dosna_dataset(self, object_id, data, key, current_and_next):
group_name = self.exp.meta_data.get(["group_name", key])
data.data_info.set('group_name', group_name)
try:
group_name = group_name + '_' + data.name
except AttributeError:
pass
shape = data.get_shape()
dataset_name = "{}_{}".format(group_name,
self._extract_digits(object_id))
if current_and_next is 0:
data.data = self.dosna_connection.create_dataset(dataset_name,
shape,
data.dtype)
else:
chunking = Chunking(self.exp, current_and_next)
chunks = chunking._calculate_chunking(shape, data.dtype)
data.data = self.dosna_connection.create_dataset(dataset_name,
shape,
data.dtype,
chunk_size=chunks)
self.dataset_cache.append(data.data)
def _setup_dosna_objects(self):
out_data_dict = self.exp.index["out_data"]
current_and_next = [0]*len(out_data_dict)
if 'current_and_next' in self.exp.meta_data.get_dictionary():
current_and_next = self.exp.meta_data.get('current_and_next')
count = 0
for key in out_data_dict.keys():
out_data = out_data_dict[key]
filename = self.exp.meta_data.get(["filename", key])
self._create_dosna_dataset(filename, out_data, key,
current_and_next[count])
count += 1
def _transport_pre_plugin(self):
if self.count == self.n_plugins - 1:
self.__set_hdf5_transport()
    def _transport_post_plugin(self):
        """Per-plugin book-keeping; hand the final plugin's output to hdf5.

        The transport flag is flipped to 'hdf5' one plugin early so the
        last plugin's output datasets are created through the hdf5
        transport, whose own post-plugin hook is then invoked.
        """
        if self.count == self.n_plugins - 2:
            # the next plugin is the last one: its outputs must be hdf5-backed
            self.exp.meta_data.set('transport', 'hdf5')
        elif self.count == self.n_plugins - 1:  # final plugin
            self.h5trans.exp = self.exp
            self.h5trans.hdf5 = Hdf5Utils(self.exp)
            self.h5trans._transport_post_plugin()
        self.count += 1
    def __set_hdf5_transport(self):
        """Flag the hdf5 transport and prepare the final plugin's h5 files."""
        self.hdf5_flag = True
        self.exp.meta_data.set('transport', 'hdf5')
        files = self._get_filenames(self.final_dict)
        self._set_file_details(files)
        self._setup_h5_files()
9b675cf1d5db0301a15f13b493bb22f2bde6233d | 2,762 | py | Python | post_office/settings.py | ilikerobots/django-post_office | efe6e9fced5ccb6fbdb00a796f07a60ece945bd4 | [
"MIT"
] | null | null | null | post_office/settings.py | ilikerobots/django-post_office | efe6e9fced5ccb6fbdb00a796f07a60ece945bd4 | [
"MIT"
] | null | null | null | post_office/settings.py | ilikerobots/django-post_office | efe6e9fced5ccb6fbdb00a796f07a60ece945bd4 | [
"MIT"
] | null | null | null | import warnings
from django.conf import settings
from django.core.cache.backends.base import InvalidCacheBackendError
from django.template import engines as template_engines
from .compat import import_attribute, get_cache
def get_backend(alias='default'):
    """Return the email backend path registered under *alias*."""
    backends = get_available_backends()
    return backends[alias]
def get_available_backends():
    """ Returns a dictionary of defined backend classes. For example:
    {
        'default': 'django.core.mail.backends.smtp.EmailBackend',
        'locmem': 'django.core.mail.backends.locmem.EmailBackend',
    }
    """
    # Resolution order: POST_OFFICE['BACKENDS'] -> deprecated
    # POST_OFFICE['EMAIL_BACKEND'] -> Django's EMAIL_BACKEND -> SMTP.
    backends = get_config().get('BACKENDS', {})
    if backends:
        return backends
    # Try to get backend settings from old style
    # POST_OFFICE = {
    #     'EMAIL_BACKEND': 'mybackend'
    # }
    backend = get_config().get('EMAIL_BACKEND')
    if backend:
        warnings.warn('Please use the new POST_OFFICE["BACKENDS"] settings',
                      DeprecationWarning)
        backends['default'] = backend
        return backends
    # Fall back to Django's EMAIL_BACKEND definition
    backends['default'] = getattr(
        settings, 'EMAIL_BACKEND',
        'django.core.mail.backends.smtp.EmailBackend')
    # If EMAIL_BACKEND is set to use PostOfficeBackend
    # and POST_OFFICE_BACKEND is not set, fall back to SMTP
    if 'post_office.EmailBackend' in backends['default']:
        backends['default'] = 'django.core.mail.backends.smtp.EmailBackend'
    return backends
def get_cache_backend():
    """Return the cache backend Post Office should use, or ``None``.

    Prefers a cache named "post_office"; otherwise falls back to the
    "default" cache when it is usable.
    """
    if not hasattr(settings, 'CACHES'):
        return None
    if "post_office" in settings.CACHES:
        return get_cache("post_office")
    try:
        # Sometimes this raises InvalidCacheBackendError, which is ok too
        return get_cache("default")
    except InvalidCacheBackendError:
        return None
def get_config():
    """Return Post Office's configuration dict (``{}`` when unset).

    e.g.::

        POST_OFFICE = {
            'BATCH_SIZE': 1000
        }
    """
    config = getattr(settings, 'POST_OFFICE', {})
    return config
def _get_setting(name, default):
    """Look up *name* in the POST_OFFICE config, falling back to *default*."""
    return get_config().get(name, default)


def get_batch_size():
    """Return how many emails are sent per batch (default: 100)."""
    return _get_setting('BATCH_SIZE', 100)


def get_threads_per_process():
    """Return the number of worker threads per process (default: 5)."""
    return _get_setting('THREADS_PER_PROCESS', 5)


def get_default_priority():
    """Return the default email priority (default: 'medium')."""
    return _get_setting('DEFAULT_PRIORITY', 'medium')


def get_log_level():
    """Return the logging verbosity level (default: 2)."""
    return _get_setting('LOG_LEVEL', 2)


def get_sending_order():
    """Return the queue ordering fields (default: ['-priority'])."""
    return _get_setting('SENDING_ORDER', ['-priority'])


def get_template_engine():
    """Return the configured Django template engine (default: 'django')."""
    using = _get_setting('TEMPLATE_ENGINE', 'django')
    return template_engines[using]
# Field class used for the JSON context column; overridable through
# POST_OFFICE['CONTEXT_FIELD_CLASS'].
CONTEXT_FIELD_CLASS = get_config().get('CONTEXT_FIELD_CLASS',
                                       'jsonfield.JSONField')
context_field_class = import_attribute(CONTEXT_FIELD_CLASS)
72d62c6424e10435c726bc2442e3d702901b7b8a | 4,455 | py | Python | benchmark/startQiskit_noisy1624.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1624.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1624.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=56
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Return the phase oracle Z_f: |x> -> (-1)^f(x)|x> on n qubits.

    For every bitstring where f evaluates to "1", X gates map that basis
    state onto |11...1>, a multi-controlled pi phase flips its sign, and
    the X gates are undone.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the measured n-qubit search circuit.

    Prepares a superposition, then applies floor(sqrt(2^n)*pi/4) rounds of
    the Zf oracle followed by a fixed, auto-generated gate sequence (the
    '# number=' comments are generator bookkeeping), and finally measures
    every qubit into the classical register.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=41
    prog.cz(input_qubit[1],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.z(input_qubit[1]) # number=37
    prog.h(input_qubit[0]) # number=51
    prog.cz(input_qubit[1],input_qubit[0]) # number=52
    prog.h(input_qubit[0]) # number=53
    prog.h(input_qubit[4]) # number=21
    prog.x(input_qubit[2]) # number=39
    Zf = build_oracle(n, f)
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=33
        prog.h(input_qubit[0]) # number=48
        prog.cz(input_qubit[3],input_qubit[0]) # number=49
        prog.h(input_qubit[0]) # number=50
        prog.z(input_qubit[3]) # number=46
        prog.cx(input_qubit[3],input_qubit[0]) # number=47
        prog.x(input_qubit[4]) # number=40
        prog.cx(input_qubit[3],input_qubit[0]) # number=35
        prog.x(input_qubit[0]) # number=9
        prog.cx(input_qubit[0],input_qubit[1]) # number=29
        prog.x(input_qubit[1]) # number=30
        prog.cx(input_qubit[0],input_qubit[1]) # number=31
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[1]) # number=44
        prog.x(input_qubit[3]) # number=12
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.cx(input_qubit[4],input_qubit[3]) # number=54
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.y(input_qubit[1]) # number=32
        prog.x(input_qubit[1]) # number=23
        prog.cx(input_qubit[4],input_qubit[3]) # number=55
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Search for the all-zero key on 5 qubits; f is the indicator function.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = FakeVigo()
    sample_shot =7924
    # Run on the noisy mock device and collect measurement counts.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # NOTE(review): the same mock backend is re-instantiated for
    # transpilation -- presumably generator output; harmless duplication.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist counts, depth and the transpiled circuit for later analysis.
    writefile = open("../data/startQiskit_noisy1624.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
a1f2e9eb943fc76f9b2e20cbb4a9013b8e222c9d | 8,397 | py | Python | myven/lib/python3.8/site-packages/ansible/modules/monitoring/zabbix/zabbix_hostmacro.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/modules/monitoring/zabbix/zabbix_hostmacro.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/modules/monitoring/zabbix/zabbix_hostmacro.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_hostmacro
short_description: Zabbix host macro creates/updates/deletes
description:
- manages Zabbix host macros, it can create, update or delete them.
version_added: "2.0"
author:
- "(@cave)"
- Dean Hailin Song
requirements:
- "python >= 2.6"
- zabbix-api
options:
host_name:
description:
- Name of the host.
required: true
macro_name:
description:
- Name of the host macro.
required: true
macro_value:
description:
- Value of the host macro.
required: true
state:
description:
- State of the macro.
- On C(present), it will create if macro does not exist or update the macro if the associated data is different.
- On C(absent) will remove a macro if it exists.
required: false
choices: ['present', 'absent']
default: "present"
force:
description:
- Only updates an existing macro if set to C(yes).
default: 'yes'
choices: ['yes', 'no']
version_added: 2.5
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Create a new host macro or update an existing macro's value
local_action:
module: zabbix_hostmacro
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
macro_name: Example macro
macro_value: Example value
state: present
'''
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    # Extend the ZabbixAPI
    # Since the zabbix-api python module too old (version 1.0, no higher version so far).
    class ZabbixAPIExtends(ZabbixAPI):
        # Accepts (and ignores) extra keyword arguments the old client
        # does not understand, e.g. validate_certs.
        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
    HAS_ZABBIX_API = True
except ImportError:
    # zabbix-api is optional at import time; main() reports the missing
    # dependency through module.fail_json().
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class HostMacro(object):
    """CRUD helper for Zabbix host macros.

    Wraps the Zabbix API client; results and errors are reported through
    the Ansible module (exit_json/fail_json terminate the process).
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # get host id by host name
    def get_host_id(self, host_name):
        """Return the host id for *host_name*; fail the module if unknown."""
        try:
            host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': host_name}})
            if not host_list:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                return host_list[0]['hostid']
        except Exception as e:
            self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))

    # get host macro
    def get_host_macro(self, macro_name, host_id):
        """Return the macro object named *macro_name* on *host_id*, or None."""
        try:
            host_macro_list = self._zapi.usermacro.get(
                {"output": "extend", "selectSteps": "extend", 'hostids': [host_id], 'filter': {'macro': '{$' + macro_name + '}'}})
            if host_macro_list:
                return host_macro_list[0]
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))

    # create host macro
    def create_host_macro(self, macro_name, macro_value, host_id):
        """Create the macro and exit the module reporting the change."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))

    # update host macro
    def update_host_macro(self, host_macro_obj, macro_name, macro_value):
        """Update the macro's value; exit unchanged when already current."""
        host_macro_id = host_macro_obj['hostmacroid']
        if host_macro_obj['macro'] == '{$' + macro_name + '}' and host_macro_obj['value'] == macro_value:
            self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
            self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
        except Exception as e:
            # message typo fixed: was "Failed to updated host macro ..."
            self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))

    # delete host macro
    def delete_host_macro(self, host_macro_obj, macro_name):
        """Delete the macro and exit the module reporting the change."""
        host_macro_id = host_macro_obj['hostmacroid']
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.usermacro.delete([host_macro_id])
            self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))
def main():
    """Ansible entry point: parse parameters and enforce the macro state."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            host_name=dict(type='str', required=True),
            macro_name=dict(type='str', required=True),
            macro_value=dict(type='str', required=True),
            state=dict(default="present", choices=['present', 'absent']),
            timeout=dict(type='int', default=10),
            force=dict(type='bool', default=True)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    host_name = module.params['host_name']
    # Zabbix stores user macro names upper-cased.
    macro_name = (module.params['macro_name']).upper()
    macro_value = module.params['macro_value']
    state = module.params['state']
    timeout = module.params['timeout']
    force = module.params['force']
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
                               validate_certs=validate_certs)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    host_macro_class_obj = HostMacro(module, zbx)
    if host_name:
        host_id = host_macro_class_obj.get_host_id(host_name)
        host_macro_obj = host_macro_class_obj.get_host_macro(macro_name, host_id)
    if state == 'absent':
        if not host_macro_obj:
            module.exit_json(changed=False, msg="Host Macro %s does not exist" % macro_name)
        else:
            # delete a macro
            host_macro_class_obj.delete_host_macro(host_macro_obj, macro_name)
    else:
        if not host_macro_obj:
            # create host macro
            host_macro_class_obj.create_host_macro(macro_name, macro_value, host_id)
        elif force:
            # update host macro
            host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value)
        else:
            module.exit_json(changed=False, result="Host macro %s already exists and force is set to no" % macro_name)
if __name__ == '__main__':
    main()
2a1460588ec2274dba4956635e0a46dcd215a2f9 | 5,593 | py | Python | models/Deep-learning/dataset/feature_add3_pos_flag_floats_ddg_dataset.py | jingyi7777/CasRx_guide_efficiency | c9e900e4c4a73215f09852bd621b30e8dcb039e8 | [
"MIT"
] | null | null | null | models/Deep-learning/dataset/feature_add3_pos_flag_floats_ddg_dataset.py | jingyi7777/CasRx_guide_efficiency | c9e900e4c4a73215f09852bd621b30e8dcb039e8 | [
"MIT"
] | null | null | null | models/Deep-learning/dataset/feature_add3_pos_flag_floats_ddg_dataset.py | jingyi7777/CasRx_guide_efficiency | c9e900e4c4a73215f09852bd621b30e8dcb039e8 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import random
#from dataset.dataset_filtered_utils import *
from dataset.dataset_utils import *
#from dataset.dataset_utils import normalize
def feature_add3_pos_flag_floats_ddg_dataset(args):
    """Build train/val/test tf.data pipelines for guide-efficiency modelling.

    Each example pairs a one-hot encoded guide sequence with a normalised
    7-float vector: [is_5UTR, is_CDS, is_3UTR, UTR5_position, CDS_position,
    UTR3_position, ddG], where ddG is the energy required to unfold the
    guide-binding region of the target (native MFE minus the MFE with that
    region constrained to be unpaired).

    Parameters
    ----------
    args : argparse.Namespace
        Uses ``flanklength``, ``regression``, ``kfold`` and ``split``.

    Returns
    -------
    tuple
        (train_dataset, val_dataset, test_dataset), shuffled and batched
        at 128.
    """
    dataframe = pd.read_csv(
        'dataset/integrated_guide_feature_filtered_f24_mismatch3_all_flanks.csv')
    genes_filter_1 = ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B', 'RPL31',
                      'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H', 'EEF2', 'RPS11',
                      'SNRPD2', 'RPL37', 'SF3B3', 'DDX51', 'RPL7', 'RPS9', 'KARS',
                      'SF3A1', 'RPL32', 'PSMB2', 'RPS7', 'EIF4A3', 'U2AF1', 'PSMA1',
                      'PHB', 'POLR2D', 'RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2',
                      'SUPT5H', 'RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1',
                      'RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L', 'RPL5',
                      'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']
    dataframe = dataframe[dataframe['gene'].isin(genes_filter_1)]  # filter out 1 gene
    guides = dataframe['guide'].values
    num_examples = len(guides)

    lin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict_contrafold()
    encoded_guides = [one_hot_encode_sequence(guide) for guide in guides]
    # NOTE: the previous version also computed an unused one-hot encoding of
    # the LinearFold structures; that dead computation has been removed.

    # Direct-repeat disruption flag: 1 when the predicted fold of the first
    # 36 nt deviates from the reference direct-repeat structure.
    ref_dr = '.....(((((((.(((....))).))))))).....'
    linearfold_dr = [int(lin_seq_dict[guide][0:36] != ref_dr) for guide in guides]
    dr_disr_num = sum(linearfold_dr)
    print('dr_disr_num:' + str(dr_disr_num))

    # |MFE - 6.48|: deviation of the whole-guide fold energy from the
    # direct-repeat-only energy (currently not fed into the feature vector).
    linearfold_vals = [abs(lin_result_dict[guide] - 6.48) for guide in guides]

    # ddG of the target site: native MFE minus MFE of the unfolded-site
    # constraint, i.e. the energy needed to open the guide-binding region.
    flank_l = int(args.flanklength)
    target_col = 'nearby_seq_all_' + str(flank_l)
    targets = dataframe[target_col].values
    _, lin_result_flanks_dict = \
        parse_target_flanks_linearfold_fasta_into_dict_contrafold(flank_len=flank_l)
    _, unfold_lin_result_flanks_dict = \
        parse_target_flanks_constraints_linearfold_fasta_into_dict_contrafold(flank_len=flank_l)
    ddg = [lin_result_flanks_dict[t] - unfold_lin_result_flanks_dict[t]
           for t in targets]

    classes = dataframe['binary_relative_ratio_075f'].values
    outputs = dataframe['relative_ratio'].values if args.regression \
        else classes.astype(np.float32)

    other_single_value_inputs = np.empty((7, num_examples))
    other_single_value_inputs[0, :] = dataframe['is_5UTR'].values
    other_single_value_inputs[1, :] = dataframe['is_CDS'].values
    other_single_value_inputs[2, :] = dataframe['is_3UTR'].values
    other_single_value_inputs[3, :] = dataframe['UTR5_position'].values
    other_single_value_inputs[4, :] = dataframe['CDS_position'].values
    other_single_value_inputs[5, :] = dataframe['UTR3_position'].values
    other_single_value_inputs[6, :] = ddg

    all_cols = [encoded_guides,
                normalize(other_single_value_inputs.T),
                outputs]

    # FIX: was 'args.kfold == None'; identity is the correct None test.
    if args.kfold is None:
        tr, val, te = create_gene_splits(dataframe['gene'].values, all_cols)
    else:
        tr, val, te = create_gene_splits_filter1_kfold(
            dataframe['gene'].values, all_cols, args.kfold, args.split)

    # The last column of every split is the label; the rest are model inputs.
    tr_out, tr = tr[-1], tuple(tr[:-1])
    val_out, val = val[-1], tuple(val[:-1])
    te_out, te = te[-1], tuple(te[:-1])

    train_dataset = tf.data.Dataset.from_tensor_slices((tr, tr_out))
    val_dataset = tf.data.Dataset.from_tensor_slices((val, val_out))
    test_dataset = tf.data.Dataset.from_tensor_slices((te, te_out))
    # shuffle and batch
    train_dataset = prep_dataset(train_dataset, batch_size=128)
    val_dataset = prep_dataset(val_dataset, batch_size=128)
    test_dataset = prep_dataset(test_dataset, batch_size=128)
    return train_dataset, val_dataset, test_dataset
f71e6662c54d213d3019b65fbc96d3cbbbbdac09 | 6,350 | py | Python | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | null | null | null | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | 1 | 2021-02-23T12:41:14.000Z | 2021-02-23T12:41:14.000Z | google/cloud/vision_v1p2beta1/services/image_annotator/transports/base.py | dylancaponi/python-vision | f94fb5b03bf8932e75967249292d23fed2ae2213 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 as operations # type: ignore
try:
    # Embed the installed google-cloud-vision version in the client info
    # (user-agent) attached to API requests.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-vision",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from source); use defaults.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ImageAnnotatorTransport(abc.ABC):
    """Abstract transport class for ImageAnnotator."""
    # OAuth scopes requested when credentials are resolved from the environment.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/cloud-vision",
    )
    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: typing.Optional[str] = None,
        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
        quota_project_id: typing.Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scope (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )
        elif credentials is None:
            # Resolve Application Default Credentials from the environment.
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Both RPCs retry on transient ServiceUnavailable/DeadlineExceeded
        # errors with exponential backoff and a 600s overall timeout.
        self._wrapped_methods = {
            self.batch_annotate_images: gapic_v1.method.wrap_method(
                self.batch_annotate_images,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.async_batch_annotate_files: gapic_v1.method.wrap_method(
                self.async_batch_annotate_files,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
                    ),
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    @property
    def batch_annotate_images(
        self,
    ) -> typing.Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        typing.Union[
            image_annotator.BatchAnnotateImagesResponse,
            typing.Awaitable[image_annotator.BatchAnnotateImagesResponse],
        ],
    ]:
        """Return the callable for the batch annotate images RPC."""
        raise NotImplementedError()
    @property
    def async_batch_annotate_files(
        self,
    ) -> typing.Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        """Return the callable for the async batch annotate files RPC."""
        raise NotImplementedError()
# Public re-export surface of this module.
__all__ = ("ImageAnnotatorTransport",)
| 37.797619 | 85 | 0.639685 |
7eb57dfbdc0e941164756f4527529055eb231abf | 4,232 | py | Python | openhab_creator/models/configuration/location/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | 1 | 2021-11-16T22:48:26.000Z | 2021-11-16T22:48:26.000Z | openhab_creator/models/configuration/location/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | null | null | null | openhab_creator/models/configuration/location/__init__.py | DerOetzi/openhab_creator | 197876df5aae84192c34418f6b9a7cfcee23b195 | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import abstractmethod
from importlib import import_module
from typing import TYPE_CHECKING, Dict, Final, List, Optional, Type
from openhab_creator.exception import RegistryException
from openhab_creator.models.configuration.baseobject import BaseObject
from openhab_creator.models.configuration.equipment import EquipmentType
if TYPE_CHECKING:
from openhab_creator.models.configuration import Configuration
from openhab_creator.models.common import Scene
from openhab_creator.models.configuration.equipment import Equipment
class Location(BaseObject):
    """Abstract base for a physical location holding equipment.

    Subclasses provide :attr:`area` and :attr:`typed`; locations can be
    nested through :attr:`parent`.
    """
    def __init__(self,
                 configuration: Configuration,
                 name: str,
                 identifier: Optional[str] = None,
                 equipment: Optional[List[Dict]] = None):
        super().__init__(name, identifier)
        # True as soon as any contained equipment is time controlled.
        self.is_timecontrolled: bool = False
        self._init_equipment(
            configuration, [] if equipment is None else equipment)
        self.parent = None
    def _init_equipment(self, configuration: Configuration, equipment: List[Dict]) -> None:
        """Instantiate all equipment definitions and aggregate their flags."""
        self.equipment: List[Equipment] = []
        for equipment_definition in equipment:
            # Rebinding 'equipment' here is safe: the for-loop already holds
            # the iterator over the original argument list.
            equipment = EquipmentType.new(configuration=configuration,
                                          location=self,
                                          **equipment_definition)
            self.is_timecontrolled = self.is_timecontrolled or equipment.is_timecontrolled
            self.equipment.append(equipment)
    @property
    @abstractmethod
    def area(self) -> str:
        """Area tag value, supplied by concrete subclasses."""
        pass
    @property
    @abstractmethod
    def typed(self) -> str:
        """Location type name (e.g. room/floor), supplied by subclasses."""
        pass
    @property
    def has_parent(self) -> bool:
        return self.parent is not None
    @property
    def toplevel(self) -> Location:
        """Walk up the parent chain to the outermost location."""
        location = self
        while location.has_parent:
            location = location.parent
        return location
    @property
    def location_tags(self) -> Dict[str, str]:
        """Tags for this location merged with all ancestors' tags.

        NOTE: on key collisions (e.g. 'area') the parent's value wins,
        because the parent dict is unpacked last.
        """
        tags = {'area': self.area, self.typed.lower(): self.name}
        if self.has_parent:
            tags = {**tags, **self.parent.location_tags}
        return tags
    @property
    def autoactive_id(self) -> str:
        """Item id for the auto-active switch of this location."""
        return f'autoActive{self.identifier}'
    @property
    def autoguest_id(self) -> str:
        """Item id for the auto-guest switch of this location."""
        return f'autoGuest{self.identifier}'
    @property
    def autoweekend_id(self) -> str:
        """Item id for the auto-weekend switch of this location."""
        return f'autoWeekend{self.identifier}'
    @property
    def autoequipment(self) -> str:
        """Item id for the auto-equipment switch of this location."""
        return f'autoEquipment{self.identifier}'
    def sceneassignment_id(self, scene: Scene) -> str:
        """Item id binding *scene* to this location."""
        return f'{scene.assignment_id}_{self.identifier}'
    def __str__(self) -> str:
        return self.identifier
class LocationFactory():
    """Registry/factory mapping a ``typed`` string to a Location subclass."""

    registry: Dict[str, Type['Location']] = {}
    initialized: bool = False

    @classmethod
    def _init(cls):
        """Import all location modules once so their classes self-register."""
        if not cls.initialized:
            import_module(
                'openhab_creator.models.configuration.location.indoor')
            import_module(
                'openhab_creator.models.configuration.location.indoor.floors')
            import_module(
                'openhab_creator.models.configuration.location.indoor.rooms')
            import_module(
                'openhab_creator.models.configuration.location.indoor.buildings')
            import_module(
                'openhab_creator.models.configuration.location.outdoors')
            cls.initialized = True

    @classmethod
    def register(cls, location_cls: Type[Location]) -> None:
        """Register *location_cls* under its lower-cased class name."""
        cls.registry[location_cls.__name__.lower()] = location_cls

    @classmethod
    def new(cls, configuration: Configuration, **args: Dict) -> Location:
        """Instantiate the Location subclass named by ``args['typed']``.

        Raises:
            RegistryException: when no class is registered for the type.
        """
        cls._init()
        # 'typed' is lowered once here; the registry keys are lower-cased,
        # so the previous second .lower() on lookup was redundant.
        location_type = args.pop('typed').lower()
        location_cls = cls.registry.get(location_type)
        if location_cls is None:
            raise RegistryException(
                f'No class for location type: {location_type}')
        return location_cls(configuration=configuration, **args)
class LocationType():
    """Class decorator that registers a Location subclass with the factory.

    Positional arguments are accepted and stored but currently unused.
    """

    def __init__(self, *args):
        self.args = args

    def __call__(self, location_cls: Type[Location]):
        """Register *location_cls* and hand it back unchanged."""
        decorated = location_cls
        LocationFactory.register(decorated)
        return decorated
85cae6df0301c65c4ac94acc2ffb851bbfc29501 | 9,724 | py | Python | mediagoblin/gmg_commands/reprocess.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 60 | 2015-01-17T01:19:47.000Z | 2021-09-17T01:25:47.000Z | mediagoblin/gmg_commands/reprocess.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 12 | 2015-02-03T09:14:42.000Z | 2020-12-04T12:18:03.000Z | mediagoblin/gmg_commands/reprocess.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 23 | 2015-08-18T01:32:50.000Z | 2021-09-05T23:22:55.000Z | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import argparse
import os
from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.processing import (
ProcessorDoesNotExist, ProcessorNotEligible,
get_entry_and_processing_manager, get_processing_manager_for_type,
ProcessingManagerDoesNotExist)
def reprocess_parser_setup(subparser):
    """Register the ``reprocess`` sub-commands and their CLI arguments.

    Sub-commands added: ``available`` (list processors for a media id/type),
    ``run`` (reprocess a single media entry), ``thumbs`` (regenerate
    thumbnails), ``initial`` (retry all failed media) and ``bulk_run``
    (run a processor over a whole media type/state).
    """
    subparser.add_argument(
        '--celery',
        action='store_true',
        help="Don't process eagerly, pass off to celery")

    # The chosen sub-command is dispatched on in reprocess() below.
    subparsers = subparser.add_subparsers(dest="reprocess_subcommand")

    ###################
    # available command
    ###################

    available_parser = subparsers.add_parser(
        "available",
        help="Find out what actions are available for this media")

    available_parser.add_argument(
        "id_or_type",
        help="Media id or media type to check")

    available_parser.add_argument(
        "--action-help",
        action="store_true",
        help="List argument help for each action available")

    available_parser.add_argument(
        "--state",
        help="The state of media you would like to reprocess")

    #############
    # run command
    #############

    run_parser = subparsers.add_parser(
        "run",
        help="Run a reprocessing on one or more media")

    run_parser.add_argument(
        'media_id',
        help="The media_entry id(s) you wish to reprocess.")

    run_parser.add_argument(
        'reprocess_command',
        help="The reprocess command you intend to run")

    # Everything after the command name is forwarded verbatim to the
    # processor's own argument parser.
    run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help="rest of arguments to the reprocessing tool")

    ################
    # thumbs command
    ################

    thumbs = subparsers.add_parser(
        'thumbs',
        help='Regenerate thumbs for all processed media')

    thumbs.add_argument(
        '--size',
        nargs=2,
        type=int,
        metavar=('max_width', 'max_height'))

    #################
    # initial command
    #################

    subparsers.add_parser(
        'initial',
        help='Reprocess all failed media')

    ##################
    # bulk_run command
    ##################

    bulk_run_parser = subparsers.add_parser(
        'bulk_run',
        help='Run reprocessing on a given media type or state')

    bulk_run_parser.add_argument(
        'type',
        help='The type of media you would like to process')

    bulk_run_parser.add_argument(
        '--state',
        default='processed',
        nargs='?',
        help='The state of the media you would like to process. Defaults to' \
             " 'processed'")

    bulk_run_parser.add_argument(
        'reprocess_command',
        help='The reprocess command you intend to run')

    bulk_run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help='The rest of the arguments to the reprocessing tool')

    ###############
    # help command?
    ###############
def available(args):
    """List the processors available for a media id or a media type.

    ``args.id_or_type`` may be a numeric media id or a media type name;
    ``args.state`` optionally filters processors by media state and
    ``args.action_help`` additionally prints each processor's argument help.
    """
    # Get the media type, either by looking up media id, or by specific type
    try:
        media_id = int(args.id_or_type)
        media_entry, manager = get_entry_and_processing_manager(media_id)
        media_type = media_entry.media_type
    except ValueError:
        media_type = args.id_or_type
        media_entry = None
        manager = get_processing_manager_for_type(media_type)
    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
        if entry is None:
            print('No media entry with id {0}'.format(args.id_or_type))
        else:
            print('No such processing manager for {0}'.format(entry.media_type))
        # Without a manager there is nothing to list; bail out instead of
        # falling through and hitting a NameError on the undefined `manager`.
        return

    if args.state:
        processors = manager.list_all_processors_by_state(args.state)
    elif media_entry is None:
        processors = manager.list_all_processors()
    else:
        processors = manager.list_eligible_processors(media_entry)

    print("Available processors:")
    print("=====================")
    print("")

    if args.action_help:
        for processor in processors:
            print(processor.name)
            print("-" * len(processor.name))

            parser = processor.generate_parser()
            parser.print_help()
            print("")
    else:
        for processor in processors:
            if processor.description:
                print(" - %s: %s" % (processor.name, processor.description))
            else:
                print(" - %s" % processor.name)
def run(args, media_id=None):
    """Run one reprocessing action on a single media entry.

    Uses *media_id* when given (for bulk callers), otherwise
    ``args.media_id``. The action name comes from ``args.reprocess_command``
    and its extra arguments from ``args.reprocess_args``.
    """
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print('No such processor "%s" for media with id "%s"' % (
                args.reprocess_command, media_entry.id))
            return
        except ProcessorNotEligible:
            print('Processor "%s" exists but media "%s" is not eligible' % (
                args.reprocess_command, media_entry.id))
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        # Guard against a nonexistent id: `entry` would be None and the
        # original message formatting would raise AttributeError.
        if entry is None:
            print('No media entry with id {0}'.format(media_id))
        else:
            print('No such processing manager for {0}'.format(entry.media_type))
def bulk_run(args):
    """Reprocess every media entry of ``args.type`` in state ``args.state``."""
    entries = MediaEntry.query.filter_by(media_type=args.type,
                                         state=args.state)
    for media in entries:
        run(args, media.id)
def thumbs(args):
    """
    Regenerate thumbs for all processed media.

    Entries whose manager lacks a 'resize' processor (or which are not
    eligible) are skipped rather than aborting the whole bulk run.
    """
    query = MediaEntry.query.filter_by(state='processed')
    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print('No such processor "%s" for media with id "%s"' % (
                    'resize', media_entry.id))
                # Skip this entry but keep regenerating the rest
                # (a `return` here would abort the whole bulk run).
                continue
            except ProcessorNotEligible:
                print('Processor "%s" exists but media "%s" is not eligible' % (
                    'resize', media_entry.id))
                continue

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{0} {1} {2}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)

            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print('No such processing manager for {0}'.format(entry.media_type))
def initial(args):
    """Re-run initial processing for every media entry in state 'failed'."""
    for failed_entry in MediaEntry.query.filter_by(state='failed'):
        try:
            media_entry, _manager = get_entry_and_processing_manager(
                failed_entry.id)
            run_process_media(media_entry, reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            print('No such processing manager for {0}'.format(
                failed_entry.media_type))
def reprocess(args):
    """Entry point: dispatch to the requested reprocess sub-command."""
    # Run eagerly unless explicitly told to hand the work off to celery.
    if not args.celery:
        os.environ['CELERY_ALWAYS_EAGER'] = 'true'

    commands_util.setup_app(args)

    handlers = {
        "run": run,
        "available": available,
        "bulk_run": bulk_run,
        "thumbs": thumbs,
        "initial": initial,
    }
    handler = handlers.get(args.reprocess_subcommand)
    # Unknown sub-commands fall through silently, matching the original
    # if/elif chain.
    if handler is not None:
        handler(args)
| 31.777778 | 80 | 0.626594 |
044f55ab24dcd858c3d458abb27b1f3b783b017e | 378 | py | Python | venv/Lib/site-packages/mcipc/rcon/je/commands/list.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/je/commands/list.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/mcipc/rcon/je/commands/list.py | Svesnav2/Discord-Bot-Minecraft-server-status | ee34948e741930567a3adb557197523f9d32ace1 | [
"Unlicense"
] | null | null | null | """Implementation of the list command."""
from mcipc.rcon.client import Client
from mcipc.rcon.functions import parsed
from mcipc.rcon.response_types.players import parse
__all__ = ['list']
# pylint: disable=W0622
@parsed(parse)
def list(client: Client, uuids: bool = False) -> str:
"""Returns the players."""
return client.run('list', 'uuids' if uuids else None)
| 22.235294 | 57 | 0.719577 |
ea9a47fcbc427821fe2e0407ee058c585b37e0e9 | 3,077 | py | Python | tps/judge/tasktype.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 5 | 2019-02-26T06:10:43.000Z | 2021-07-24T17:11:45.000Z | tps/judge/tasktype.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 3 | 2019-08-15T13:56:03.000Z | 2021-06-10T18:43:16.000Z | tps/judge/tasktype.py | jonathanirvings/tps-web | 46519347d4fc8bdced9b5bceb6cdee5ea4e508f2 | [
"MIT"
] | 2 | 2018-12-28T13:12:59.000Z | 2020-12-25T18:42:13.000Z | class TaskType(object):
    def __init__(self, judge):
        """Store the judge backend this task type delegates its calls to."""
        self.judge = judge
def initialize_problem(
self,
problem_code,
code_name,
task_type_parameters,
helpers,
time_limit,
memory_limit,
):
"""
Initializes a new problem in the judge or updates an exisiting one.
problem_code (str): A string to reference this problem in the judge. This should be a unique string.
In case a problem with the same name have already been initialized,
ths judge will have to update (or simply delete and recreate) that problem.
code_name (str): The name for submitted solution files.
task_type_parameters(str): A json encoded dictionary containing task type parameters
helpers ([(str, FileModel)]): a list of files that are required when judging submissions
provided by the judges. Each element is a tuple of the form (name, file)
time_limit (float): Measured in seconds
memory_limit (int): Measured in Megabytes
:return (bool, str|None): returns a tuple, the first element is True if the problem was
created/updated successfully, and False otherwise. The second argument provides details
of the process, e.g. it migt contain the errors why the problem couldn't be created.
"""
raise NotImplementedError
def add_testcase(self, problem_code, testcase_code, input_file):
"""
Adds a testcase to a problem
problem_code (str): code used to reference the problem.
The problem should be previously initialized by calling initialize_problem
testcase_code (str): Name of the testcase. This value should be unique
among testcases of this problem.
input_file (FileModel)
:return (bool, str|None): returns a tuple, the first element is True if the
testcase was added successfully, and False otherwise.
The second argument provides details of the process,
e.g. it migt contain the errors why the testcase couldn't be created.
"""
raise NotImplementedError
def generate_output(self, problem_code, testcase_code, language, solution_file):
"""
Runs a solution on the given test-case and returns the output.
problem_code (str): code used to reference the problem.
The problem should be previously initialized by calling initialize_problem
testcase_code (str): Name of the testcase. The testcase should be previously added
by calling add_testcase.
language (str): the programming language of this solution
solution_file ((str, FileModel)): A tuple representing a single solution.
Each element is a tuple (name, file).
:return EvaluationResult: The output and the details of execution of solution
"""
raise NotImplementedError
def get_parameters_form(self):
"""
:return:
"""
# TODO: write the doc string
raise NotImplementedError
| 43.957143 | 108 | 0.669158 |
4bcb2d636181dfa8c224365c75c7782a8feb40d3 | 3,656 | py | Python | xmnlp/lexical/lexical_model.py | SeanLee97/xmnlp | f52fc62fee4f01fe95786f063e1e52e90f620c8a | [
"Apache-2.0"
] | 860 | 2018-02-14T00:56:52.000Z | 2022-03-31T13:34:21.000Z | xmnlp/lexical/lexical_model.py | 123bbqw/xmnlp | 0ffad8616c248845b18f819c0ac0465a4ec45f5d | [
"Apache-2.0"
] | 29 | 2018-02-14T01:59:26.000Z | 2022-03-30T11:46:00.000Z | xmnlp/lexical/lexical_model.py | 123bbqw/xmnlp | 0ffad8616c248845b18f819c0ac0465a4ec45f5d | [
"Apache-2.0"
] | 178 | 2018-02-28T13:35:12.000Z | 2022-03-12T01:36:33.000Z | # -*- coding: utf-8 -*-
# -------------------------------------------#
# author: sean lee #
# email: xmlee97@gmail.com #
# -------------------------------------------#
""" XMNLP - Lexical
Model Tree:
lexical
├── label2id.json
├── lexical.onnx
├── trans.npy
└── vocab.txt
"""
import os
import json
from typing import List, Tuple
import numpy as np
from tokenizers import BertWordPieceTokenizer
from xmnlp.base_model import BaseModel
from xmnlp.utils import rematch
MAX_LEN = 512
class LexicalModel(BaseModel):
    """ONNX wrapper running the lexical (CRF) model on encoded inputs."""

    def predict(self, token_ids: np.ndarray, segment_ids: np.ndarray) -> np.ndarray:
        """Feed token/segment ids to the ONNX session; return CRF node scores."""
        feed = {
            'Input-Token:0': token_ids.astype('float32'),
            'Input-Segment:0': segment_ids.astype('float32'),
        }
        return self.sess.run(['crf/sub_1:0'], feed)
class LexicalDecoder:
    """Tokenizes text, scores it with LexicalModel and CRF-decodes tag spans.

    Label scheme (as consumed in ``predict``): label 0 is "outside"; an odd
    label id begins a span and the following even id continues it; the span's
    tag name is ``id2label[(label - 1) // 2]``.
    """

    def __init__(self, model_dir, starts=None, ends=None):
        """Load CRF transitions, tokenizer, ONNX model and the label map.

        *starts*/*ends* optionally list the label ids allowed at the first /
        last position; all other ids are masked out during decoding.
        """
        self.trans = np.load(os.path.join(model_dir, 'trans.npy'))
        self.tokenizer = BertWordPieceTokenizer(os.path.join(model_dir, 'vocab.txt'))
        self.lexical_model = LexicalModel(os.path.join(model_dir, 'lexical.onnx'))
        with open(os.path.join(model_dir, 'label2id.json'), encoding='utf-8') as reader:
            label2id = json.load(reader)
        self.id2label = {int(v): k for k, v in label2id.items()}
        self.num_labels = len(self.trans)
        # Complements of the allowed start/end sets: label ids to forbid.
        self.non_starts = []
        self.non_ends = []
        if starts is not None:
            for i in range(self.num_labels):
                if i not in starts:
                    self.non_starts.append(i)
        if ends is not None:
            for i in range(self.num_labels):
                if i not in ends:
                    self.non_ends.append(i)

    def decode(self, nodes):
        """An elegant viterbi decode implementation
        Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/snippets.py#L404
        """
        # Preprocessing: make forbidden start/end labels impossible (-inf).
        nodes[0, self.non_starts] -= np.inf
        nodes[-1, self.non_ends] -= np.inf
        # Dynamic programming over emission scores plus transition scores.
        labels = np.arange(self.num_labels).reshape((1, -1))
        scores = nodes[0].reshape((-1, 1))
        paths = labels
        for i in range(1, len(nodes)):
            M = scores + self.trans + nodes[i].reshape((1, -1))
            idxs = M.argmax(0)
            scores = M.max(0).reshape((-1, 1))
            paths = np.concatenate([paths[:, idxs], labels], 0)
        # Return the best-scoring path.
        return paths[:, scores[:, 0].argmax()]

    def predict(self, text: str) -> List[Tuple[str, str]]:
        """Tag *text*, returning ``(surface_span, tag)`` tuples."""
        tokenized = self.tokenizer.encode(text)
        if len(tokenized.tokens) > MAX_LEN:
            raise ValueError('The text is too long (>512) to process')
        token_ids = tokenized.ids
        segment_ids = tokenized.type_ids
        # Maps wordpiece positions back to original character offsets.
        mapping = rematch(tokenized.offsets)
        token_ids, segment_ids = np.array([token_ids]), np.array([segment_ids])
        nodes = self.lexical_model.predict(token_ids, segment_ids)[0][0]
        labels = self.decode(nodes)
        # Group consecutive positions into entities: an odd label opens a
        # new entity, the matching even label extends it, 0 closes it.
        entities, starting = [], False
        for i, label in enumerate(labels):
            if label > 0:
                if label % 2 == 1:
                    starting = True
                    entities.append([[i], self.id2label[(label - 1) // 2]])
                elif starting:
                    entities[-1][0].append(i)
                else:
                    starting = False
            else:
                starting = False
        # Convert token index spans back to text slices via the offset map.
        return [(text[mapping[w[0]][0]:mapping[w[-1]][-1] + 1], l)
                for w, l in entities]
f8196471e12efa04ef015ee669ec65599f53fbdf | 1,323 | py | Python | insights/parsers/tests/test_sysconfig_irqbalance.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/tests/test_sysconfig_irqbalance.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/tests/test_sysconfig_irqbalance.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | from insights.parsers.sysconfig import IrqbalanceSysconfig
from insights.tests import context_wrap
IRQBALANCE_SYSCONF_TEST = """
# irqbalance is a daemon process that distributes interrupts across
# CPUS on SMP systems. The default is to rebalance once every 10
# seconds. This is the environment file that is specified to systemd via the
# EnvironmentFile key in the service unit file (or via whatever method the init
# system you're using has.
#
# ONESHOT=yes
# after starting, wait for a minute, then look at the interrupt
# load and balance it once; after balancing exit and do not change
# it again.
#IRQBALANCE_ONESHOT=yes
#
# IRQBALANCE_BANNED_CPUS
# 64 bit bitmask which allows you to indicate which cpu's should
# be skipped when reblancing irqs. Cpu numbers which have their
# corresponding bits set to one in this mask will not have any
# irq's assigned to them on rebalance
#
IRQBALANCE_BANNED_CPUS=f8
#
# IRQBALANCE_ARGS
# append any args here to the irqbalance daemon as documented in the man page
#
IRQBALANCE_ARGS="-d"
""".strip()
def test_irqbalance_conf():
    """Parsed sysconfig exposes only the uncommented IRQBALANCE_* settings."""
    parsed = IrqbalanceSysconfig(context_wrap(IRQBALANCE_SYSCONF_TEST))
    assert parsed['IRQBALANCE_BANNED_CPUS'] == 'f8'
    assert 'IRQBALANCE_ARGS' in parsed
    assert parsed.get('IRQBALANCE_ARGS') == '-d'
    assert 'IRQBALANCE_ONESHOT' not in parsed
| 33.075 | 79 | 0.773998 |
4708fa32c0445506a48fa457eb17c0187c717e5e | 207 | py | Python | customquad/__init__.py | augustjohansson/customquad | 26f4a42e463d5368b79b4c340dbfb85e64f7d6d4 | [
"Xnet",
"X11"
] | null | null | null | customquad/__init__.py | augustjohansson/customquad | 26f4a42e463d5368b79b4c340dbfb85e64f7d6d4 | [
"Xnet",
"X11"
] | null | null | null | customquad/__init__.py | augustjohansson/customquad | 26f4a42e463d5368b79b4c340dbfb85e64f7d6d4 | [
"Xnet",
"X11"
] | 1 | 2021-06-25T16:59:48.000Z | 2021-06-25T16:59:48.000Z | from .assemble_matrix import assemble_matrix, custom_assemble_matrix
from .assemble_vector import assemble_vector, custom_assemble_vector
from .assemble_scalar import assemble_scalar, custom_assemble_scalar
| 51.75 | 68 | 0.898551 |
fd79c94b557f5d0e92360bb5453288ba5e39b4e7 | 1,272 | py | Python | serving/score-model.py | chzbrgr71/got-image-classification | f2edd1ea3b82089ceaae068b926324ef29fb39d0 | [
"MIT"
] | 10 | 2019-05-15T20:42:41.000Z | 2022-02-28T03:27:58.000Z | serving/score-model.py | chzbrgr71/got-image-classification | f2edd1ea3b82089ceaae068b926324ef29fb39d0 | [
"MIT"
] | 1 | 2022-01-22T04:51:56.000Z | 2022-01-22T04:51:56.000Z | serving/score-model.py | chzbrgr71/got-image-classification | f2edd1ea3b82089ceaae068b926324ef29fb39d0 | [
"MIT"
] | 7 | 2019-06-05T16:15:57.000Z | 2020-05-24T23:02:07.000Z | import os, sys
import tensorflow as tf
# pass in model path as arg (eg - /tf-output/latest_model)
# python score-model.py '../tf-output/latest_model'
model_path = sys.argv[1]
label_lines = [line.rstrip() for line in tf.gfile.GFile(model_path + "/got_retrained_labels.txt")]
with tf.gfile.FastGFile(model_path + "/got_retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
total_score = 0
with tf.Session() as sess:
images = ['jon-snow.jpg','night-king.jpg','cersei.jpg','robert-baratheon.jpg','theon-greyjoy.jpg','daenerys-targaryen.jpg','drogon.jpg','hodor.jpg','samwell.jpg','tyrion.jpg']
for image in images:
image_data = tf.gfile.FastGFile(image, 'rb').read()
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
score = predictions[0][top_k[0]]
character = label_lines[top_k[0]]
print(character + ': ' + str(score))
total_score = total_score + score
avg_score = total_score / 10
print('---')
print('average model accuracy: ' + str(avg_score)) | 39.75 | 179 | 0.680818 |
a5c61ecc75fdb98052ba2693c7ee4c12ad72827f | 3,769 | py | Python | src/securityAbandonerAndInjector/forcedToReceiveEthers/main.py | xf97/HuangGai | 40a349be6102d5eb63893fb914659405ae162d93 | [
"MIT"
] | 23 | 2020-09-20T02:10:44.000Z | 2022-03-22T12:58:13.000Z | src/securityAbandonerAndInjector/forcedToReceiveEthers/main.py | contractshark/HuangGai | 1b26f77b043aa5903774420964c61ab370eb6c7a | [
"MIT"
] | 3 | 2020-09-22T15:28:33.000Z | 2022-01-22T07:48:53.000Z | src/securityAbandonerAndInjector/forcedToReceiveEthers/main.py | contractshark/HuangGai | 1b26f77b043aa5903774420964c61ab370eb6c7a | [
"MIT"
] | 5 | 2021-07-15T02:45:09.000Z | 2022-03-21T13:36:40.000Z | #!/usr/bin/python
#-*- coding: utf-8 -*-
#cache路径
CACHE_PATH = "./cache/"
#缓存合约路径
CACHE_CONTRACT_PATH = "./cache/temp.sol"
#缓存路径信息文件
CACHE_PATHINFO_PATH = "./cache/temp_sol.json"
#缓存抽象语法树文件
CACHE_AST_PATH = "./cache/temp.sol_json.ast"
#源代码保存路径
CONTRACT_PATH = "../../contractExtractor/forcedToReceiveEthersExtractor/result"
#注入信息保存路径
INJECT_INFO_PATH = "../../contractExtractor/forcedToReceiveEthersExtractor/injectInfo"
#sol文件后缀
SOL_SUFFIX = ".sol"
#json.ast文件后缀
JSON_AST_SUFFIX = "_json.ast"
from forcedToReceiveEthersInjector import forcedToReceiveEthersInjector #注入器
import os
import time
class forcedToReceiveEthers:
def __init__(self, _injectInfo, _contractPath):
self.injectInfo = _injectInfo #所有文件的路径信息情况
self.targetInfoFile = self.targetPathInfo(self.injectInfo)
self.targetContract = self.targetContractList(self.targetInfoFile, _contractPath) #合约列表
self.targetAstFile = self.targetAstList(self.targetInfoFile, _contractPath) #ast列表
self.nowNum = 0
try:
os.mkdir(CACHE_PATH) #建立缓存文件夹
except:
#print("The cache folder already exists.")
pass
def targetAstList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
jsonAstName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX + JSON_AST_SUFFIX
result.append(os.path.join(_contractPath, jsonAstName))
return result
def targetContractList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
contractName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX
result.append(os.path.join(_contractPath, contractName))
return result
def targetPathInfo(self, _pathInfo):
fileList = os.listdir(_pathInfo)
result = list()
for item in fileList:
result.append(os.path.join(_pathInfo, item))
return result
def getInfoFile(self, _contractName, _infoFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _infoFileList:
if preName in file:
return file
else:
continue
return str()
def getAstFile(self, _contractName, _astFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _astFileList:
if preName in file:
return file
else:
continue
return str()
def cacheFile(self, _contract, _pathInfo, _astPath):
try:
with open(CACHE_CONTRACT_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_contract).read())
with open(CACHE_PATHINFO_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_pathInfo).read())
with open(CACHE_AST_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_astPath).read())
return
except:
raise Exception("Failed to cache contract.")
def run(self):
stime = time.time()
contractNum = 0
for contractFile in self.targetContract:
contractNum += 1
try:
#1. 获取每个合约的源代码, ast和注入信息
pathInfoFile = self.getInfoFile(contractFile, self.targetInfoFile)
astFile = self.getAstFile(contractFile, self.targetAstFile)
print("\r\t Injecting contract: ", os.path.split(contractFile)[1], end = "")
#2. 缓存当前文件
self.cacheFile(contractFile, pathInfoFile, astFile)
#3. 根据目标路径和源代码注入bug
FI = forcedToReceiveEthersInjector(CACHE_CONTRACT_PATH, CACHE_PATHINFO_PATH, astFile, self.getOriginalContractName(contractFile))
FI.inject()
FI.output()
#4. 输出进度
self.nowNum += 1
#print("\r当前注入进度: %.2f" % (self.nowNum / len(self.targetContract)))
except Exception as e:
self.nowNum += 1
#print(e)
continue
print()
#print(time.time() - stime)
#print(contractNum)
def getOriginalContractName(self, _contractPath):
return os.path.splitext(os.path.split(_contractPath)[1])[0]
# Standalone entry point / manual test.
if __name__ == "__main__":
    injector_driver = forcedToReceiveEthers(INJECT_INFO_PATH, CONTRACT_PATH)
    injector_driver.run()
| 30.395161 | 133 | 0.727779 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.